obsolete: rename divergent volatile set into contentdivergent volatile set...
Boris Feld
r33773:f3f06c26 default
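
This commit's functional change is the one-line edit at line 240 of context.py below: contentdivergent() now looks up the volatile set under its new name, 'contentdivergent', rather than 'divergent', matching the terminology cleanup (orphan, phase-divergent, content-divergent) announced by the 4.4 deprecation shims in this file.

For extension code caught between the two names, a minimal compatibility sketch; the helper name is hypothetical, and it assumes volatile-set computers remain registered by name in obsolete.cachefuncs, as they are in this series:

    from mercurial import obsolete

    def contentdivergentrevs(repo):
        # Hypothetical helper: prefer the post-rename set name, falling
        # back to the pre-4.4 one. obsolete.cachefuncs maps each volatile
        # set name to its compute function, so membership shows which
        # name this version of Mercurial understands.
        if 'contentdivergent' in obsolete.cachefuncs:
            return obsolete.getrevs(repo, 'contentdivergent')
        return obsolete.getrevs(repo, 'divergent')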
@@ -1,2371 +1,2371 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 repoview,
40 40 revlog,
41 41 scmutil,
42 42 sparse,
43 43 subrepo,
44 44 util,
45 45 )
46 46
47 47 propertycache = util.propertycache
48 48
49 49 nonascii = re.compile(r'[^\x21-\x7f]').search
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 59 if isinstance(changeid, basectx):
60 60 return changeid
61 61
62 62 o = super(basectx, cls).__new__(cls)
63 63
64 64 o._repo = repo
65 65 o._rev = nullrev
66 66 o._node = nullid
67 67
68 68 return o
69 69
70 70 def __bytes__(self):
71 71 return short(self.node())
72 72
73 73 __str__ = encoding.strmethod(__bytes__)
74 74
75 75 def __int__(self):
76 76 return self.rev()
77 77
78 78 def __repr__(self):
79 79 return r"<%s %s>" % (type(self).__name__, str(self))
80 80
81 81 def __eq__(self, other):
82 82 try:
83 83 return type(self) == type(other) and self._rev == other._rev
84 84 except AttributeError:
85 85 return False
86 86
87 87 def __ne__(self, other):
88 88 return not (self == other)
89 89
90 90 def __contains__(self, key):
91 91 return key in self._manifest
92 92
93 93 def __getitem__(self, key):
94 94 return self.filectx(key)
95 95
96 96 def __iter__(self):
97 97 return iter(self._manifest)
98 98
99 99 def _buildstatusmanifest(self, status):
100 100 """Builds a manifest that includes the given status results, if this is
101 101 a working copy context. For non-working copy contexts, it just returns
102 102 the normal manifest."""
103 103 return self.manifest()
104 104
105 105 def _matchstatus(self, other, match):
106 106 """return match.always if match is None
107 107
108 108 This internal method provides a way for child objects to override the
109 109 match operator.
110 110 """
111 111 return match or matchmod.always(self._repo.root, self._repo.getcwd())
112 112
113 113 def _buildstatus(self, other, s, match, listignored, listclean,
114 114 listunknown):
115 115 """build a status with respect to another context"""
116 116 # Load earliest manifest first for caching reasons. More specifically,
117 117 # if you have revisions 1000 and 1001, 1001 is probably stored as a
118 118 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
119 119 # 1000 and cache it so that when you read 1001, we just need to apply a
120 120 # delta to what's in the cache. So that's one full reconstruction + one
121 121 # delta application.
122 122 mf2 = None
123 123 if self.rev() is not None and self.rev() < other.rev():
124 124 mf2 = self._buildstatusmanifest(s)
125 125 mf1 = other._buildstatusmanifest(s)
126 126 if mf2 is None:
127 127 mf2 = self._buildstatusmanifest(s)
128 128
129 129 modified, added = [], []
130 130 removed = []
131 131 clean = []
132 132 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
133 133 deletedset = set(deleted)
134 134 d = mf1.diff(mf2, match=match, clean=listclean)
135 135 for fn, value in d.iteritems():
136 136 if fn in deletedset:
137 137 continue
138 138 if value is None:
139 139 clean.append(fn)
140 140 continue
141 141 (node1, flag1), (node2, flag2) = value
142 142 if node1 is None:
143 143 added.append(fn)
144 144 elif node2 is None:
145 145 removed.append(fn)
146 146 elif flag1 != flag2:
147 147 modified.append(fn)
148 148 elif node2 not in wdirnodes:
149 149 # When comparing files between two commits, we save time by
150 150 # not comparing the file contents when the nodeids differ.
151 151 # Note that this means we incorrectly report a reverted change
152 152 # to a file as a modification.
153 153 modified.append(fn)
154 154 elif self[fn].cmp(other[fn]):
155 155 modified.append(fn)
156 156 else:
157 157 clean.append(fn)
158 158
159 159 if removed:
160 160 # need to filter files if they are already reported as removed
161 161 unknown = [fn for fn in unknown if fn not in mf1 and
162 162 (not match or match(fn))]
163 163 ignored = [fn for fn in ignored if fn not in mf1 and
164 164 (not match or match(fn))]
165 165 # if they're deleted, don't report them as removed
166 166 removed = [fn for fn in removed if fn not in deletedset]
167 167
168 168 return scmutil.status(modified, added, removed, deleted, unknown,
169 169 ignored, clean)
170 170
171 171 @propertycache
172 172 def substate(self):
173 173 return subrepo.state(self, self._repo.ui)
174 174
175 175 def subrev(self, subpath):
176 176 return self.substate[subpath][1]
177 177
178 178 def rev(self):
179 179 return self._rev
180 180 def node(self):
181 181 return self._node
182 182 def hex(self):
183 183 return hex(self.node())
184 184 def manifest(self):
185 185 return self._manifest
186 186 def manifestctx(self):
187 187 return self._manifestctx
188 188 def repo(self):
189 189 return self._repo
190 190 def phasestr(self):
191 191 return phases.phasenames[self.phase()]
192 192 def mutable(self):
193 193 return self.phase() > phases.public
194 194
195 195 def getfileset(self, expr):
196 196 return fileset.getfileset(self, expr)
197 197
198 198 def obsolete(self):
199 199 """True if the changeset is obsolete"""
200 200 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201 201
202 202 def extinct(self):
203 203 """True if the changeset is extinct"""
204 204 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205 205
206 206 def unstable(self):
207 207 msg = ("'context.unstable' is deprecated, "
208 208 "use 'context.orphan'")
209 209 self._repo.ui.deprecwarn(msg, '4.4')
210 210 return self.orphan()
211 211
212 212 def orphan(self):
213 213 """True if the changeset is not obsolete but its ancestors are"""
214 214 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
215 215
216 216 def bumped(self):
217 217 msg = ("'context.bumped' is deprecated, "
218 218 "use 'context.phasedivergent'")
219 219 self._repo.ui.deprecwarn(msg, '4.4')
220 220 return self.phasedivergent()
221 221
222 222 def phasedivergent(self):
223 223 """True if the changeset tries to be a successor of a public changeset
224 224
225 225 Only non-public and non-obsolete changesets may be bumped.
226 226 """
227 227 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
228 228
229 229 def divergent(self):
230 230 msg = ("'context.divergent' is deprecated, "
231 231 "use 'context.contentdivergent'")
232 232 self._repo.ui.deprecwarn(msg, '4.4')
233 233 return self.contentdivergent()
234 234
235 235 def contentdivergent(self):
236 236 """True if the changeset is a successor of a changeset with multiple possible successor sets
237 237
238 238 Only non-public and non-obsolete changesets may be divergent.
239 239 """
240 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
240 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
241 241
242 242 def troubled(self):
243 243 msg = ("'context.troubled' is deprecated, "
244 244 "use 'context.isunstable'")
245 245 self._repo.ui.deprecwarn(msg, '4.4')
246 246 return self.unstable()
247 247
248 248 def isunstable(self):
249 249 """True if the changeset is either orphan, phase-divergent or content-divergent"""
250 250 return self.orphan() or self.phasedivergent() or self.contentdivergent()
251 251
252 252 def troubles(self):
253 253 """Keep the old version around in order to avoid breaking
254 254 extensions that rely on the old return values.
255 255 """
256 256 msg = ("'context.troubles' is deprecated, "
257 257 "use 'context.instabilities'")
258 258 self._repo.ui.deprecwarn(msg, '4.4')
259 259
260 260 troubles = []
261 261 if self.orphan():
262 262 troubles.append('orphan')
263 263 if self.phasedivergent():
264 264 troubles.append('bumped')
265 265 if self.contentdivergent():
266 266 troubles.append('divergent')
267 267 return troubles
268 268
269 269 def instabilities(self):
270 270 """return the list of instabilities affecting this changeset.
271 271
272 272 Instabilities are returned as strings. Possible values are:
273 273 - orphan,
274 274 - phase-divergent,
275 275 - content-divergent.
276 276 """
277 277 instabilities = []
278 278 if self.orphan():
279 279 instabilities.append('orphan')
280 280 if self.phasedivergent():
281 281 instabilities.append('phase-divergent')
282 282 if self.contentdivergent():
283 283 instabilities.append('content-divergent')
284 284 return instabilities
285 285
286 286 def parents(self):
287 287 """return contexts for each parent changeset"""
288 288 return self._parents
289 289
290 290 def p1(self):
291 291 return self._parents[0]
292 292
293 293 def p2(self):
294 294 parents = self._parents
295 295 if len(parents) == 2:
296 296 return parents[1]
297 297 return changectx(self._repo, nullrev)
298 298
299 299 def _fileinfo(self, path):
300 300 if r'_manifest' in self.__dict__:
301 301 try:
302 302 return self._manifest[path], self._manifest.flags(path)
303 303 except KeyError:
304 304 raise error.ManifestLookupError(self._node, path,
305 305 _('not found in manifest'))
306 306 if r'_manifestdelta' in self.__dict__ or path in self.files():
307 307 if path in self._manifestdelta:
308 308 return (self._manifestdelta[path],
309 309 self._manifestdelta.flags(path))
310 310 mfl = self._repo.manifestlog
311 311 try:
312 312 node, flag = mfl[self._changeset.manifest].find(path)
313 313 except KeyError:
314 314 raise error.ManifestLookupError(self._node, path,
315 315 _('not found in manifest'))
316 316
317 317 return node, flag
318 318
319 319 def filenode(self, path):
320 320 return self._fileinfo(path)[0]
321 321
322 322 def flags(self, path):
323 323 try:
324 324 return self._fileinfo(path)[1]
325 325 except error.LookupError:
326 326 return ''
327 327
328 328 def sub(self, path, allowcreate=True):
329 329 '''return a subrepo for the stored revision of path, never wdir()'''
330 330 return subrepo.subrepo(self, path, allowcreate=allowcreate)
331 331
332 332 def nullsub(self, path, pctx):
333 333 return subrepo.nullsubrepo(self, path, pctx)
334 334
335 335 def workingsub(self, path):
336 336 '''return a subrepo for the stored revision, or wdir if this is a wdir
337 337 context.
338 338 '''
339 339 return subrepo.subrepo(self, path, allowwdir=True)
340 340
341 341 def match(self, pats=None, include=None, exclude=None, default='glob',
342 342 listsubrepos=False, badfn=None):
343 343 r = self._repo
344 344 return matchmod.match(r.root, r.getcwd(), pats,
345 345 include, exclude, default,
346 346 auditor=r.nofsauditor, ctx=self,
347 347 listsubrepos=listsubrepos, badfn=badfn)
348 348
349 349 def diff(self, ctx2=None, match=None, **opts):
350 350 """Returns a diff generator for the given contexts and matcher"""
351 351 if ctx2 is None:
352 352 ctx2 = self.p1()
353 353 if ctx2 is not None:
354 354 ctx2 = self._repo[ctx2]
355 355 diffopts = patch.diffopts(self._repo.ui, opts)
356 356 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
357 357
358 358 def dirs(self):
359 359 return self._manifest.dirs()
360 360
361 361 def hasdir(self, dir):
362 362 return self._manifest.hasdir(dir)
363 363
364 364 def status(self, other=None, match=None, listignored=False,
365 365 listclean=False, listunknown=False, listsubrepos=False):
366 366 """return status of files between two nodes or node and working
367 367 directory.
368 368
369 369 If other is None, compare this node with working directory.
370 370
371 371 returns (modified, added, removed, deleted, unknown, ignored, clean)
372 372 """
373 373
374 374 ctx1 = self
375 375 ctx2 = self._repo[other]
376 376
377 377 # This next code block is, admittedly, fragile logic that tests for
378 378 # reversing the contexts and wouldn't need to exist if it weren't for
379 379 # the fast (and common) code path of comparing the working directory
380 380 # with its first parent.
381 381 #
382 382 # What we're aiming for here is the ability to call:
383 383 #
384 384 # workingctx.status(parentctx)
385 385 #
386 386 # If we always built the manifest for each context and compared those,
387 387 # then we'd be done. But the special case of the above call means we
388 388 # just copy the manifest of the parent.
389 389 reversed = False
390 390 if (not isinstance(ctx1, changectx)
391 391 and isinstance(ctx2, changectx)):
392 392 reversed = True
393 393 ctx1, ctx2 = ctx2, ctx1
394 394
395 395 match = ctx2._matchstatus(ctx1, match)
396 396 r = scmutil.status([], [], [], [], [], [], [])
397 397 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
398 398 listunknown)
399 399
400 400 if reversed:
401 401 # Reverse added and removed. Clear deleted, unknown and ignored as
402 402 # these make no sense to reverse.
403 403 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
404 404 r.clean)
405 405
406 406 if listsubrepos:
407 407 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
408 408 try:
409 409 rev2 = ctx2.subrev(subpath)
410 410 except KeyError:
411 411 # A subrepo that existed in node1 was deleted between
412 412 # node1 and node2 (inclusive). Thus, ctx2's substate
413 413 # won't contain that subpath. The best we can do is ignore it.
414 414 rev2 = None
415 415 submatch = matchmod.subdirmatcher(subpath, match)
416 416 s = sub.status(rev2, match=submatch, ignored=listignored,
417 417 clean=listclean, unknown=listunknown,
418 418 listsubrepos=True)
419 419 for rfiles, sfiles in zip(r, s):
420 420 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
421 421
422 422 for l in r:
423 423 l.sort()
424 424
425 425 return r
426 426
427 427 def _filterederror(repo, changeid):
428 428 """build an exception to be raised about a filtered changeid
429 429
430 430 This is extracted into a function to help extensions (e.g. evolve)
431 431 experiment with various message variants."""
432 432 if repo.filtername.startswith('visible'):
433 433 msg = _("hidden revision '%s'") % changeid
434 434 hint = _('use --hidden to access hidden revisions')
435 435 return error.FilteredRepoLookupError(msg, hint=hint)
436 436 msg = _("filtered revision '%s' (not in '%s' subset)")
437 437 msg %= (changeid, repo.filtername)
438 438 return error.FilteredRepoLookupError(msg)
439 439
440 440 class changectx(basectx):
441 441 """A changecontext object makes access to data related to a particular
442 442 changeset convenient. It represents a read-only context already present in
443 443 the repo."""
444 444 def __init__(self, repo, changeid=''):
445 445 """changeid is a revision number, node, or tag"""
446 446
447 447 # since basectx.__new__ already took care of copying the object, we
448 448 # don't need to do anything in __init__, so we just exit here
449 449 if isinstance(changeid, basectx):
450 450 return
451 451
452 452 if changeid == '':
453 453 changeid = '.'
454 454 self._repo = repo
455 455
456 456 try:
457 457 if isinstance(changeid, int):
458 458 self._node = repo.changelog.node(changeid)
459 459 self._rev = changeid
460 460 return
461 461 if not pycompat.ispy3 and isinstance(changeid, long):
462 462 changeid = str(changeid)
463 463 if changeid == 'null':
464 464 self._node = nullid
465 465 self._rev = nullrev
466 466 return
467 467 if changeid == 'tip':
468 468 self._node = repo.changelog.tip()
469 469 self._rev = repo.changelog.rev(self._node)
470 470 return
471 471 if changeid == '.' or changeid == repo.dirstate.p1():
472 472 # this is a hack to delay/avoid loading obsmarkers
473 473 # when we know that '.' won't be hidden
474 474 self._node = repo.dirstate.p1()
475 475 self._rev = repo.unfiltered().changelog.rev(self._node)
476 476 return
477 477 if len(changeid) == 20:
478 478 try:
479 479 self._node = changeid
480 480 self._rev = repo.changelog.rev(changeid)
481 481 return
482 482 except error.FilteredRepoLookupError:
483 483 raise
484 484 except LookupError:
485 485 pass
486 486
487 487 try:
488 488 r = int(changeid)
489 489 if '%d' % r != changeid:
490 490 raise ValueError
491 491 l = len(repo.changelog)
492 492 if r < 0:
493 493 r += l
494 494 if r < 0 or r >= l and r != wdirrev:
495 495 raise ValueError
496 496 self._rev = r
497 497 self._node = repo.changelog.node(r)
498 498 return
499 499 except error.FilteredIndexError:
500 500 raise
501 501 except (ValueError, OverflowError, IndexError):
502 502 pass
503 503
504 504 if len(changeid) == 40:
505 505 try:
506 506 self._node = bin(changeid)
507 507 self._rev = repo.changelog.rev(self._node)
508 508 return
509 509 except error.FilteredLookupError:
510 510 raise
511 511 except (TypeError, LookupError):
512 512 pass
513 513
514 514 # lookup bookmarks through the name interface
515 515 try:
516 516 self._node = repo.names.singlenode(repo, changeid)
517 517 self._rev = repo.changelog.rev(self._node)
518 518 return
519 519 except KeyError:
520 520 pass
521 521 except error.FilteredRepoLookupError:
522 522 raise
523 523 except error.RepoLookupError:
524 524 pass
525 525
526 526 self._node = repo.unfiltered().changelog._partialmatch(changeid)
527 527 if self._node is not None:
528 528 self._rev = repo.changelog.rev(self._node)
529 529 return
530 530
531 531 # lookup failed
532 532 # check if it might have come from a damaged dirstate
533 533 #
534 534 # XXX we could avoid the unfiltered if we had a recognizable
535 535 # exception for filtered changeset access
536 536 if changeid in repo.unfiltered().dirstate.parents():
537 537 msg = _("working directory has unknown parent '%s'!")
538 538 raise error.Abort(msg % short(changeid))
539 539 try:
540 540 if len(changeid) == 20 and nonascii(changeid):
541 541 changeid = hex(changeid)
542 542 except TypeError:
543 543 pass
544 544 except (error.FilteredIndexError, error.FilteredLookupError,
545 545 error.FilteredRepoLookupError):
546 546 raise _filterederror(repo, changeid)
547 547 except IndexError:
548 548 pass
549 549 raise error.RepoLookupError(
550 550 _("unknown revision '%s'") % changeid)
551 551
552 552 def __hash__(self):
553 553 try:
554 554 return hash(self._rev)
555 555 except AttributeError:
556 556 return id(self)
557 557
558 558 def __nonzero__(self):
559 559 return self._rev != nullrev
560 560
561 561 __bool__ = __nonzero__
562 562
563 563 @propertycache
564 564 def _changeset(self):
565 565 return self._repo.changelog.changelogrevision(self.rev())
566 566
567 567 @propertycache
568 568 def _manifest(self):
569 569 return self._manifestctx.read()
570 570
571 571 @property
572 572 def _manifestctx(self):
573 573 return self._repo.manifestlog[self._changeset.manifest]
574 574
575 575 @propertycache
576 576 def _manifestdelta(self):
577 577 return self._manifestctx.readdelta()
578 578
579 579 @propertycache
580 580 def _parents(self):
581 581 repo = self._repo
582 582 p1, p2 = repo.changelog.parentrevs(self._rev)
583 583 if p2 == nullrev:
584 584 return [changectx(repo, p1)]
585 585 return [changectx(repo, p1), changectx(repo, p2)]
586 586
587 587 def changeset(self):
588 588 c = self._changeset
589 589 return (
590 590 c.manifest,
591 591 c.user,
592 592 c.date,
593 593 c.files,
594 594 c.description,
595 595 c.extra,
596 596 )
597 597 def manifestnode(self):
598 598 return self._changeset.manifest
599 599
600 600 def user(self):
601 601 return self._changeset.user
602 602 def date(self):
603 603 return self._changeset.date
604 604 def files(self):
605 605 return self._changeset.files
606 606 def description(self):
607 607 return self._changeset.description
608 608 def branch(self):
609 609 return encoding.tolocal(self._changeset.extra.get("branch"))
610 610 def closesbranch(self):
611 611 return 'close' in self._changeset.extra
612 612 def extra(self):
613 613 return self._changeset.extra
614 614 def tags(self):
615 615 return self._repo.nodetags(self._node)
616 616 def bookmarks(self):
617 617 return self._repo.nodebookmarks(self._node)
618 618 def phase(self):
619 619 return self._repo._phasecache.phase(self._repo, self._rev)
620 620 def hidden(self):
621 621 return self._rev in repoview.filterrevs(self._repo, 'visible')
622 622
623 623 def children(self):
624 624 """return contexts for each child changeset"""
625 625 c = self._repo.changelog.children(self._node)
626 626 return [changectx(self._repo, x) for x in c]
627 627
628 628 def ancestors(self):
629 629 for a in self._repo.changelog.ancestors([self._rev]):
630 630 yield changectx(self._repo, a)
631 631
632 632 def descendants(self):
633 633 for d in self._repo.changelog.descendants([self._rev]):
634 634 yield changectx(self._repo, d)
635 635
636 636 def filectx(self, path, fileid=None, filelog=None):
637 637 """get a file context from this changeset"""
638 638 if fileid is None:
639 639 fileid = self.filenode(path)
640 640 return filectx(self._repo, path, fileid=fileid,
641 641 changectx=self, filelog=filelog)
642 642
643 643 def ancestor(self, c2, warn=False):
644 644 """return the "best" ancestor context of self and c2
645 645
646 646 If there are multiple candidates, it will show a message and check
647 647 merge.preferancestor configuration before falling back to the
648 648 revlog ancestor."""
649 649 # deal with workingctxs
650 650 n2 = c2._node
651 651 if n2 is None:
652 652 n2 = c2._parents[0]._node
653 653 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
654 654 if not cahs:
655 655 anc = nullid
656 656 elif len(cahs) == 1:
657 657 anc = cahs[0]
658 658 else:
659 659 # experimental config: merge.preferancestor
660 660 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
661 661 try:
662 662 ctx = changectx(self._repo, r)
663 663 except error.RepoLookupError:
664 664 continue
665 665 anc = ctx.node()
666 666 if anc in cahs:
667 667 break
668 668 else:
669 669 anc = self._repo.changelog.ancestor(self._node, n2)
670 670 if warn:
671 671 self._repo.ui.status(
672 672 (_("note: using %s as ancestor of %s and %s\n") %
673 673 (short(anc), short(self._node), short(n2))) +
674 674 ''.join(_(" alternatively, use --config "
675 675 "merge.preferancestor=%s\n") %
676 676 short(n) for n in sorted(cahs) if n != anc))
677 677 return changectx(self._repo, anc)
678 678
679 679 def descendant(self, other):
680 680 """True if other is descendant of this changeset"""
681 681 return self._repo.changelog.descendant(self._rev, other._rev)
682 682
683 683 def walk(self, match):
684 684 '''Generates matching file names.'''
685 685
686 686 # Wrap match.bad method to have message with nodeid
687 687 def bad(fn, msg):
688 688 # The manifest doesn't know about subrepos, so don't complain about
689 689 # paths into valid subrepos.
690 690 if any(fn == s or fn.startswith(s + '/')
691 691 for s in self.substate):
692 692 return
693 693 match.bad(fn, _('no such file in rev %s') % self)
694 694
695 695 m = matchmod.badmatch(match, bad)
696 696 return self._manifest.walk(m)
697 697
698 698 def matches(self, match):
699 699 return self.walk(match)
700 700
701 701 class basefilectx(object):
702 702 """A filecontext object represents the common logic for its children:
703 703 filectx: read-only access to a filerevision that is already present
704 704 in the repo,
705 705 workingfilectx: a filecontext that represents files from the working
706 706 directory,
707 707 memfilectx: a filecontext that represents files in-memory,
708 708 overlayfilectx: duplicate another filecontext with some fields overridden.
709 709 """
710 710 @propertycache
711 711 def _filelog(self):
712 712 return self._repo.file(self._path)
713 713
714 714 @propertycache
715 715 def _changeid(self):
716 716 if r'_changeid' in self.__dict__:
717 717 return self._changeid
718 718 elif r'_changectx' in self.__dict__:
719 719 return self._changectx.rev()
720 720 elif r'_descendantrev' in self.__dict__:
721 721 # this file context was created from a revision with a known
722 722 # descendant; we can (lazily) correct for linkrev aliases
723 723 return self._adjustlinkrev(self._descendantrev)
724 724 else:
725 725 return self._filelog.linkrev(self._filerev)
726 726
727 727 @propertycache
728 728 def _filenode(self):
729 729 if r'_fileid' in self.__dict__:
730 730 return self._filelog.lookup(self._fileid)
731 731 else:
732 732 return self._changectx.filenode(self._path)
733 733
734 734 @propertycache
735 735 def _filerev(self):
736 736 return self._filelog.rev(self._filenode)
737 737
738 738 @propertycache
739 739 def _repopath(self):
740 740 return self._path
741 741
742 742 def __nonzero__(self):
743 743 try:
744 744 self._filenode
745 745 return True
746 746 except error.LookupError:
747 747 # file is missing
748 748 return False
749 749
750 750 __bool__ = __nonzero__
751 751
752 752 def __bytes__(self):
753 753 try:
754 754 return "%s@%s" % (self.path(), self._changectx)
755 755 except error.LookupError:
756 756 return "%s@???" % self.path()
757 757
758 758 __str__ = encoding.strmethod(__bytes__)
759 759
760 760 def __repr__(self):
761 761 return "<%s %s>" % (type(self).__name__, str(self))
762 762
763 763 def __hash__(self):
764 764 try:
765 765 return hash((self._path, self._filenode))
766 766 except AttributeError:
767 767 return id(self)
768 768
769 769 def __eq__(self, other):
770 770 try:
771 771 return (type(self) == type(other) and self._path == other._path
772 772 and self._filenode == other._filenode)
773 773 except AttributeError:
774 774 return False
775 775
776 776 def __ne__(self, other):
777 777 return not (self == other)
778 778
779 779 def filerev(self):
780 780 return self._filerev
781 781 def filenode(self):
782 782 return self._filenode
783 783 @propertycache
784 784 def _flags(self):
785 785 return self._changectx.flags(self._path)
786 786 def flags(self):
787 787 return self._flags
788 788 def filelog(self):
789 789 return self._filelog
790 790 def rev(self):
791 791 return self._changeid
792 792 def linkrev(self):
793 793 return self._filelog.linkrev(self._filerev)
794 794 def node(self):
795 795 return self._changectx.node()
796 796 def hex(self):
797 797 return self._changectx.hex()
798 798 def user(self):
799 799 return self._changectx.user()
800 800 def date(self):
801 801 return self._changectx.date()
802 802 def files(self):
803 803 return self._changectx.files()
804 804 def description(self):
805 805 return self._changectx.description()
806 806 def branch(self):
807 807 return self._changectx.branch()
808 808 def extra(self):
809 809 return self._changectx.extra()
810 810 def phase(self):
811 811 return self._changectx.phase()
812 812 def phasestr(self):
813 813 return self._changectx.phasestr()
814 814 def manifest(self):
815 815 return self._changectx.manifest()
816 816 def changectx(self):
817 817 return self._changectx
818 818 def renamed(self):
819 819 return self._copied
820 820 def repo(self):
821 821 return self._repo
822 822 def size(self):
823 823 return len(self.data())
824 824
825 825 def path(self):
826 826 return self._path
827 827
828 828 def isbinary(self):
829 829 try:
830 830 return util.binary(self.data())
831 831 except IOError:
832 832 return False
833 833 def isexec(self):
834 834 return 'x' in self.flags()
835 835 def islink(self):
836 836 return 'l' in self.flags()
837 837
838 838 def isabsent(self):
839 839 """whether this filectx represents a file not in self._changectx
840 840
841 841 This is mainly for merge code to detect change/delete conflicts. This is
842 842 expected to be True for all subclasses of basectx."""
843 843 return False
844 844
845 845 _customcmp = False
846 846 def cmp(self, fctx):
847 847 """compare with other file context
848 848
849 849 returns True if different from fctx.
850 850 """
851 851 if fctx._customcmp:
852 852 return fctx.cmp(self)
853 853
854 854 if (fctx._filenode is None
855 855 and (self._repo._encodefilterpats
856 856 # if file data starts with '\1\n', empty metadata block is
857 857 # prepended, which adds 4 bytes to filelog.size().
858 858 or self.size() - 4 == fctx.size())
859 859 or self.size() == fctx.size()):
860 860 return self._filelog.cmp(self._filenode, fctx.data())
861 861
862 862 return True
863 863
864 864 def _adjustlinkrev(self, srcrev, inclusive=False):
865 865 """return the first ancestor of <srcrev> introducing <fnode>
866 866
867 867 If the linkrev of the file revision does not point to an ancestor of
868 868 srcrev, we'll walk down the ancestors until we find one introducing
869 869 this file revision.
870 870
871 871 :srcrev: the changeset revision we search ancestors from
872 872 :inclusive: if true, the src revision will also be checked
873 873 """
874 874 repo = self._repo
875 875 cl = repo.unfiltered().changelog
876 876 mfl = repo.manifestlog
877 877 # fetch the linkrev
878 878 lkr = self.linkrev()
879 879 # hack to reuse ancestor computation when searching for renames
880 880 memberanc = getattr(self, '_ancestrycontext', None)
881 881 iteranc = None
882 882 if srcrev is None:
883 883 # wctx case, used by workingfilectx during mergecopy
884 884 revs = [p.rev() for p in self._repo[None].parents()]
885 885 inclusive = True # we skipped the real (revless) source
886 886 else:
887 887 revs = [srcrev]
888 888 if memberanc is None:
889 889 memberanc = iteranc = cl.ancestors(revs, lkr,
890 890 inclusive=inclusive)
891 891 # check if this linkrev is an ancestor of srcrev
892 892 if lkr not in memberanc:
893 893 if iteranc is None:
894 894 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
895 895 fnode = self._filenode
896 896 path = self._path
897 897 for a in iteranc:
898 898 ac = cl.read(a) # get changeset data (we avoid object creation)
899 899 if path in ac[3]: # checking the 'files' field.
900 900 # The file has been touched, check if the content is
901 901 # similar to the one we search for.
902 902 if fnode == mfl[ac[0]].readfast().get(path):
903 903 return a
904 904 # In theory, we should never get out of that loop without a result.
905 905 # But if the manifest uses a buggy file revision (not a child of
906 906 # the one it replaces) we could. Such a buggy situation will likely
907 907 # result in a crash somewhere else at some point.
908 908 return lkr
909 909
910 910 def introrev(self):
911 911 """return the rev of the changeset which introduced this file revision
912 912
913 913 This method is different from linkrev because it takes into account the
914 914 changeset the filectx was created from. It ensures the returned
915 915 revision is one of its ancestors. This prevents bugs from
916 916 'linkrev-shadowing' when a file revision is used by multiple
917 917 changesets.
918 918 """
919 919 lkr = self.linkrev()
920 920 attrs = vars(self)
921 921 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
922 922 if noctx or self.rev() == lkr:
923 923 return self.linkrev()
924 924 return self._adjustlinkrev(self.rev(), inclusive=True)
925 925
926 926 def _parentfilectx(self, path, fileid, filelog):
927 927 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
928 928 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
929 929 if '_changeid' in vars(self) or '_changectx' in vars(self):
930 930 # If self is associated with a changeset (probably explicitly
931 931 # fed), ensure the created filectx is associated with a
932 932 # changeset that is an ancestor of self.changectx.
933 933 # This lets us later use _adjustlinkrev to get a correct link.
934 934 fctx._descendantrev = self.rev()
935 935 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
936 936 elif '_descendantrev' in vars(self):
937 937 # Otherwise propagate _descendantrev if we have one associated.
938 938 fctx._descendantrev = self._descendantrev
939 939 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
940 940 return fctx
941 941
942 942 def parents(self):
943 943 _path = self._path
944 944 fl = self._filelog
945 945 parents = self._filelog.parents(self._filenode)
946 946 pl = [(_path, node, fl) for node in parents if node != nullid]
947 947
948 948 r = fl.renamed(self._filenode)
949 949 if r:
950 950 # - In the simple rename case, both parents are nullid, pl is empty.
951 951 # - In case of merge, only one of the parents is nullid and should
952 952 # be replaced with the rename information. This parent is -always-
953 953 # the first one.
954 954 #
955 955 # As nullid parents have already been filtered out by the previous
956 956 # list comprehension, inserting at 0 always replaces the first
957 957 # nullid parent with the rename information.
958 958 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
959 959
960 960 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
961 961
962 962 def p1(self):
963 963 return self.parents()[0]
964 964
965 965 def p2(self):
966 966 p = self.parents()
967 967 if len(p) == 2:
968 968 return p[1]
969 969 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
970 970
971 971 def annotate(self, follow=False, linenumber=False, skiprevs=None,
972 972 diffopts=None):
973 973 '''returns a list of tuples of ((ctx, number), line) for each line
974 974 in the file, where ctx is the filectx of the node where
975 975 that line was last changed; if the linenumber parameter is True, number is
976 976 the line number at its first appearance in the managed file; otherwise,
977 977 number has a fixed value of False.
978 978 '''
979 979
980 980 def lines(text):
981 981 if text.endswith("\n"):
982 982 return text.count("\n")
983 983 return text.count("\n") + int(bool(text))
984 984
985 985 if linenumber:
986 986 def decorate(text, rev):
987 987 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
988 988 else:
989 989 def decorate(text, rev):
990 990 return ([(rev, False)] * lines(text), text)
991 991
992 992 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
993 993
994 994 def parents(f):
995 995 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
996 996 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
997 997 # from the topmost introrev (= srcrev) down to p.linkrev() if it
998 998 # isn't an ancestor of the srcrev.
999 999 f._changeid
1000 1000 pl = f.parents()
1001 1001
1002 1002 # Don't return renamed parents if we aren't following.
1003 1003 if not follow:
1004 1004 pl = [p for p in pl if p.path() == f.path()]
1005 1005
1006 1006 # renamed filectx won't have a filelog yet, so set it
1007 1007 # from the cache to save time
1008 1008 for p in pl:
1009 1009 if not '_filelog' in p.__dict__:
1010 1010 p._filelog = getlog(p.path())
1011 1011
1012 1012 return pl
1013 1013
1014 1014 # use linkrev to find the first changeset where self appeared
1015 1015 base = self
1016 1016 introrev = self.introrev()
1017 1017 if self.rev() != introrev:
1018 1018 base = self.filectx(self.filenode(), changeid=introrev)
1019 1019 if getattr(base, '_ancestrycontext', None) is None:
1020 1020 cl = self._repo.changelog
1021 1021 if introrev is None:
1022 1022 # wctx is not inclusive, but works because _ancestrycontext
1023 1023 # is used to test filelog revisions
1024 1024 ac = cl.ancestors([p.rev() for p in base.parents()],
1025 1025 inclusive=True)
1026 1026 else:
1027 1027 ac = cl.ancestors([introrev], inclusive=True)
1028 1028 base._ancestrycontext = ac
1029 1029
1030 1030 # This algorithm would prefer to be recursive, but Python is a
1031 1031 # bit recursion-hostile. Instead we do an iterative
1032 1032 # depth-first search.
1033 1033
1034 1034 # 1st DFS pre-calculates pcache and needed
1035 1035 visit = [base]
1036 1036 pcache = {}
1037 1037 needed = {base: 1}
1038 1038 while visit:
1039 1039 f = visit.pop()
1040 1040 if f in pcache:
1041 1041 continue
1042 1042 pl = parents(f)
1043 1043 pcache[f] = pl
1044 1044 for p in pl:
1045 1045 needed[p] = needed.get(p, 0) + 1
1046 1046 if p not in pcache:
1047 1047 visit.append(p)
1048 1048
1049 1049 # 2nd DFS does the actual annotate
1050 1050 visit[:] = [base]
1051 1051 hist = {}
1052 1052 while visit:
1053 1053 f = visit[-1]
1054 1054 if f in hist:
1055 1055 visit.pop()
1056 1056 continue
1057 1057
1058 1058 ready = True
1059 1059 pl = pcache[f]
1060 1060 for p in pl:
1061 1061 if p not in hist:
1062 1062 ready = False
1063 1063 visit.append(p)
1064 1064 if ready:
1065 1065 visit.pop()
1066 1066 curr = decorate(f.data(), f)
1067 1067 skipchild = False
1068 1068 if skiprevs is not None:
1069 1069 skipchild = f._changeid in skiprevs
1070 1070 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1071 1071 diffopts)
1072 1072 for p in pl:
1073 1073 if needed[p] == 1:
1074 1074 del hist[p]
1075 1075 del needed[p]
1076 1076 else:
1077 1077 needed[p] -= 1
1078 1078
1079 1079 hist[f] = curr
1080 1080 del pcache[f]
1081 1081
1082 1082 return zip(hist[base][0], hist[base][1].splitlines(True))
1083 1083
1084 1084 def ancestors(self, followfirst=False):
1085 1085 visit = {}
1086 1086 c = self
1087 1087 if followfirst:
1088 1088 cut = 1
1089 1089 else:
1090 1090 cut = None
1091 1091
1092 1092 while True:
1093 1093 for parent in c.parents()[:cut]:
1094 1094 visit[(parent.linkrev(), parent.filenode())] = parent
1095 1095 if not visit:
1096 1096 break
1097 1097 c = visit.pop(max(visit))
1098 1098 yield c
1099 1099
1100 1100 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1101 1101 r'''
1102 1102 Given parent and child fctxes and annotate data for parents, for all lines
1103 1103 in either parent that match the child, annotate the child with the parent's
1104 1104 data.
1105 1105
1106 1106 Additionally, if `skipchild` is True, replace all other lines with parent
1107 1107 annotate data as well such that child is never blamed for any lines.
1108 1108
1109 1109 >>> oldfctx = 'old'
1110 1110 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1111 1111 >>> olddata = 'a\nb\n'
1112 1112 >>> p1data = 'a\nb\nc\n'
1113 1113 >>> p2data = 'a\nc\nd\n'
1114 1114 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1115 1115 >>> diffopts = mdiff.diffopts()
1116 1116
1117 1117 >>> def decorate(text, rev):
1118 1118 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1119 1119
1120 1120 Basic usage:
1121 1121
1122 1122 >>> oldann = decorate(olddata, oldfctx)
1123 1123 >>> p1ann = decorate(p1data, p1fctx)
1124 1124 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1125 1125 >>> p1ann[0]
1126 1126 [('old', 1), ('old', 2), ('p1', 3)]
1127 1127 >>> p2ann = decorate(p2data, p2fctx)
1128 1128 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1129 1129 >>> p2ann[0]
1130 1130 [('old', 1), ('p2', 2), ('p2', 3)]
1131 1131
1132 1132 Test with multiple parents (note the difference caused by ordering):
1133 1133
1134 1134 >>> childann = decorate(childdata, childfctx)
1135 1135 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1136 1136 ... diffopts)
1137 1137 >>> childann[0]
1138 1138 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1139 1139
1140 1140 >>> childann = decorate(childdata, childfctx)
1141 1141 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1142 1142 ... diffopts)
1143 1143 >>> childann[0]
1144 1144 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1145 1145
1146 1146 Test with skipchild (note the difference caused by ordering):
1147 1147
1148 1148 >>> childann = decorate(childdata, childfctx)
1149 1149 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1150 1150 ... diffopts)
1151 1151 >>> childann[0]
1152 1152 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1153 1153
1154 1154 >>> childann = decorate(childdata, childfctx)
1155 1155 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1156 1156 ... diffopts)
1157 1157 >>> childann[0]
1158 1158 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1159 1159 '''
1160 1160 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1161 1161 for parent in parents]
1162 1162
1163 1163 if skipchild:
1164 1164 # Need to iterate over the blocks twice -- make it a list
1165 1165 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1166 1166 # Mercurial currently prefers p2 over p1 for annotate.
1167 1167 # TODO: change this?
1168 1168 for parent, blocks in pblocks:
1169 1169 for (a1, a2, b1, b2), t in blocks:
1170 1170 # Changed blocks ('!') or blocks made only of blank lines ('~')
1171 1171 # belong to the child.
1172 1172 if t == '=':
1173 1173 child[0][b1:b2] = parent[0][a1:a2]
1174 1174
1175 1175 if skipchild:
1176 1176 # Now try and match up anything that couldn't be matched.
1177 1177 # Reversing pblocks maintains bias towards p2, matching above
1178 1178 # behavior.
1179 1179 pblocks.reverse()
1180 1180
1181 1181 # The heuristics are:
1182 1182 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1183 1183 # This could potentially be smarter but works well enough.
1184 1184 # * For a non-matching section, do a best-effort fit. Match lines in
1185 1185 # diff hunks 1:1, dropping lines as necessary.
1186 1186 # * Repeat the last line as a last resort.
1187 1187
1188 1188 # First, replace as much as possible without repeating the last line.
1189 1189 remaining = [(parent, []) for parent, _blocks in pblocks]
1190 1190 for idx, (parent, blocks) in enumerate(pblocks):
1191 1191 for (a1, a2, b1, b2), _t in blocks:
1192 1192 if a2 - a1 >= b2 - b1:
1193 1193 for bk in xrange(b1, b2):
1194 1194 if child[0][bk][0] == childfctx:
1195 1195 ak = min(a1 + (bk - b1), a2 - 1)
1196 1196 child[0][bk] = parent[0][ak]
1197 1197 else:
1198 1198 remaining[idx][1].append((a1, a2, b1, b2))
1199 1199
1200 1200 # Then, look at anything left, which might involve repeating the last
1201 1201 # line.
1202 1202 for parent, blocks in remaining:
1203 1203 for a1, a2, b1, b2 in blocks:
1204 1204 for bk in xrange(b1, b2):
1205 1205 if child[0][bk][0] == childfctx:
1206 1206 ak = min(a1 + (bk - b1), a2 - 1)
1207 1207 child[0][bk] = parent[0][ak]
1208 1208 return child
1209 1209
1210 1210 class filectx(basefilectx):
1211 1211 """A filecontext object makes access to data related to a particular
1212 1212 filerevision convenient."""
1213 1213 def __init__(self, repo, path, changeid=None, fileid=None,
1214 1214 filelog=None, changectx=None):
1215 1215 """changeid can be a changeset revision, node, or tag.
1216 1216 fileid can be a file revision or node."""
1217 1217 self._repo = repo
1218 1218 self._path = path
1219 1219
1220 1220 assert (changeid is not None
1221 1221 or fileid is not None
1222 1222 or changectx is not None), \
1223 1223 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1224 1224 % (changeid, fileid, changectx))
1225 1225
1226 1226 if filelog is not None:
1227 1227 self._filelog = filelog
1228 1228
1229 1229 if changeid is not None:
1230 1230 self._changeid = changeid
1231 1231 if changectx is not None:
1232 1232 self._changectx = changectx
1233 1233 if fileid is not None:
1234 1234 self._fileid = fileid
1235 1235
1236 1236 @propertycache
1237 1237 def _changectx(self):
1238 1238 try:
1239 1239 return changectx(self._repo, self._changeid)
1240 1240 except error.FilteredRepoLookupError:
1241 1241 # A linkrev may point to any revision in the repository. When the
1242 1242 # repository is filtered this may lead to `filectx` trying to build
1243 1243 # a `changectx` for a filtered revision. In such a case we fall back
1244 1244 # to creating a `changectx` on the unfiltered version of the
1245 1245 # repository. This fallback should not be an issue because
1246 1246 # `changectx` objects from `filectx` are not used in complex
1247 1247 # operations that care about filtering.
1248 1248 #
1249 1249 # This fallback is a cheap and dirty fix that prevents several
1250 1250 # crashes. It does not ensure the behavior is correct. However the
1251 1251 # behavior was not correct before filtering either, and "incorrect
1252 1252 # behavior" is seen as better than "crash".
1253 1253 #
1254 1254 # Linkrevs have several serious problems with filtering that are
1255 1255 # complicated to solve. Proper handling of the issue here should be
1256 1256 # considered once solutions to the linkrev issue are on the table.
1257 1257 return changectx(self._repo.unfiltered(), self._changeid)
1258 1258
1259 1259 def filectx(self, fileid, changeid=None):
1260 1260 '''opens an arbitrary revision of the file without
1261 1261 opening a new filelog'''
1262 1262 return filectx(self._repo, self._path, fileid=fileid,
1263 1263 filelog=self._filelog, changeid=changeid)
1264 1264
1265 1265 def rawdata(self):
1266 1266 return self._filelog.revision(self._filenode, raw=True)
1267 1267
1268 1268 def rawflags(self):
1269 1269 """low-level revlog flags"""
1270 1270 return self._filelog.flags(self._filerev)
1271 1271
1272 1272 def data(self):
1273 1273 try:
1274 1274 return self._filelog.read(self._filenode)
1275 1275 except error.CensoredNodeError:
1276 1276 if self._repo.ui.config("censor", "policy") == "ignore":
1277 1277 return ""
1278 1278 raise error.Abort(_("censored node: %s") % short(self._filenode),
1279 1279 hint=_("set censor.policy to ignore errors"))
1280 1280
1281 1281 def size(self):
1282 1282 return self._filelog.size(self._filerev)
1283 1283
1284 1284 @propertycache
1285 1285 def _copied(self):
1286 1286 """check if the file was actually renamed in this changeset revision
1287 1287
1288 1288 If a rename is logged in the file revision, we report the copy for the
1289 1289 changeset only if the file revision's linkrev points back to the
1290 1290 changeset in question or both changeset parents contain different file revisions.
1291 1291 """
1292 1292
1293 1293 renamed = self._filelog.renamed(self._filenode)
1294 1294 if not renamed:
1295 1295 return renamed
1296 1296
1297 1297 if self.rev() == self.linkrev():
1298 1298 return renamed
1299 1299
1300 1300 name = self.path()
1301 1301 fnode = self._filenode
1302 1302 for p in self._changectx.parents():
1303 1303 try:
1304 1304 if fnode == p.filenode(name):
1305 1305 return None
1306 1306 except error.LookupError:
1307 1307 pass
1308 1308 return renamed
1309 1309
1310 1310 def children(self):
1311 1311 # hard for renames
1312 1312 c = self._filelog.children(self._filenode)
1313 1313 return [filectx(self._repo, self._path, fileid=x,
1314 1314 filelog=self._filelog) for x in c]
1315 1315
1316 1316 class committablectx(basectx):
1317 1317 """A committablectx object provides common functionality for a context that
1318 1318 wants the ability to commit, e.g. workingctx or memctx."""
1319 1319 def __init__(self, repo, text="", user=None, date=None, extra=None,
1320 1320 changes=None):
1321 1321 self._repo = repo
1322 1322 self._rev = None
1323 1323 self._node = None
1324 1324 self._text = text
1325 1325 if date:
1326 1326 self._date = util.parsedate(date)
1327 1327 if user:
1328 1328 self._user = user
1329 1329 if changes:
1330 1330 self._status = changes
1331 1331
1332 1332 self._extra = {}
1333 1333 if extra:
1334 1334 self._extra = extra.copy()
1335 1335 if 'branch' not in self._extra:
1336 1336 try:
1337 1337 branch = encoding.fromlocal(self._repo.dirstate.branch())
1338 1338 except UnicodeDecodeError:
1339 1339 raise error.Abort(_('branch name not in UTF-8!'))
1340 1340 self._extra['branch'] = branch
1341 1341 if self._extra['branch'] == '':
1342 1342 self._extra['branch'] = 'default'
1343 1343
1344 1344 def __bytes__(self):
1345 1345 return bytes(self._parents[0]) + "+"
1346 1346
1347 1347 __str__ = encoding.strmethod(__bytes__)
1348 1348
1349 1349 def __nonzero__(self):
1350 1350 return True
1351 1351
1352 1352 __bool__ = __nonzero__
1353 1353
1354 1354 def _buildflagfunc(self):
1355 1355 # Create a fallback function for getting file flags when the
1356 1356 # filesystem doesn't support them
1357 1357
1358 1358 copiesget = self._repo.dirstate.copies().get
1359 1359 parents = self.parents()
1360 1360 if len(parents) < 2:
1361 1361 # when we have one parent, it's easy: copy from parent
1362 1362 man = parents[0].manifest()
1363 1363 def func(f):
1364 1364 f = copiesget(f, f)
1365 1365 return man.flags(f)
1366 1366 else:
1367 1367 # merges are tricky: we try to reconstruct the unstored
1368 1368 # result from the merge (issue1802)
1369 1369 p1, p2 = parents
1370 1370 pa = p1.ancestor(p2)
1371 1371 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1372 1372
1373 1373 def func(f):
1374 1374 f = copiesget(f, f) # may be wrong for merges with copies
1375 1375 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1376 1376 if fl1 == fl2:
1377 1377 return fl1
1378 1378 if fl1 == fla:
1379 1379 return fl2
1380 1380 if fl2 == fla:
1381 1381 return fl1
1382 1382 return '' # punt for conflicts
1383 1383
1384 1384 return func
1385 1385
1386 1386 @propertycache
1387 1387 def _flagfunc(self):
1388 1388 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1389 1389
1390 1390 @propertycache
1391 1391 def _status(self):
1392 1392 return self._repo.status()
1393 1393
1394 1394 @propertycache
1395 1395 def _user(self):
1396 1396 return self._repo.ui.username()
1397 1397
1398 1398 @propertycache
1399 1399 def _date(self):
1400 1400 ui = self._repo.ui
1401 1401 date = ui.configdate('devel', 'default-date')
1402 1402 if date is None:
1403 1403 date = util.makedate()
1404 1404 return date
1405 1405
1406 1406 def subrev(self, subpath):
1407 1407 return None
1408 1408
1409 1409 def manifestnode(self):
1410 1410 return None
1411 1411 def user(self):
1412 1412 return self._user or self._repo.ui.username()
1413 1413 def date(self):
1414 1414 return self._date
1415 1415 def description(self):
1416 1416 return self._text
1417 1417 def files(self):
1418 1418 return sorted(self._status.modified + self._status.added +
1419 1419 self._status.removed)
1420 1420
1421 1421 def modified(self):
1422 1422 return self._status.modified
1423 1423 def added(self):
1424 1424 return self._status.added
1425 1425 def removed(self):
1426 1426 return self._status.removed
1427 1427 def deleted(self):
1428 1428 return self._status.deleted
1429 1429 def branch(self):
1430 1430 return encoding.tolocal(self._extra['branch'])
1431 1431 def closesbranch(self):
1432 1432 return 'close' in self._extra
1433 1433 def extra(self):
1434 1434 return self._extra
1435 1435
1436 1436 def tags(self):
1437 1437 return []
1438 1438
1439 1439 def bookmarks(self):
1440 1440 b = []
1441 1441 for p in self.parents():
1442 1442 b.extend(p.bookmarks())
1443 1443 return b
1444 1444
1445 1445 def phase(self):
1446 1446 phase = phases.draft # default phase to draft
1447 1447 for p in self.parents():
1448 1448 phase = max(phase, p.phase())
1449 1449 return phase
1450 1450
1451 1451 def hidden(self):
1452 1452 return False
1453 1453
1454 1454 def children(self):
1455 1455 return []
1456 1456
1457 1457 def flags(self, path):
1458 1458 if r'_manifest' in self.__dict__:
1459 1459 try:
1460 1460 return self._manifest.flags(path)
1461 1461 except KeyError:
1462 1462 return ''
1463 1463
1464 1464 try:
1465 1465 return self._flagfunc(path)
1466 1466 except OSError:
1467 1467 return ''
1468 1468
1469 1469 def ancestor(self, c2):
1470 1470 """return the "best" ancestor context of self and c2"""
1471 1471 return self._parents[0].ancestor(c2) # punt on two parents for now
1472 1472
1473 1473 def walk(self, match):
1474 1474 '''Generates matching file names.'''
1475 1475 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1476 1476 True, False))
1477 1477
1478 1478 def matches(self, match):
1479 1479 return sorted(self._repo.dirstate.matches(match))
1480 1480
1481 1481 def ancestors(self):
1482 1482 for p in self._parents:
1483 1483 yield p
1484 1484 for a in self._repo.changelog.ancestors(
1485 1485 [p.rev() for p in self._parents]):
1486 1486 yield changectx(self._repo, a)
1487 1487
1488 1488 def markcommitted(self, node):
1489 1489 """Perform post-commit cleanup necessary after committing this ctx
1490 1490
1491 1491 Specifically, this updates backing stores this working context
1492 1492 wraps to reflect the fact that the changes reflected by this
1493 1493 workingctx have been committed. For example, it marks
1494 1494 modified and added files as normal in the dirstate.
1495 1495
1496 1496 """
1497 1497
1498 1498 with self._repo.dirstate.parentchange():
1499 1499 for f in self.modified() + self.added():
1500 1500 self._repo.dirstate.normal(f)
1501 1501 for f in self.removed():
1502 1502 self._repo.dirstate.drop(f)
1503 1503 self._repo.dirstate.setparents(node)
1504 1504
1505 1505 # write changes out explicitly, because nesting wlock at
1506 1506 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1507 1507 # from immediately doing so for subsequent changing files
1508 1508 self._repo.dirstate.write(self._repo.currenttransaction())
1509 1509
1510 1510 def dirty(self, missing=False, merge=True, branch=True):
1511 1511 return False
1512 1512
1513 1513 class workingctx(committablectx):
1514 1514 """A workingctx object makes access to data related to
1515 1515 the current working directory convenient.
1516 1516 date - any valid date string or (unixtime, offset), or None.
1517 1517 user - username string, or None.
1518 1518 extra - a dictionary of extra values, or None.
1519 1519 changes - a list of file lists as returned by localrepo.status()
1520 1520 or None to use the repository status.
1521 1521 """
1522 1522 def __init__(self, repo, text="", user=None, date=None, extra=None,
1523 1523 changes=None):
1524 1524 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1525 1525
1526 1526 def __iter__(self):
1527 1527 d = self._repo.dirstate
1528 1528 for f in d:
1529 1529 if d[f] != 'r':
1530 1530 yield f
1531 1531
1532 1532 def __contains__(self, key):
1533 1533 return self._repo.dirstate[key] not in "?r"
1534 1534
1535 1535 def hex(self):
1536 1536 return hex(wdirid)
1537 1537
1538 1538 @propertycache
1539 1539 def _parents(self):
1540 1540 p = self._repo.dirstate.parents()
1541 1541 if p[1] == nullid:
1542 1542 p = p[:-1]
1543 1543 return [changectx(self._repo, x) for x in p]
1544 1544
1545 1545 def filectx(self, path, filelog=None):
1546 1546 """get a file context from the working directory"""
1547 1547 return workingfilectx(self._repo, path, workingctx=self,
1548 1548 filelog=filelog)
1549 1549
1550 1550 def dirty(self, missing=False, merge=True, branch=True):
1551 1551 "check whether a working directory is modified"
1552 1552 # check subrepos first
1553 1553 for s in sorted(self.substate):
1554 1554 if self.sub(s).dirty(missing=missing):
1555 1555 return True
1556 1556 # check current working dir
1557 1557 return ((merge and self.p2()) or
1558 1558 (branch and self.branch() != self.p1().branch()) or
1559 1559 self.modified() or self.added() or self.removed() or
1560 1560 (missing and self.deleted()))
1561 1561
1562 1562 def add(self, list, prefix=""):
1563 1563 with self._repo.wlock():
1564 1564 ui, ds = self._repo.ui, self._repo.dirstate
1565 1565 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1566 1566 rejected = []
1567 1567 lstat = self._repo.wvfs.lstat
1568 1568 for f in list:
1569 1569 # ds.pathto() returns an absolute file path when this is invoked from
1570 1570 # the keyword extension. That gets flagged as non-portable on
1571 1571 # Windows, since it contains the drive letter and colon.
1572 1572 scmutil.checkportable(ui, os.path.join(prefix, f))
1573 1573 try:
1574 1574 st = lstat(f)
1575 1575 except OSError:
1576 1576 ui.warn(_("%s does not exist!\n") % uipath(f))
1577 1577 rejected.append(f)
1578 1578 continue
1579 1579 if st.st_size > 10000000:
1580 1580 ui.warn(_("%s: up to %d MB of RAM may be required "
1581 1581 "to manage this file\n"
1582 1582 "(use 'hg revert %s' to cancel the "
1583 1583 "pending addition)\n")
1584 1584 % (f, 3 * st.st_size // 1000000, uipath(f)))
1585 1585 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1586 1586 ui.warn(_("%s not added: only files and symlinks "
1587 1587 "supported currently\n") % uipath(f))
1588 1588 rejected.append(f)
1589 1589 elif ds[f] in 'amn':
1590 1590 ui.warn(_("%s already tracked!\n") % uipath(f))
1591 1591 elif ds[f] == 'r':
1592 1592 ds.normallookup(f)
1593 1593 else:
1594 1594 ds.add(f)
1595 1595 return rejected
1596 1596
1597 1597 def forget(self, files, prefix=""):
1598 1598 with self._repo.wlock():
1599 1599 ds = self._repo.dirstate
1600 1600 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1601 1601 rejected = []
1602 1602 for f in files:
1603 1603 if f not in self._repo.dirstate:
1604 1604 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1605 1605 rejected.append(f)
1606 1606 elif self._repo.dirstate[f] != 'a':
1607 1607 self._repo.dirstate.remove(f)
1608 1608 else:
1609 1609 self._repo.dirstate.drop(f)
1610 1610 return rejected
1611 1611
1612 1612 def undelete(self, list):
1613 1613 pctxs = self.parents()
1614 1614 with self._repo.wlock():
1615 1615 ds = self._repo.dirstate
1616 1616 for f in list:
1617 1617 if self._repo.dirstate[f] != 'r':
1618 1618 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1619 1619 else:
1620 1620 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1621 1621 t = fctx.data()
1622 1622 self._repo.wwrite(f, t, fctx.flags())
1623 1623 self._repo.dirstate.normal(f)
1624 1624
1625 1625 def copy(self, source, dest):
1626 1626 try:
1627 1627 st = self._repo.wvfs.lstat(dest)
1628 1628 except OSError as err:
1629 1629 if err.errno != errno.ENOENT:
1630 1630 raise
1631 1631 self._repo.ui.warn(_("%s does not exist!\n")
1632 1632 % self._repo.dirstate.pathto(dest))
1633 1633 return
1634 1634 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1635 1635 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1636 1636 "symbolic link\n")
1637 1637 % self._repo.dirstate.pathto(dest))
1638 1638 else:
1639 1639 with self._repo.wlock():
1640 1640 if self._repo.dirstate[dest] in '?':
1641 1641 self._repo.dirstate.add(dest)
1642 1642 elif self._repo.dirstate[dest] in 'r':
1643 1643 self._repo.dirstate.normallookup(dest)
1644 1644 self._repo.dirstate.copy(source, dest)
1645 1645
1646 1646 def match(self, pats=None, include=None, exclude=None, default='glob',
1647 1647 listsubrepos=False, badfn=None):
1648 1648 r = self._repo
1649 1649
1650 1650 # Only a case-insensitive filesystem needs magic to translate user input
1651 1651 # to actual case in the filesystem.
1652 1652 icasefs = not util.fscasesensitive(r.root)
1653 1653 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1654 1654 default, auditor=r.auditor, ctx=self,
1655 1655 listsubrepos=listsubrepos, badfn=badfn,
1656 1656 icasefs=icasefs)
1657 1657
1658 1658 def _filtersuspectsymlink(self, files):
1659 1659 if not files or self._repo.dirstate._checklink:
1660 1660 return files
1661 1661
1662 1662 # Symlink placeholders may get non-symlink-like contents
1663 1663 # via user error or dereferencing by NFS or Samba servers,
1664 1664 # so we filter out any placeholders that don't look like a
1665 1665 # symlink
1666 1666 sane = []
1667 1667 for f in files:
1668 1668 if self.flags(f) == 'l':
1669 1669 d = self[f].data()
1670 1670 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1671 1671 self._repo.ui.debug('ignoring suspect symlink placeholder'
1672 1672 ' "%s"\n' % f)
1673 1673 continue
1674 1674 sane.append(f)
1675 1675 return sane
1676 1676
1677 1677 def _checklookup(self, files):
1678 1678 # check for any possibly clean files
1679 1679 if not files:
1680 1680 return [], [], []
1681 1681
1682 1682 modified = []
1683 1683 deleted = []
1684 1684 fixup = []
1685 1685 pctx = self._parents[0]
1686 1686 # do a full compare of any files that might have changed
1687 1687 for f in sorted(files):
1688 1688 try:
1689 1689 # This will return True for a file that got replaced by a
1690 1690 # directory in the interim, but fixing that is pretty hard.
1691 1691 if (f not in pctx or self.flags(f) != pctx.flags(f)
1692 1692 or pctx[f].cmp(self[f])):
1693 1693 modified.append(f)
1694 1694 else:
1695 1695 fixup.append(f)
1696 1696 except (IOError, OSError):
1697 1697 # A file became inaccessible in between? Mark it as deleted,
1698 1698 # matching dirstate behavior (issue5584).
1699 1699 # The dirstate has more complex behavior around whether a
1700 1700 # missing file matches a directory, etc, but we don't need to
1701 1701 # bother with that: if f has made it to this point, we're sure
1702 1702 # it's in the dirstate.
1703 1703 deleted.append(f)
1704 1704
1705 1705 return modified, deleted, fixup
1706 1706
1707 1707 def _poststatusfixup(self, status, fixup):
1708 1708 """update dirstate for files that are actually clean"""
1709 1709 poststatus = self._repo.postdsstatus()
1710 1710 if fixup or poststatus:
1711 1711 try:
1712 1712 oldid = self._repo.dirstate.identity()
1713 1713
1714 1714 # updating the dirstate is optional
1715 1715 # so we don't wait on the lock
1716 1716 # wlock can invalidate the dirstate, so cache normal _after_
1717 1717 # taking the lock
1718 1718 with self._repo.wlock(False):
1719 1719 if self._repo.dirstate.identity() == oldid:
1720 1720 if fixup:
1721 1721 normal = self._repo.dirstate.normal
1722 1722 for f in fixup:
1723 1723 normal(f)
1724 1724 # write changes out explicitly, because nesting
1725 1725 # wlock at runtime may prevent 'wlock.release()'
1726 1726 # after this block from doing so for subsequent
1727 1727 # changing files
1728 1728 tr = self._repo.currenttransaction()
1729 1729 self._repo.dirstate.write(tr)
1730 1730
1731 1731 if poststatus:
1732 1732 for ps in poststatus:
1733 1733 ps(self, status)
1734 1734 else:
1735 1735 # in this case, writing changes out breaks
1736 1736 # consistency, because .hg/dirstate was
1737 1737 # already changed simultaneously after last
1738 1738 # caching (see also issue5584 for detail)
1739 1739 self._repo.ui.debug('skip updating dirstate: '
1740 1740 'identity mismatch\n')
1741 1741 except error.LockError:
1742 1742 pass
1743 1743 finally:
1744 1744 # Even if the wlock couldn't be grabbed, clear out the list.
1745 1745 self._repo.clearpostdsstatus()
1746 1746
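# Annotation: the identity check above is a small optimistic-concurrency
# protocol. A condensed sketch of the pattern, assuming only the dirstate
# identity()/write() API already used above:
def _example_guardedwrite(repo, updatefn):
    oldid = repo.dirstate.identity()  # snapshot before blocking on wlock
    with repo.wlock(False):  # may raise error.LockError (not handled here)
        if repo.dirstate.identity() == oldid:
            updatefn(repo.dirstate)  # still the same on-disk dirstate
            repo.dirstate.write(repo.currenttransaction())
        # else: .hg/dirstate changed underneath us; skip the write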
1747 1747 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1748 1748 unknown=False):
1749 1749 '''Gets the status from the dirstate -- internal use only.'''
1750 1750 listignored, listclean, listunknown = ignored, clean, unknown
1751 1751 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1752 1752 subrepos = []
1753 1753 if '.hgsub' in self:
1754 1754 subrepos = sorted(self.substate)
1755 1755 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1756 1756 listclean, listunknown)
1757 1757
1758 1758 # check for any possibly clean files
1759 1759 fixup = []
1760 1760 if cmp:
1761 1761 modified2, deleted2, fixup = self._checklookup(cmp)
1762 1762 s.modified.extend(modified2)
1763 1763 s.deleted.extend(deleted2)
1764 1764
1765 1765 if fixup and listclean:
1766 1766 s.clean.extend(fixup)
1767 1767
1768 1768 self._poststatusfixup(s, fixup)
1769 1769
1770 1770 if match.always():
1771 1771 # cache for performance
1772 1772 if s.unknown or s.ignored or s.clean:
1773 1773 # "_status" is cached with list*=False in the normal route
1774 1774 self._status = scmutil.status(s.modified, s.added, s.removed,
1775 1775 s.deleted, [], [], [])
1776 1776 else:
1777 1777 self._status = s
1778 1778
1779 1779 return s
1780 1780
1781 1781 @propertycache
1782 1782 def _manifest(self):
1783 1783 """generate a manifest corresponding to the values in self._status
1784 1784
1785 1785 This reuses the file nodeids from the parent, but uses special node
1786 1786 identifiers for added and modified files. This is used by manifest
1787 1787 merge to see that files are different and by update logic to avoid
1788 1788 deleting newly added files.
1789 1789 """
1790 1790 return self._buildstatusmanifest(self._status)
1791 1791
1792 1792 def _buildstatusmanifest(self, status):
1793 1793 """Builds a manifest that includes the given status results."""
1794 1794 parents = self.parents()
1795 1795
1796 1796 man = parents[0].manifest().copy()
1797 1797
1798 1798 ff = self._flagfunc
1799 1799 for i, l in ((addednodeid, status.added),
1800 1800 (modifiednodeid, status.modified)):
1801 1801 for f in l:
1802 1802 man[f] = i
1803 1803 try:
1804 1804 man.setflag(f, ff(f))
1805 1805 except OSError:
1806 1806 pass
1807 1807
1808 1808 for f in status.deleted + status.removed:
1809 1809 if f in man:
1810 1810 del man[f]
1811 1811
1812 1812 return man
1813 1813
1814 1814 def _buildstatus(self, other, s, match, listignored, listclean,
1815 1815 listunknown):
1816 1816 """build a status with respect to another context
1817 1817
1818 1818 This includes logic for maintaining the fast path of status when
1819 1819 comparing the working directory against its parent, which is to skip
1820 1820 building a new manifest when self (the working directory) is compared
1821 1821 against its parent (repo['.']).
1822 1822 """
1823 1823 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1824 1824 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1825 1825 # might have accidentally ended up with the entire contents of the file
1826 1826 # they are supposed to be linking to.
1827 1827 s.modified[:] = self._filtersuspectsymlink(s.modified)
1828 1828 if other != self._repo['.']:
1829 1829 s = super(workingctx, self)._buildstatus(other, s, match,
1830 1830 listignored, listclean,
1831 1831 listunknown)
1832 1832 return s
1833 1833
1834 1834 def _matchstatus(self, other, match):
1835 1835 """override the match method with a filter for directory patterns
1836 1836
1837 1837 We use inheritance to customize the match.bad method only in cases of
1838 1838 workingctx, since this behavior belongs only to the working directory
1839 1839 when comparing against the parent changeset.
1840 1840
1841 1841 If we aren't comparing against the working directory's parent, then we
1842 1842 just use the default match object sent to us.
1843 1843 """
1844 1844 superself = super(workingctx, self)
1845 1845 match = superself._matchstatus(other, match)
1846 1846 if other != self._repo['.']:
1847 1847 def bad(f, msg):
1848 1848 # 'f' may be a directory pattern from 'match.files()',
1849 1849 # so 'f not in ctx1' is not enough
1850 1850 if f not in other and not other.hasdir(f):
1851 1851 self._repo.ui.warn('%s: %s\n' %
1852 1852 (self._repo.dirstate.pathto(f), msg))
1853 1853 match.bad = bad
1854 1854 return match
1855 1855
1856 1856 def markcommitted(self, node):
1857 1857 super(workingctx, self).markcommitted(node)
1858 1858
1859 1859 sparse.aftercommit(self._repo, node)
1860 1860
1861 1861 class committablefilectx(basefilectx):
1862 1862 """A committablefilectx provides common functionality for a file context
1863 1863 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1864 1864 def __init__(self, repo, path, filelog=None, ctx=None):
1865 1865 self._repo = repo
1866 1866 self._path = path
1867 1867 self._changeid = None
1868 1868 self._filerev = self._filenode = None
1869 1869
1870 1870 if filelog is not None:
1871 1871 self._filelog = filelog
1872 1872 if ctx:
1873 1873 self._changectx = ctx
1874 1874
1875 1875 def __nonzero__(self):
1876 1876 return True
1877 1877
1878 1878 __bool__ = __nonzero__
1879 1879
1880 1880 def linkrev(self):
1881 1881 # linked to self._changectx no matter if file is modified or not
1882 1882 return self.rev()
1883 1883
1884 1884 def parents(self):
1885 1885 '''return parent filectxs, following copies if necessary'''
1886 1886 def filenode(ctx, path):
1887 1887 return ctx._manifest.get(path, nullid)
1888 1888
1889 1889 path = self._path
1890 1890 fl = self._filelog
1891 1891 pcl = self._changectx._parents
1892 1892 renamed = self.renamed()
1893 1893
1894 1894 if renamed:
1895 1895 pl = [renamed + (None,)]
1896 1896 else:
1897 1897 pl = [(path, filenode(pcl[0], path), fl)]
1898 1898
1899 1899 for pc in pcl[1:]:
1900 1900 pl.append((path, filenode(pc, path), fl))
1901 1901
1902 1902 return [self._parentfilectx(p, fileid=n, filelog=l)
1903 1903 for p, n, l in pl if n != nullid]
1904 1904
1905 1905 def children(self):
1906 1906 return []
1907 1907
1908 1908 class workingfilectx(committablefilectx):
1909 1909 """A workingfilectx object makes access to data related to a particular
1910 1910 file in the working directory convenient."""
1911 1911 def __init__(self, repo, path, filelog=None, workingctx=None):
1912 1912 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1913 1913
1914 1914 @propertycache
1915 1915 def _changectx(self):
1916 1916 return workingctx(self._repo)
1917 1917
1918 1918 def data(self):
1919 1919 return self._repo.wread(self._path)
1920 1920 def renamed(self):
1921 1921 rp = self._repo.dirstate.copied(self._path)
1922 1922 if not rp:
1923 1923 return None
1924 1924 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1925 1925
1926 1926 def size(self):
1927 1927 return self._repo.wvfs.lstat(self._path).st_size
1928 1928 def date(self):
1929 1929 t, tz = self._changectx.date()
1930 1930 try:
1931 1931 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1932 1932 except OSError as err:
1933 1933 if err.errno != errno.ENOENT:
1934 1934 raise
1935 1935 return (t, tz)
1936 1936
1937 1937 def exists(self):
1938 1938 return self._repo.wvfs.exists(self._path)
1939 1939
1940 1940 def lexists(self):
1941 1941 return self._repo.wvfs.lexists(self._path)
1942 1942
1943 1943 def audit(self):
1944 1944 return self._repo.wvfs.audit(self._path)
1945 1945
1946 1946 def cmp(self, fctx):
1947 1947 """compare with other file context
1948 1948
1949 1949 returns True if different than fctx.
1950 1950 """
1951 1951 # fctx should be a filectx (not a workingfilectx)
1952 1952 # invert comparison to reuse the same code path
1953 1953 return fctx.cmp(self)
1954 1954
1955 1955 def remove(self, ignoremissing=False):
1956 1956 """wraps unlink for a repo's working directory"""
1957 1957 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1958 1958
1959 1959 def write(self, data, flags, backgroundclose=False):
1960 1960 """wraps repo.wwrite"""
1961 1961 self._repo.wwrite(self._path, data, flags,
1962 1962 backgroundclose=backgroundclose)
1963 1963
1964 1964 def setflags(self, l, x):
1965 1965 self._repo.wvfs.setflags(self._path, l, x)
1966 1966
1967 1967 class workingcommitctx(workingctx):
1968 1968 """A workingcommitctx object makes access to data related to
1969 1969 the revision being committed convenient.
1970 1970
1971 1971 This hides changes in the working directory, if they aren't
1972 1972 committed in this context.
1973 1973 """
1974 1974 def __init__(self, repo, changes,
1975 1975 text="", user=None, date=None, extra=None):
1976 1976 super(workingctx, self).__init__(repo, text, user, date, extra,
1977 1977 changes)
1978 1978
1979 1979 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1980 1980 unknown=False):
1981 1981 """Return matched files only in ``self._status``
1982 1982
1983 1983 Uncommitted files appear "clean" via this context, even if
1984 1984 they aren't actually so in the working directory.
1985 1985 """
1986 1986 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1987 1987 if clean:
1988 1988 clean = [f for f in self._manifest if f not in self._changedset]
1989 1989 else:
1990 1990 clean = []
1991 1991 return scmutil.status([f for f in self._status.modified if match(f)],
1992 1992 [f for f in self._status.added if match(f)],
1993 1993 [f for f in self._status.removed if match(f)],
1994 1994 [], [], [], clean)
1995 1995
1996 1996 @propertycache
1997 1997 def _changedset(self):
1998 1998 """Return the set of files changed in this context
1999 1999 """
2000 2000 changed = set(self._status.modified)
2001 2001 changed.update(self._status.added)
2002 2002 changed.update(self._status.removed)
2003 2003 return changed
2004 2004
2005 2005 def makecachingfilectxfn(func):
2006 2006 """Create a filectxfn that caches based on the path.
2007 2007
2008 2008 We can't use util.cachefunc because it uses all arguments as the cache
2009 2009 key and this creates a cycle since the arguments include the repo and
2010 2010 memctx.
2011 2011 """
2012 2012 cache = {}
2013 2013
2014 2014 def getfilectx(repo, memctx, path):
2015 2015 if path not in cache:
2016 2016 cache[path] = func(repo, memctx, path)
2017 2017 return cache[path]
2018 2018
2019 2019 return getfilectx
2020 2020
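# Annotation: a sketch of the intended use. Build a filectxfn backed by a
# plain {path: data} dict and wrap it so repeated lookups of the same path
# during a commit hit the cache; the helper name is illustrative.
def _example_makefilectxfn(filedata):
    def getfilectx(repo, memctx, path):
        if path not in filedata:
            return None  # treated as a removed file by recent Mercurial
        return memfilectx(repo, path, filedata[path], memctx=memctx)
    return makecachingfilectxfn(getfilectx)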
2021 2021 def memfilefromctx(ctx):
2022 2022 """Given a context return a memfilectx for ctx[path]
2023 2023
2024 2024 This is a convenience method for building a memctx based on another
2025 2025 context.
2026 2026 """
2027 2027 def getfilectx(repo, memctx, path):
2028 2028 fctx = ctx[path]
2029 2029 # this is weird but apparently we only keep track of one parent
2030 2030 # (why not only store that instead of a tuple?)
2031 2031 copied = fctx.renamed()
2032 2032 if copied:
2033 2033 copied = copied[0]
2034 2034 return memfilectx(repo, path, fctx.data(),
2035 2035 islink=fctx.islink(), isexec=fctx.isexec(),
2036 2036 copied=copied, memctx=memctx)
2037 2037
2038 2038 return getfilectx
2039 2039
2040 2040 def memfilefrompatch(patchstore):
2041 2041 """Given a patch (e.g. patchstore object) return a memfilectx
2042 2042
2043 2043 This is a convenience method for building a memctx based on a patchstore.
2044 2044 """
2045 2045 def getfilectx(repo, memctx, path):
2046 2046 data, mode, copied = patchstore.getfile(path)
2047 2047 if data is None:
2048 2048 return None
2049 2049 islink, isexec = mode
2050 2050 return memfilectx(repo, path, data, islink=islink,
2051 2051 isexec=isexec, copied=copied,
2052 2052 memctx=memctx)
2053 2053
2054 2054 return getfilectx
2055 2055
2056 2056 class memctx(committablectx):
2057 2057 """Use memctx to perform in-memory commits via localrepo.commitctx().
2058 2058
2059 2059 Revision information is supplied at initialization time, while
2060 2060 related file data is made available through a callback
2061 2061 mechanism. 'repo' is the current localrepo, 'parents' is a
2062 2062 sequence of two parent revisions identifiers (pass None for every
2063 2063 missing parent), 'text' is the commit message and 'files' lists
2064 2064 names of files touched by the revision (normalized and relative to
2065 2065 repository root).
2066 2066
2067 2067 filectxfn(repo, memctx, path) is a callable receiving the
2068 2068 repository, the current memctx object and the normalized path of
2069 2069 requested file, relative to repository root. It is fired by the
2070 2070 commit function for every file in 'files', but calls order is
2071 2071 undefined. If the file is available in the revision being
2072 2072 committed (updated or added), filectxfn returns a memfilectx
2073 2073 object. If the file was removed, filectxfn return None for recent
2074 2074 Mercurial. Moved files are represented by marking the source file
2075 2075 removed and the new file added with copy information (see
2076 2076 memfilectx).
2077 2077
2078 2078 user receives the committer name and defaults to current
2079 2079 repository username, date is the commit date in any format
2080 2080 supported by util.parsedate() and defaults to current date, extra
2081 2081 is a dictionary of metadata or is left empty.
2082 2082 """
2083 2083
2084 2084 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2085 2085 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2086 2086 # this field to determine what to do in filectxfn.
2087 2087 _returnnoneformissingfiles = True
2088 2088
2089 2089 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2090 2090 date=None, extra=None, branch=None, editor=False):
2091 2091 super(memctx, self).__init__(repo, text, user, date, extra)
2092 2092 self._rev = None
2093 2093 self._node = None
2094 2094 parents = [(p or nullid) for p in parents]
2095 2095 p1, p2 = parents
2096 2096 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2097 2097 files = sorted(set(files))
2098 2098 self._files = files
2099 2099 if branch is not None:
2100 2100 self._extra['branch'] = encoding.fromlocal(branch)
2101 2101 self.substate = {}
2102 2102
2103 2103 if isinstance(filectxfn, patch.filestore):
2104 2104 filectxfn = memfilefrompatch(filectxfn)
2105 2105 elif not callable(filectxfn):
2106 2106 # if store is not callable, wrap it in a function
2107 2107 filectxfn = memfilefromctx(filectxfn)
2108 2108
2109 2109 # memoizing increases performance for e.g. vcs convert scenarios.
2110 2110 self._filectxfn = makecachingfilectxfn(filectxfn)
2111 2111
2112 2112 if editor:
2113 2113 self._text = editor(self._repo, self, [])
2114 2114 self._repo.savecommitmessage(self._text)
2115 2115
2116 2116 def filectx(self, path, filelog=None):
2117 2117 """get a file context from the working directory
2118 2118
2119 2119 Returns None if file doesn't exist and should be removed."""
2120 2120 return self._filectxfn(self._repo, self, path)
2121 2121
2122 2122 def commit(self):
2123 2123 """commit context to the repo"""
2124 2124 return self._repo.commitctx(self)
2125 2125
2126 2126 @propertycache
2127 2127 def _manifest(self):
2128 2128 """generate a manifest based on the return values of filectxfn"""
2129 2129
2130 2130 # keep this simple for now; just worry about p1
2131 2131 pctx = self._parents[0]
2132 2132 man = pctx.manifest().copy()
2133 2133
2134 2134 for f in self._status.modified:
2135 2135 p1node = nullid
2136 2136 p2node = nullid
2137 2137 p = pctx[f].parents() # if file isn't in pctx, check p2?
2138 2138 if len(p) > 0:
2139 2139 p1node = p[0].filenode()
2140 2140 if len(p) > 1:
2141 2141 p2node = p[1].filenode()
2142 2142 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2143 2143
2144 2144 for f in self._status.added:
2145 2145 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2146 2146
2147 2147 for f in self._status.removed:
2148 2148 if f in man:
2149 2149 del man[f]
2150 2150
2151 2151 return man
2152 2152
2153 2153 @propertycache
2154 2154 def _status(self):
2155 2155 """Calculate exact status from ``files`` specified at construction
2156 2156 """
2157 2157 man1 = self.p1().manifest()
2158 2158 p2 = self._parents[1]
2159 2159 # "1 < len(self._parents)" can't be used for checking
2160 2160 # existence of the 2nd parent, because "memctx._parents" is
2161 2161 # explicitly initialized with a list whose length is always 2.
2162 2162 if p2.node() != nullid:
2163 2163 man2 = p2.manifest()
2164 2164 managing = lambda f: f in man1 or f in man2
2165 2165 else:
2166 2166 managing = lambda f: f in man1
2167 2167
2168 2168 modified, added, removed = [], [], []
2169 2169 for f in self._files:
2170 2170 if not managing(f):
2171 2171 added.append(f)
2172 2172 elif self[f]:
2173 2173 modified.append(f)
2174 2174 else:
2175 2175 removed.append(f)
2176 2176
2177 2177 return scmutil.status(modified, added, removed, [], [], [], [])
2178 2178
2179 2179 class memfilectx(committablefilectx):
2180 2180 """memfilectx represents an in-memory file to commit.
2181 2181
2182 2182 See memctx and committablefilectx for more details.
2183 2183 """
2184 2184 def __init__(self, repo, path, data, islink=False,
2185 2185 isexec=False, copied=None, memctx=None):
2186 2186 """
2187 2187 path is the normalized file path relative to repository root.
2188 2188 data is the file content as a string.
2189 2189 islink is True if the file is a symbolic link.
2190 2190 isexec is True if the file is executable.
2191 2191 copied is the source file path if current file was copied in the
2192 2192 revision being committed, or None."""
2193 2193 super(memfilectx, self).__init__(repo, path, None, memctx)
2194 2194 self._data = data
2195 2195 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2196 2196 self._copied = None
2197 2197 if copied:
2198 2198 self._copied = (copied, nullid)
2199 2199
2200 2200 def data(self):
2201 2201 return self._data
2202 2202
2203 2203 def remove(self, ignoremissing=False):
2204 2204 """wraps unlink for a repo's working directory"""
2205 2205 # need to figure out what to do here
2206 2206 del self._changectx[self._path]
2207 2207
2208 2208 def write(self, data, flags):
2209 2209 """wraps repo.wwrite"""
2210 2210 self._data = data
2211 2211
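# Annotation: putting memctx and memfilectx together. A sketch of an
# in-memory commit that adds one file on top of '.', never touching the
# working directory; all literal values below are illustrative.
def _example_inmemorycommit(repo):
    def getfilectx(repo, memctx, path):
        return memfilectx(repo, path, 'example content\n', memctx=memctx)
    ctx = memctx(repo, parents=(repo['.'].node(), None),
                 text='example in-memory commit',
                 files=['example.txt'], filectxfn=getfilectx)
    return ctx.commit()  # returns the node of the new changeset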
2212 2212 class overlayfilectx(committablefilectx):
2213 2213 """Like memfilectx but take an original filectx and optional parameters to
2214 2214 override parts of it. This is useful when fctx.data() is expensive (i.e.
2215 2215 flag processor is expensive) and raw data, flags, and filenode could be
2216 2216 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2217 2217 """
2218 2218
2219 2219 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2220 2220 copied=None, ctx=None):
2221 2221 """originalfctx: filecontext to duplicate
2222 2222
2223 2223 datafunc: None or a function to override data (file content). It is a
2224 2224 function so evaluation can be lazy. path, flags, copied, ctx: None or an overridden value
2225 2225
2226 2226 copied could be (path, rev), or False. copied could also be just path,
2227 2227 and will be converted to (path, nullid). This simplifies some callers.
2228 2228 """
2229 2229
2230 2230 if path is None:
2231 2231 path = originalfctx.path()
2232 2232 if ctx is None:
2233 2233 ctx = originalfctx.changectx()
2234 2234 ctxmatch = lambda: True
2235 2235 else:
2236 2236 ctxmatch = lambda: ctx == originalfctx.changectx()
2237 2237
2238 2238 repo = originalfctx.repo()
2239 2239 flog = originalfctx.filelog()
2240 2240 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2241 2241
2242 2242 if copied is None:
2243 2243 copied = originalfctx.renamed()
2244 2244 copiedmatch = lambda: True
2245 2245 else:
2246 2246 if copied and not isinstance(copied, tuple):
2247 2247 # repo._filecommit will recalculate copyrev so nullid is okay
2248 2248 copied = (copied, nullid)
2249 2249 copiedmatch = lambda: copied == originalfctx.renamed()
2250 2250
2251 2251 # When data, copied (could affect data), ctx (could affect filelog
2252 2252 # parents) are not overridden, rawdata, rawflags, and filenode may be
2253 2253 # reused (repo._filecommit should double check filelog parents).
2254 2254 #
2255 2255 # path, flags are not hashed in filelog (but in manifestlog) so they do
2256 2256 # not affect reusability here.
2257 2257 #
2258 2258 # If ctx or copied is overridden to the same value as in originalfctx,
2259 2259 # it is still considered reusable. originalfctx.renamed() may be a bit
2260 2260 # expensive so it's not called unless necessary. Assuming datafunc is
2261 2261 # always expensive, do not call it for this "reusable" test.
2262 2262 reusable = datafunc is None and ctxmatch() and copiedmatch()
2263 2263
2264 2264 if datafunc is None:
2265 2265 datafunc = originalfctx.data
2266 2266 if flags is None:
2267 2267 flags = originalfctx.flags()
2268 2268
2269 2269 self._datafunc = datafunc
2270 2270 self._flags = flags
2271 2271 self._copied = copied
2272 2272
2273 2273 if reusable:
2274 2274 # copy extra fields from originalfctx
2275 2275 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2276 2276 for attr in attrs:
2277 2277 if util.safehasattr(originalfctx, attr):
2278 2278 setattr(self, attr, getattr(originalfctx, attr))
2279 2279
2280 2280 def data(self):
2281 2281 return self._datafunc()
2282 2282
2283 2283 class metadataonlyctx(committablectx):
2284 2284 """Like memctx but it's reusing the manifest of different commit.
2285 2285 Intended to be used by lightweight operations that are creating
2286 2286 metadata-only changes.
2287 2287
2288 2288 Revision information is supplied at initialization time. 'repo' is the
2289 2289 current localrepo, 'ctx' is the original revision whose manifest we are
2290 2290 reusing, 'parents' is a sequence of two parent revision identifiers (pass
2291 2291 None for every missing parent), and 'text' is the commit message.
2292 2292
2293 2293 user receives the committer name and defaults to current repository
2294 2294 username, date is the commit date in any format supported by
2295 2295 util.parsedate() and defaults to current date, extra is a dictionary of
2296 2296 metadata or is left empty.
2297 2297 """
2298 2298 def __new__(cls, repo, originalctx, *args, **kwargs):
2299 2299 return super(metadataonlyctx, cls).__new__(cls, repo)
2300 2300
2301 2301 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2302 2302 extra=None, editor=False):
2303 2303 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2304 2304 self._rev = None
2305 2305 self._node = None
2306 2306 self._originalctx = originalctx
2307 2307 self._manifestnode = originalctx.manifestnode()
2308 2308 parents = [(p or nullid) for p in parents]
2309 2309 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2310 2310
2311 2311 # sanity check to ensure that the reused manifest parents are
2312 2312 # manifests of our commit parents
2313 2313 mp1, mp2 = self.manifestctx().parents
2314 2314 if p1 != nullid and p1.manifestnode() != mp1:
2315 2315 raise RuntimeError('can\'t reuse the manifest: '
2316 2316 'its p1 doesn\'t match the new ctx p1')
2317 2317 if p2 != nullid and p2.manifestnode() != mp2:
2318 2318 raise RuntimeError('can\'t reuse the manifest: '
2319 2319 'its p2 doesn\'t match the new ctx p2')
2320 2320
2321 2321 self._files = originalctx.files()
2322 2322 self.substate = {}
2323 2323
2324 2324 if editor:
2325 2325 self._text = editor(self._repo, self, [])
2326 2326 self._repo.savecommitmessage(self._text)
2327 2327
2328 2328 def manifestnode(self):
2329 2329 return self._manifestnode
2330 2330
2331 2331 @property
2332 2332 def _manifestctx(self):
2333 2333 return self._repo.manifestlog[self._manifestnode]
2334 2334
2335 2335 def filectx(self, path, filelog=None):
2336 2336 return self._originalctx.filectx(path, filelog=filelog)
2337 2337
2338 2338 def commit(self):
2339 2339 """commit context to the repo"""
2340 2340 return self._repo.commitctx(self)
2341 2341
2342 2342 @property
2343 2343 def _manifest(self):
2344 2344 return self._originalctx.manifest()
2345 2345
2346 2346 @propertycache
2347 2347 def _status(self):
2348 2348 """Calculate exact status from ``files`` specified in the ``origctx``
2349 2349 and parents manifests.
2350 2350 """
2351 2351 man1 = self.p1().manifest()
2352 2352 p2 = self._parents[1]
2353 2353 # "1 < len(self._parents)" can't be used for checking
2354 2354 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2355 2355 # explicitly initialized with a list whose length is always 2.
2356 2356 if p2.node() != nullid:
2357 2357 man2 = p2.manifest()
2358 2358 managing = lambda f: f in man1 or f in man2
2359 2359 else:
2360 2360 managing = lambda f: f in man1
2361 2361
2362 2362 modified, added, removed = [], [], []
2363 2363 for f in self._files:
2364 2364 if not managing(f):
2365 2365 added.append(f)
2366 2366 elif self[f]:
2367 2367 modified.append(f)
2368 2368 else:
2369 2369 removed.append(f)
2370 2370
2371 2371 return scmutil.status(modified, added, removed, [], [], [], [])
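# Annotation: metadataonlyctx in action. A sketch of a "reword"-style
# operation that rewrites only commit metadata while reusing the original
# manifest wholesale; the function name is illustrative.
def _example_reword(repo, ctx, newtext):
    new = metadataonlyctx(repo, ctx,
                          parents=(ctx.p1().node(), ctx.p2().node()),
                          text=newtext, user=ctx.user(), date=ctx.date(),
                          extra=ctx.extra())
    return new.commit()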
@@ -1,1058 +1,1066 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete marker handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewrite operations, and help
18 18 build new tools to reconcile conflicting rewrite actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides the old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23 The old obsoleted changeset is called a "predecessor" and possible
24 24 replacements are called "successors". Markers that use changeset X as
25 25 a predecessor are called "successor markers of X" because they hold
26 26 information about the successors of X. Markers that use changeset Y as
27 27 a successor are called "predecessor markers of Y" because they hold
28 28 information about the predecessors of Y.
29 29
30 30 Examples:
31 31
32 32 - When changeset A is replaced by changeset A', one marker is stored:
33 33
34 34 (A, (A',))
35 35
36 36 - When changesets A and B are folded into a new changeset C, two markers are
37 37 stored:
38 38
39 39 (A, (C,)) and (B, (C,))
40 40
41 41 - When changeset A is simply "pruned" from the graph, a marker is created:
42 42
43 43 (A, ())
44 44
45 45 - When changeset A is split into B and C, a single marker is used:
46 46
47 47 (A, (B, C))
48 48
49 49 We use a single marker to distinguish the "split" case from the "divergence"
50 50 case. If two independent operations rewrite the same changeset A into A' and
51 51 A'', we have an error case: divergent rewriting. We can detect it because
52 52 two markers will be created independently:
53 53
54 54 (A, (B,)) and (A, (C,))
55 55
56 56 Format
57 57 ------
58 58
59 59 Markers are stored in an append-only file stored in
60 60 '.hg/store/obsstore'.
61 61
62 62 The file starts with a version header:
63 63
64 64 - 1 unsigned byte: version number, starting at zero.
65 65
66 66 The header is followed by the markers. The marker format depends on the
67 67 version; see the comment associated with each format for details.
68 68
69 69 """
70 70 from __future__ import absolute_import
71 71
72 72 import errno
73 73 import struct
74 74
75 75 from .i18n import _
76 76 from . import (
77 77 error,
78 78 node,
79 79 obsutil,
80 80 phases,
81 81 policy,
82 82 util,
83 83 )
84 84
85 85 parsers = policy.importmod(r'parsers')
86 86
87 87 _pack = struct.pack
88 88 _unpack = struct.unpack
89 89 _calcsize = struct.calcsize
90 90 propertycache = util.propertycache
91 91
92 92 # the obsolete feature is not mature enough to be enabled by default.
93 93 # you have to rely on a third party extension to enable this.
94 94 _enabled = False
95 95
96 96 # Options for obsolescence
97 97 createmarkersopt = 'createmarkers'
98 98 allowunstableopt = 'allowunstable'
99 99 exchangeopt = 'exchange'
100 100
101 101 def isenabled(repo, option):
102 102 """Returns True if the given repository has the given obsolete option
103 103 enabled.
104 104 """
105 105 result = set(repo.ui.configlist('experimental', 'stabilization'))
106 106 if 'all' in result:
107 107 return True
108 108
109 109 # For migration purposes, temporarily return true if the config hasn't been
110 110 # set but _enabled is true.
111 111 if len(result) == 0 and _enabled:
112 112 return True
113 113
114 114 # createmarkers must be enabled if other options are enabled
115 115 if ((allowunstableopt in result or exchangeopt in result) and
116 116 not createmarkersopt in result):
117 117 raise error.Abort(_("'createmarkers' obsolete option must be enabled "
118 118 "if other obsolete options are enabled"))
119 119
120 120 return option in result
121 121
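# Annotation: a typical call site gates marker creation on the option, e.g.
# with "[experimental] stabilization = createmarkers" in the configuration
# (values illustrative). createmarkers() is assumed to be defined later in
# this module.
def _example_maybecreatemarkers(repo, relations):
    # relations: iterable of (predecessor, (successors,)) pairs
    if isenabled(repo, createmarkersopt):
        createmarkers(repo, relations)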
122 122 ### obsolescence marker flag
123 123
124 124 ## bumpedfix flag
125 125 #
126 126 # When a changeset A' succeeds a changeset A which became public, we call A'
127 127 # "bumped" because it is a successor of a public changeset.
128 128 #
129 129 # o A' (bumped)
130 130 # |`:
131 131 # | o A
132 132 # |/
133 133 # o Z
134 134 #
135 135 # The way to solve this situation is to create a new changeset Ad as a child
136 136 # of A. This changeset has the same content as A'. So the diff from A to A'
137 137 # is the same as the diff from A to Ad. Ad is marked as a successor of A'.
138 138 #
139 139 # o Ad
140 140 # |`:
141 141 # | x A'
142 142 # |'|
143 143 # o | A
144 144 # |/
145 145 # o Z
146 146 #
147 147 # But by transitivity Ad is also a successor of A. To avoid having Ad marked
148 148 # as bumped too, we add the `bumpedfix` flag to the marker <A', (Ad,)>.
149 149 # This flag means that the successors express the changes between the public
150 150 # and bumped versions and fix the situation, breaking the transitivity of
151 151 # "bumped" here.
152 152 bumpedfix = 1
153 153 usingsha256 = 2
154 154
155 155 ## Parsing and writing of version "0"
156 156 #
157 157 # The header is followed by the markers. Each marker is made of:
158 158 #
159 159 # - 1 uint8 : number of new changesets "N", can be zero.
160 160 #
161 161 # - 1 uint32: metadata size "M" in bytes.
162 162 #
163 163 # - 1 byte: a bit field. It is reserved for flags used in common
164 164 # obsolete marker operations, to avoid repeated decoding of metadata
165 165 # entries.
166 166 #
167 167 # - 20 bytes: obsoleted changeset identifier.
168 168 #
169 169 # - N*20 bytes: new changesets identifiers.
170 170 #
171 171 # - M bytes: metadata as a sequence of nul-terminated strings. Each
172 172 # string contains a key and a value, separated by a colon ':', without
173 173 # additional encoding. Keys cannot contain '\0' or ':' and values
174 174 # cannot contain '\0'.
175 175 _fm0version = 0
176 176 _fm0fixed = '>BIB20s'
177 177 _fm0node = '20s'
178 178 _fm0fsize = _calcsize(_fm0fixed)
179 179 _fm0fnodesize = _calcsize(_fm0node)
180 180
181 181 def _fm0readmarkers(data, off, stop):
182 182 # Loop on markers
183 183 while off < stop:
184 184 # read fixed part
185 185 cur = data[off:off + _fm0fsize]
186 186 off += _fm0fsize
187 187 numsuc, mdsize, flags, pre = _unpack(_fm0fixed, cur)
188 188 # read replacement
189 189 sucs = ()
190 190 if numsuc:
191 191 s = (_fm0fnodesize * numsuc)
192 192 cur = data[off:off + s]
193 193 sucs = _unpack(_fm0node * numsuc, cur)
194 194 off += s
195 195 # read metadata
196 196 # (metadata will be decoded on demand)
197 197 metadata = data[off:off + mdsize]
198 198 if len(metadata) != mdsize:
199 199 raise error.Abort(_('parsing obsolete marker: metadata is too '
200 200 'short, %d bytes expected, got %d')
201 201 % (mdsize, len(metadata)))
202 202 off += mdsize
203 203 metadata = _fm0decodemeta(metadata)
204 204 try:
205 205 when, offset = metadata.pop('date', '0 0').split(' ')
206 206 date = float(when), int(offset)
207 207 except ValueError:
208 208 date = (0., 0)
209 209 parents = None
210 210 if 'p2' in metadata:
211 211 parents = (metadata.pop('p1', None), metadata.pop('p2', None))
212 212 elif 'p1' in metadata:
213 213 parents = (metadata.pop('p1', None),)
214 214 elif 'p0' in metadata:
215 215 parents = ()
216 216 if parents is not None:
217 217 try:
218 218 parents = tuple(node.bin(p) for p in parents)
219 219 # if parent content is not a nodeid, drop the data
220 220 for p in parents:
221 221 if len(p) != 20:
222 222 parents = None
223 223 break
224 224 except TypeError:
225 225 # if content cannot be translated to nodeid drop the data.
226 226 parents = None
227 227
228 228 metadata = tuple(sorted(metadata.iteritems()))
229 229
230 230 yield (pre, sucs, flags, metadata, date, parents)
231 231
232 232 def _fm0encodeonemarker(marker):
233 233 pre, sucs, flags, metadata, date, parents = marker
234 234 if flags & usingsha256:
235 235 raise error.Abort(_('cannot handle sha256 with old obsstore format'))
236 236 metadata = dict(metadata)
237 237 time, tz = date
238 238 metadata['date'] = '%r %i' % (time, tz)
239 239 if parents is not None:
240 240 if not parents:
241 241 # mark that we explicitly recorded no parents
242 242 metadata['p0'] = ''
243 243 for i, p in enumerate(parents, 1):
244 244 metadata['p%i' % i] = node.hex(p)
245 245 metadata = _fm0encodemeta(metadata)
246 246 numsuc = len(sucs)
247 247 format = _fm0fixed + (_fm0node * numsuc)
248 248 data = [numsuc, len(metadata), flags, pre]
249 249 data.extend(sucs)
250 250 return _pack(format, *data) + metadata
251 251
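# Annotation: a round-trip sketch for the v0 format, encoding one marker and
# reading it straight back. The node values are fabricated 20-byte
# identifiers used purely for illustration.
def _example_fm0roundtrip():
    pre = '\x11' * 20
    suc = '\x22' * 20
    marker = (pre, (suc,), 0, (('user', 'alice'),), (0.0, 0), None)
    data = _fm0encodeonemarker(marker)
    # off=0 because this raw fragment carries no version header
    return list(_fm0readmarkers(data, 0, len(data)))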
252 252 def _fm0encodemeta(meta):
253 253 """Return encoded metadata string to string mapping.
254 254
255 255 Assumes no ':' in keys and no '\0' in either keys or values."""
256 256 for key, value in meta.iteritems():
257 257 if ':' in key or '\0' in key:
258 258 raise ValueError("':' and '\0' are forbidden in metadata keys")
259 259 if '\0' in value:
260 260 raise ValueError("'\0' is forbidden in metadata values")
261 261 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
262 262
263 263 def _fm0decodemeta(data):
264 264 """Return string to string dictionary from encoded version."""
265 265 d = {}
266 266 for l in data.split('\0'):
267 267 if l:
268 268 key, value = l.split(':')
269 269 d[key] = value
270 270 return d
271 271
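# Annotation: the v0 metadata encoding is just 'key:value' pairs joined by
# NUL bytes, so encode/decode round-trip cleanly for well-formed keys:
def _example_fm0meta():
    meta = {'user': 'alice', 'operation': 'amend'}
    assert _fm0decodemeta(_fm0encodemeta(meta)) == meta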
272 272 ## Parsing and writing of version "1"
273 273 #
274 274 # The header is followed by the markers. Each marker is made of:
275 275 #
276 276 # - uint32: total size of the marker (including this field)
277 277 #
278 278 # - float64: date in seconds since epoch
279 279 #
280 280 # - int16: timezone offset in minutes
281 281 #
282 282 # - uint16: a bit field. It is reserved for flags used in common
283 283 # obsolete marker operations, to avoid repeated decoding of metadata
284 284 # entries.
285 285 #
286 286 # - uint8: number of successors "N", can be zero.
287 287 #
288 288 # - uint8: number of parents "P", can be zero.
289 289 #
290 290 # 0: parents data stored but no parent,
291 291 # 1: one parent stored,
292 292 # 2: two parents stored,
293 293 # 3: no parent data stored
294 294 #
295 295 # - uint8: number of metadata entries M
296 296 #
297 297 # - 20 or 32 bytes: predecessor changeset identifier.
298 298 #
299 299 # - N*(20 or 32) bytes: successors changesets identifiers.
300 300 #
301 301 # - P*(20 or 32) bytes: parents of the predecessors changesets.
302 302 #
303 303 # - M*(uint8, uint8): size of all metadata entries (key and value)
304 304 #
305 305 # - remaining bytes: the metadata, each (key, value) pair after the other.
306 306 _fm1version = 1
307 307 _fm1fixed = '>IdhHBBB20s'
308 308 _fm1nodesha1 = '20s'
309 309 _fm1nodesha256 = '32s'
310 310 _fm1nodesha1size = _calcsize(_fm1nodesha1)
311 311 _fm1nodesha256size = _calcsize(_fm1nodesha256)
312 312 _fm1fsize = _calcsize(_fm1fixed)
313 313 _fm1parentnone = 3
314 314 _fm1parentshift = 14
315 315 _fm1parentmask = (_fm1parentnone << _fm1parentshift)
316 316 _fm1metapair = 'BB'
317 317 _fm1metapairsize = _calcsize(_fm1metapair)
318 318
319 319 def _fm1purereadmarkers(data, off, stop):
320 320 # make some global constants local for performance
321 321 noneflag = _fm1parentnone
322 322 sha2flag = usingsha256
323 323 sha1size = _fm1nodesha1size
324 324 sha2size = _fm1nodesha256size
325 325 sha1fmt = _fm1nodesha1
326 326 sha2fmt = _fm1nodesha256
327 327 metasize = _fm1metapairsize
328 328 metafmt = _fm1metapair
329 329 fsize = _fm1fsize
330 330 unpack = _unpack
331 331
332 332 # Loop on markers
333 333 ufixed = struct.Struct(_fm1fixed).unpack
334 334
335 335 while off < stop:
336 336 # read fixed part
337 337 o1 = off + fsize
338 338 t, secs, tz, flags, numsuc, numpar, nummeta, prec = ufixed(data[off:o1])
339 339
340 340 if flags & sha2flag:
341 341 # FIXME: prec was read as a SHA1, needs to be amended
342 342
343 343 # read 0 or more successors
344 344 if numsuc == 1:
345 345 o2 = o1 + sha2size
346 346 sucs = (data[o1:o2],)
347 347 else:
348 348 o2 = o1 + sha2size * numsuc
349 349 sucs = unpack(sha2fmt * numsuc, data[o1:o2])
350 350
351 351 # read parents
352 352 if numpar == noneflag:
353 353 o3 = o2
354 354 parents = None
355 355 elif numpar == 1:
356 356 o3 = o2 + sha2size
357 357 parents = (data[o2:o3],)
358 358 else:
359 359 o3 = o2 + sha2size * numpar
360 360 parents = unpack(sha2fmt * numpar, data[o2:o3])
361 361 else:
362 362 # read 0 or more successors
363 363 if numsuc == 1:
364 364 o2 = o1 + sha1size
365 365 sucs = (data[o1:o2],)
366 366 else:
367 367 o2 = o1 + sha1size * numsuc
368 368 sucs = unpack(sha1fmt * numsuc, data[o1:o2])
369 369
370 370 # read parents
371 371 if numpar == noneflag:
372 372 o3 = o2
373 373 parents = None
374 374 elif numpar == 1:
375 375 o3 = o2 + sha1size
376 376 parents = (data[o2:o3],)
377 377 else:
378 378 o3 = o2 + sha1size * numpar
379 379 parents = unpack(sha1fmt * numpar, data[o2:o3])
380 380
381 381 # read metadata
382 382 off = o3 + metasize * nummeta
383 383 metapairsize = unpack('>' + (metafmt * nummeta), data[o3:off])
384 384 metadata = []
385 385 for idx in xrange(0, len(metapairsize), 2):
386 386 o1 = off + metapairsize[idx]
387 387 o2 = o1 + metapairsize[idx + 1]
388 388 metadata.append((data[off:o1], data[o1:o2]))
389 389 off = o2
390 390
391 391 yield (prec, sucs, flags, tuple(metadata), (secs, tz * 60), parents)
392 392
393 393 def _fm1encodeonemarker(marker):
394 394 pre, sucs, flags, metadata, date, parents = marker
395 395 # determine node size
396 396 _fm1node = _fm1nodesha1
397 397 if flags & usingsha256:
398 398 _fm1node = _fm1nodesha256
399 399 numsuc = len(sucs)
400 400 numextranodes = numsuc
401 401 if parents is None:
402 402 numpar = _fm1parentnone
403 403 else:
404 404 numpar = len(parents)
405 405 numextranodes += numpar
406 406 formatnodes = _fm1node * numextranodes
407 407 formatmeta = _fm1metapair * len(metadata)
408 408 format = _fm1fixed + formatnodes + formatmeta
409 409 # tz is stored in minutes so we divide by 60
410 410 tz = date[1]//60
411 411 data = [None, date[0], tz, flags, numsuc, numpar, len(metadata), pre]
412 412 data.extend(sucs)
413 413 if parents is not None:
414 414 data.extend(parents)
415 415 totalsize = _calcsize(format)
416 416 for key, value in metadata:
417 417 lk = len(key)
418 418 lv = len(value)
419 419 data.append(lk)
420 420 data.append(lv)
421 421 totalsize += lk + lv
422 422 data[0] = totalsize
423 423 data = [_pack(format, *data)]
424 424 for key, value in metadata:
425 425 data.append(key)
426 426 data.append(value)
427 427 return ''.join(data)
428 428
429 429 def _fm1readmarkers(data, off, stop):
430 430 native = getattr(parsers, 'fm1readmarkers', None)
431 431 if not native:
432 432 return _fm1purereadmarkers(data, off, stop)
433 433 return native(data, off, stop)
434 434
435 435 # mapping to read/write various marker formats
436 436 # <version> -> (decoder, encoder)
437 437 formats = {_fm0version: (_fm0readmarkers, _fm0encodeonemarker),
438 438 _fm1version: (_fm1readmarkers, _fm1encodeonemarker)}
439 439
440 440 def _readmarkerversion(data):
441 441 return _unpack('>B', data[0:1])[0]
442 442
443 443 @util.nogc
444 444 def _readmarkers(data, off=None, stop=None):
445 445 """Read and enumerate markers from raw data"""
446 446 diskversion = _readmarkerversion(data)
447 447 if not off:
448 448 off = 1 # skip 1 byte version number
449 449 if stop is None:
450 450 stop = len(data)
451 451 if diskversion not in formats:
452 452 msg = _('parsing obsolete marker: unknown version %r') % diskversion
453 453 raise error.UnknownVersion(msg, version=diskversion)
454 454 return diskversion, formats[diskversion][0](data, off, stop)
455 455
456 456 def encodeheader(version=_fm0version):
457 457 return _pack('>B', version)
458 458
459 459 def encodemarkers(markers, addheader=False, version=_fm0version):
460 460 # Kept separate from flushmarkers(), it will be reused for
461 461 # markers exchange.
462 462 encodeone = formats[version][1]
463 463 if addheader:
464 464 yield encodeheader(version)
465 465 for marker in markers:
466 466 yield encodeone(marker)
467 467
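# Annotation: an end-to-end sketch tying the codecs together: serialize
# markers with a version header via encodemarkers(), then parse the blob
# back through the formats dispatch table via _readmarkers() (both defined
# above).
def _example_markerstream(markers):
    data = ''.join(encodemarkers(markers, addheader=True,
                                 version=_fm1version))
    version, decoded = _readmarkers(data)
    return version, list(decoded)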
468 468 @util.nogc
469 469 def _addsuccessors(successors, markers):
470 470 for mark in markers:
471 471 successors.setdefault(mark[0], set()).add(mark)
472 472
473 473 def _addprecursors(*args, **kwargs):
474 474 msg = ("'obsolete._addprecursors' is deprecated, "
475 475 "use 'obsolete._addpredecessors'")
476 476 util.nouideprecwarn(msg, '4.4')
477 477
478 478 return _addpredecessors(*args, **kwargs)
479 479
480 480 @util.nogc
481 481 def _addpredecessors(predecessors, markers):
482 482 for mark in markers:
483 483 for suc in mark[1]:
484 484 predecessors.setdefault(suc, set()).add(mark)
485 485
486 486 @util.nogc
487 487 def _addchildren(children, markers):
488 488 for mark in markers:
489 489 parents = mark[5]
490 490 if parents is not None:
491 491 for p in parents:
492 492 children.setdefault(p, set()).add(mark)
493 493
494 494 def _checkinvalidmarkers(markers):
495 495 """search for marker with invalid data and raise error if needed
496 496
497 497 Exist as a separated function to allow the evolve extension for a more
498 498 subtle handling.
499 499 """
500 500 for mark in markers:
501 501 if node.nullid in mark[1]:
502 502 raise error.Abort(_('bad obsolescence marker detected: '
503 503 'invalid successors nullid'))
504 504
505 505 class obsstore(object):
506 506 """Store obsolete markers
507 507
508 508 Markers can be accessed with the following mappings:
509 509 - predecessors[x] -> set(markers on predecessors edges of x)
510 510 - successors[x] -> set(markers on successors edges of x)
511 511 - children[x] -> set(markers on predecessors edges of children(x))
512 512 """
513 513
514 514 fields = ('prec', 'succs', 'flag', 'meta', 'date', 'parents')
515 515 # prec: nodeid, predecessors changesets
516 516 # succs: tuple of nodeid, successor changesets (0-N length)
517 517 # flag: integer, flag field carrying modifier for the markers (see doc)
518 518 # meta: binary blob, encoded metadata dictionary
519 519 # date: (float, int) tuple, date of marker creation
520 520 # parents: (tuple of nodeid) or None, parents of predecessors
521 521 # None is used when no data has been recorded
522 522
523 523 def __init__(self, svfs, defaultformat=_fm1version, readonly=False):
524 524 # caches for various obsolescence related data
525 525 self.caches = {}
526 526 self.svfs = svfs
527 527 self._defaultformat = defaultformat
528 528 self._readonly = readonly
529 529
530 530 def __iter__(self):
531 531 return iter(self._all)
532 532
533 533 def __len__(self):
534 534 return len(self._all)
535 535
536 536 def __nonzero__(self):
537 537 if not self._cached('_all'):
538 538 try:
539 539 return self.svfs.stat('obsstore').st_size > 1
540 540 except OSError as inst:
541 541 if inst.errno != errno.ENOENT:
542 542 raise
543 543 # just build an empty _all list if no obsstore exists, which
544 544 # avoids further stat() syscalls
545 545 pass
546 546 return bool(self._all)
547 547
548 548 __bool__ = __nonzero__
549 549
550 550 @property
551 551 def readonly(self):
552 552 """True if marker creation is disabled
553 553
554 554 Remove me in the future when obsolete markers are always on."""
555 555 return self._readonly
556 556
557 557 def create(self, transaction, prec, succs=(), flag=0, parents=None,
558 558 date=None, metadata=None, ui=None):
559 559 """obsolete: add a new obsolete marker
560 560
561 561 * ensure it is hashable
562 562 * check mandatory metadata
563 563 * encode metadata
564 564 
565 565 If you are a human writing code that creates markers, you want to use
566 566 the `createmarkers` function in this module instead.
567 567 
568 568 return True if a new marker has been added, False if the marker
569 569 already existed (no-op).
570 570 """
571 571 if metadata is None:
572 572 metadata = {}
573 573 if date is None:
574 574 if 'date' in metadata:
575 575 # as a courtesy for out-of-tree extensions
576 576 date = util.parsedate(metadata.pop('date'))
577 577 elif ui is not None:
578 578 date = ui.configdate('devel', 'default-date')
579 579 if date is None:
580 580 date = util.makedate()
581 581 else:
582 582 date = util.makedate()
583 583 if len(prec) != 20:
584 584 raise ValueError(prec)
585 585 for succ in succs:
586 586 if len(succ) != 20:
587 587 raise ValueError(succ)
588 588 if prec in succs:
589 589 raise ValueError(_('in-marker cycle with %s') % node.hex(prec))
590 590
591 591 metadata = tuple(sorted(metadata.iteritems()))
592 592
593 593 marker = (bytes(prec), tuple(succs), int(flag), metadata, date, parents)
594 594 return bool(self.add(transaction, [marker]))
595 595
596 596 def add(self, transaction, markers):
597 597 """Add new markers to the store
598 598
599 599 Takes care of filtering duplicates.
600 600 Returns the number of new markers."""
601 601 if self._readonly:
602 602 raise error.Abort(_('creating obsolete markers is not enabled on '
603 603 'this repo'))
604 604 known = set()
605 605 getsuccessors = self.successors.get
606 606 new = []
607 607 for m in markers:
608 608 if m not in getsuccessors(m[0], ()) and m not in known:
609 609 known.add(m)
610 610 new.append(m)
611 611 if new:
612 612 f = self.svfs('obsstore', 'ab')
613 613 try:
614 614 offset = f.tell()
615 615 transaction.add('obsstore', offset)
616 616 # offset == 0: new file - add the version header
617 617 data = b''.join(encodemarkers(new, offset == 0, self._version))
618 618 f.write(data)
619 619 finally:
620 620 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
621 621 # call 'filecacheentry.refresh()' here
622 622 f.close()
623 623 addedmarkers = transaction.changes.get('obsmarkers')
624 624 if addedmarkers is not None:
625 625 addedmarkers.update(new)
626 626 self._addmarkers(new, data)
627 627 # new markers *may* have changed several sets. invalidate the caches.
628 628 self.caches.clear()
629 629 # records the number of new markers for the transaction hooks
630 630 previous = int(transaction.hookargs.get('new_obsmarkers', '0'))
631 631 transaction.hookargs['new_obsmarkers'] = str(previous + len(new))
632 632 return len(new)
633 633
634 634 def mergemarkers(self, transaction, data):
635 635 """merge a binary stream of markers inside the obsstore
636 636
637 637 Returns the number of new markers added."""
638 638 version, markers = _readmarkers(data)
639 639 return self.add(transaction, markers)
640 640
641 641 @propertycache
642 642 def _data(self):
643 643 return self.svfs.tryread('obsstore')
644 644
645 645 @propertycache
646 646 def _version(self):
647 647 if len(self._data) >= 1:
648 648 return _readmarkerversion(self._data)
649 649 else:
650 650 return self._defaultformat
651 651
652 652 @propertycache
653 653 def _all(self):
654 654 data = self._data
655 655 if not data:
656 656 return []
657 657 self._version, markers = _readmarkers(data)
658 658 markers = list(markers)
659 659 _checkinvalidmarkers(markers)
660 660 return markers
661 661
662 662 @propertycache
663 663 def successors(self):
664 664 successors = {}
665 665 _addsuccessors(successors, self._all)
666 666 return successors
667 667
668 668 @property
669 669 def precursors(self):
670 670 msg = ("'obsstore.precursors' is deprecated, "
671 671 "use 'obsstore.predecessors'")
672 672 util.nouideprecwarn(msg, '4.4')
673 673
674 674 return self.predecessors
675 675
676 676 @propertycache
677 677 def predecessors(self):
678 678 predecessors = {}
679 679 _addpredecessors(predecessors, self._all)
680 680 return predecessors
681 681
682 682 @propertycache
683 683 def children(self):
684 684 children = {}
685 685 _addchildren(children, self._all)
686 686 return children
687 687
688 688 def _cached(self, attr):
689 689 return attr in self.__dict__
690 690
691 691 def _addmarkers(self, markers, rawdata):
692 692 markers = list(markers) # to allow repeated iteration
693 693 self._data = self._data + rawdata
694 694 self._all.extend(markers)
695 695 if self._cached('successors'):
696 696 _addsuccessors(self.successors, markers)
697 697 if self._cached('predecessors'):
698 698 _addpredecessors(self.predecessors, markers)
699 699 if self._cached('children'):
700 700 _addchildren(self.children, markers)
701 701 _checkinvalidmarkers(markers)
702 702
703 703 def relevantmarkers(self, nodes):
704 704 """return a set of all obsolescence markers relevant to a set of nodes.
705 705
706 706 "relevant" to a set of nodes mean:
707 707
708 708 - marker that use this changeset as successor
709 709 - prune marker of direct children on this changeset
710 710 - recursive application of the two rules on predecessors of these
711 711 markers
712 712
713 713 It is a set so you cannot rely on order."""
714 714
715 715 pendingnodes = set(nodes)
716 716 seenmarkers = set()
717 717 seennodes = set(pendingnodes)
718 718 precursorsmarkers = self.predecessors
719 719 succsmarkers = self.successors
720 720 children = self.children
721 721 while pendingnodes:
722 722 direct = set()
723 723 for current in pendingnodes:
724 724 direct.update(precursorsmarkers.get(current, ()))
725 725 pruned = [m for m in children.get(current, ()) if not m[1]]
726 726 direct.update(pruned)
727 727 pruned = [m for m in succsmarkers.get(current, ()) if not m[1]]
728 728 direct.update(pruned)
729 729 direct -= seenmarkers
730 730 pendingnodes = set([m[0] for m in direct])
731 731 seenmarkers |= direct
732 732 pendingnodes -= seennodes
733 733 seennodes |= pendingnodes
734 734 return seenmarkers
735 735
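# Annotation: a typical caller hands relevantmarkers() the nodes about to be
# pushed or bundled, then serializes the result for the wire; a sketch
# (v0 format shown for brevity, function name illustrative):
def _example_markersforpush(repo, nodes):
    markers = repo.obsstore.relevantmarkers(nodes)
    return ''.join(encodemarkers(sorted(markers), addheader=True,
                                 version=_fm0version))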
736 736 def makestore(ui, repo):
737 737 """Create an obsstore instance from a repo."""
738 738 # read default format for new obsstore.
739 739 # developer config: format.obsstore-version
740 740 defaultformat = ui.configint('format', 'obsstore-version')
741 741 # rely on obsstore class default when possible.
742 742 kwargs = {}
743 743 if defaultformat is not None:
744 744 kwargs['defaultformat'] = defaultformat
745 745 readonly = not isenabled(repo, createmarkersopt)
746 746 store = obsstore(repo.svfs, readonly=readonly, **kwargs)
747 747 if store and readonly:
748 748 ui.warn(_('obsolete feature not enabled but %i markers found!\n')
749 749 % len(list(store)))
750 750 return store
751 751
752 752 def commonversion(versions):
753 753 """Return the newest version listed in both `versions` and our local formats.
754 754
755 755 Returns None if no common version exists.
756 756 """
757 757 versions.sort(reverse=True)
758 758 # search for the highest version known on both sides
759 759 for v in versions:
760 760 if v in formats:
761 761 return v
762 762 return None
763 763
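# The same negotiation in isolation; {0, 1} stands in for the keys of the
# local `formats` table (an assumption for the demo):
def _commonversion_demo(versions, localformats=frozenset((0, 1))):
    for v in sorted(versions, reverse=True):
        if v in localformats:
            return v
    return None

assert _commonversion_demo([2, 1, 0]) == 1  # highest mutually-known version
assert _commonversion_demo([5]) is None     # no overlap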
764 764 # arbitrarily picked to fit into the 8K limit of the HTTP server
765 765 # you have to take into account:
766 766 # - the version header
767 767 # - the base85 encoding
768 768 _maxpayload = 5300
769 769
770 770 def _pushkeyescape(markers):
771 771 """encode markers into a dict suitable for pushkey exchange
772 772
773 773 - binary data is base85 encoded
774 774 - split into chunks smaller than 5300 bytes"""
775 775 keys = {}
776 776 parts = []
777 777 currentlen = _maxpayload * 2 # ensure we create a new part
778 778 for marker in markers:
779 779 nextdata = _fm0encodeonemarker(marker)
780 780 if (len(nextdata) + currentlen > _maxpayload):
781 781 currentpart = []
782 782 currentlen = 0
783 783 parts.append(currentpart)
784 784 currentpart.append(nextdata)
785 785 currentlen += len(nextdata)
786 786 for idx, part in enumerate(reversed(parts)):
787 787 data = ''.join([_pack('>B', _fm0version)] + part)
788 788 keys['dump%i' % idx] = util.b85encode(data)
789 789 return keys
790 790
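# A sketch of the greedy chunking above, using plain bytes payloads and the
# stdlib's base85 as a stand-in for util.b85encode (the wire format differs
# in detail, e.g. the version byte is omitted here):
import base64

_MAXPAYLOAD = 5300

def _pushkeyescape_demo(payloads):
    keys, parts = {}, []
    currentlen = _MAXPAYLOAD * 2            # ensure we create a new part
    for data in payloads:
        if len(data) + currentlen > _MAXPAYLOAD:
            currentpart, currentlen = [], 0
            parts.append(currentpart)
        currentpart.append(data)
        currentlen += len(data)
    for idx, part in enumerate(reversed(parts)):
        keys['dump%i' % idx] = base64.b85encode(b''.join(part))
    return keys

# two 3000-byte payloads do not fit in one 5300-byte part, so we get two keys
assert sorted(_pushkeyescape_demo([b'x' * 3000, b'y' * 3000])) == ['dump0',
                                                                   'dump1']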
791 791 def listmarkers(repo):
792 792 """List markers over pushkey"""
793 793 if not repo.obsstore:
794 794 return {}
795 795 return _pushkeyescape(sorted(repo.obsstore))
796 796
797 797 def pushmarker(repo, key, old, new):
798 798 """Push markers over pushkey"""
799 799 if not key.startswith('dump'):
800 800 repo.ui.warn(_('unknown key: %r') % key)
801 801 return False
802 802 if old:
803 803 repo.ui.warn(_('unexpected old value for %r') % key)
804 804 return False
805 805 data = util.b85decode(new)
806 806 lock = repo.lock()
807 807 try:
808 808 tr = repo.transaction('pushkey: obsolete markers')
809 809 try:
810 810 repo.obsstore.mergemarkers(tr, data)
811 811 repo.invalidatevolatilesets()
812 812 tr.close()
813 813 return True
814 814 finally:
815 815 tr.release()
816 816 finally:
817 817 lock.release()
818 818
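# The lock/transaction nesting in pushmarker follows acquire-in-order,
# release-in-reverse; schematically (dummy objects, not Mercurial APIs):
def _guarded_demo(acquirelock, opentr, work):
    lock = acquirelock()
    try:
        tr = opentr()
        try:
            result = work()
            tr.close()          # commit
            return result
        finally:
            tr.release()        # no-op after close(), rollback otherwise
    finally:
        lock.release()

_log = []
class _Releasable(object):
    def __init__(self, name):
        self.name = name
    def close(self):
        _log.append(self.name + '.close')
    def release(self):
        _log.append(self.name + '.release')

_guarded_demo(lambda: _Releasable('lock'), lambda: _Releasable('tr'),
              lambda: _log.append('work'))
assert _log == ['work', 'tr.close', 'tr.release', 'lock.release']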
819 819 # keep compatibility for the 4.3 cycle
820 820 def allprecursors(obsstore, nodes, ignoreflags=0):
821 821 movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors'
822 822 util.nouideprecwarn(movemsg, '4.3')
823 823 return obsutil.allprecursors(obsstore, nodes, ignoreflags)
824 824
825 825 def allsuccessors(obsstore, nodes, ignoreflags=0):
826 826 movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors'
827 827 util.nouideprecwarn(movemsg, '4.3')
828 828 return obsutil.allsuccessors(obsstore, nodes, ignoreflags)
829 829
830 830 def marker(repo, data):
831 831 movemsg = 'obsolete.marker moved to obsutil.marker'
832 832 repo.ui.deprecwarn(movemsg, '4.3')
833 833 return obsutil.marker(repo, data)
834 834
835 835 def getmarkers(repo, nodes=None, exclusive=False):
836 836 movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers'
837 837 repo.ui.deprecwarn(movemsg, '4.3')
838 838 return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive)
839 839
840 840 def exclusivemarkers(repo, nodes):
841 841 movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers'
842 842 repo.ui.deprecwarn(movemsg, '4.3')
843 843 return obsutil.exclusivemarkers(repo, nodes)
844 844
845 845 def foreground(repo, nodes):
846 846 movemsg = 'obsolete.foreground moved to obsutil.foreground'
847 847 repo.ui.deprecwarn(movemsg, '4.3')
848 848 return obsutil.foreground(repo, nodes)
849 849
850 850 def successorssets(repo, initialnode, cache=None):
851 851 movemsg = 'obsolete.successorssets moved to obsutil.successorssets'
852 852 repo.ui.deprecwarn(movemsg, '4.3')
853 853 return obsutil.successorssets(repo, initialnode, cache=cache)
854 854
855 855 # mapping of 'set-name' -> <function to compute this set>
856 856 cachefuncs = {}
857 857 def cachefor(name):
858 858 """Decorator to register a function as computing the cache for a set"""
859 859 def decorator(func):
860 860 if name in cachefuncs:
861 861 msg = "duplicated registration for volatileset '%s' (existing: %r)"
862 862 raise error.ProgrammingError(msg % (name, cachefuncs[name]))
863 863 cachefuncs[name] = func
864 864 return func
865 865 return decorator
866 866
867 867 def getrevs(repo, name):
868 868 """Return the set of revisions that belong to the <name> set
869 869
870 870 Such access may compute the set and cache it for future use"""
871 871 repo = repo.unfiltered()
872 872 if not repo.obsstore:
873 873 return frozenset()
874 874 if name not in repo.obsstore.caches:
875 875 repo.obsstore.caches[name] = cachefuncs[name](repo)
876 876 return repo.obsstore.caches[name]
877 877
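# Registration plus lazy computation in miniature (illustrative names only):
_cachefuncs_demo = {}

def _cachefor_demo(name):
    def decorator(func):
        assert name not in _cachefuncs_demo, 'duplicated registration'
        _cachefuncs_demo[name] = func
        return func
    return decorator

@_cachefor_demo('odd')
def _computeodd(revs):
    return frozenset(r for r in revs if r % 2)

_caches = {}
def _getrevs_demo(name, revs=range(6)):
    if name not in _caches:             # computed once, then memoized
        _caches[name] = _cachefuncs_demo[name](revs)
    return _caches[name]

assert _getrevs_demo('odd') == {1, 3, 5}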
878 878 # To keep things simple we need to invalidate the obsolescence cache when:
879 879 #
880 880 # - a new changeset is added
881 881 # - the public phase is changed
882 882 # - obsolescence markers are added
883 883 # - strip is used on a repo
884 884 def clearobscaches(repo):
885 885 """Remove all obsolescence related cache from a repo
886 886
887 887 This removes all caches in the obsstore if the obsstore already exists
888 888 on the repo.
889 889
890 890 (We could be smarter here given the exact event that triggered the cache
891 891 clearing)"""
892 892 # only clear the cache if there is obsstore data in this repo
893 893 if 'obsstore' in repo._filecache:
894 894 repo.obsstore.caches.clear()
895 895
896 896 def _mutablerevs(repo):
897 897 """the set of mutable revisions in the repository"""
898 898 return repo._phasecache.getrevset(repo, (phases.draft, phases.secret))
899 899
900 900 @cachefor('obsolete')
901 901 def _computeobsoleteset(repo):
902 902 """the set of obsolete revisions"""
903 903 getnode = repo.changelog.node
904 904 notpublic = _mutablerevs(repo)
905 905 isobs = repo.obsstore.successors.__contains__
906 906 obs = set(r for r in notpublic if isobs(getnode(r)))
907 907 return obs
908 908
909 909 @cachefor('unstable')
910 910 def _computeunstableset(repo):
911 911 msg = ("'unstable' volatile set is deprecated, "
912 912 "use 'orphan'")
913 913 repo.ui.deprecwarn(msg, '4.4')
914 914
915 915 return _computeorphanset(repo)
916 916
917 917 @cachefor('orphan')
918 918 def _computeorphanset(repo):
919 919 """the set of non-obsolete revisions with obsolete parents"""
920 920 pfunc = repo.changelog.parentrevs
921 921 mutable = _mutablerevs(repo)
922 922 obsolete = getrevs(repo, 'obsolete')
923 923 others = mutable - obsolete
924 924 unstable = set()
925 925 for r in sorted(others):
926 926 # A rev is unstable if one of its parents is obsolete or unstable;
927 927 # this works since we traverse in increasing rev order
928 928 for p in pfunc(r):
929 929 if p in obsolete or p in unstable:
930 930 unstable.add(r)
931 931 break
932 932 return unstable
933 933
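# The single ascending pass above, on a toy DAG given as rev -> parents and
# ignoring phases (every rev is treated as mutable):
def _orphans_demo(parents, obsolete):
    unstable = set()
    for r in sorted(parents):
        if r in obsolete:
            continue
        if any(p in obsolete or p in unstable for p in parents[r]):
            unstable.add(r)
    return unstable

# rev 1 is obsolete; its descendants 2 and 3 become orphans, 4 does not
_parents = {0: [], 1: [0], 2: [1], 3: [2], 4: [0]}
assert _orphans_demo(_parents, {1}) == {2, 3}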
934 934 @cachefor('suspended')
935 935 def _computesuspendedset(repo):
936 936 """the set of obsolete changesets with non-obsolete descendants"""
937 937 suspended = repo.changelog.ancestors(getrevs(repo, 'orphan'))
938 938 return set(r for r in getrevs(repo, 'obsolete') if r in suspended)
939 939
940 940 @cachefor('extinct')
941 941 def _computeextinctset(repo):
942 942 """the set of obsolete changesets without non-obsolete descendants"""
943 943 return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended')
944 944
945 945
946 946 @cachefor('bumped')
947 947 def _computebumpedset(repo):
948 948 """the set of revs trying to obsolete public revisions"""
949 949 bumped = set()
950 950 # util function (avoid attribute lookup in the loop)
951 951 phase = repo._phasecache.phase # would be faster to grab the full list
952 952 public = phases.public
953 953 cl = repo.changelog
954 954 torev = cl.nodemap.get
955 955 for ctx in repo.set('(not public()) and (not obsolete())'):
956 956 rev = ctx.rev()
957 957 # We only evaluate mutable, non-obsolete revisions
958 958 node = ctx.node()
959 959 # (future) A cache of predecessors may be worth it if split is very common
960 960 for pnode in obsutil.allpredecessors(repo.obsstore, [node],
961 961 ignoreflags=bumpedfix):
962 962 prev = torev(pnode) # unfiltered! but so is phasecache
963 963 if (prev is not None) and (phase(repo, prev) <= public):
964 964 # we have a public predecessor
965 965 bumped.add(rev)
966 966 break # Next draft!
967 967 return bumped
968 968
969 969 @cachefor('divergent')
970 970 def _computedivergentset(repo):
971 msg = ("'divergent' volatile set is deprecated, "
972 "use 'contentdivergent'")
973 repo.ui.deprecwarn(msg, '4.4')
974
975 return _computecontentdivergentset(repo)
976
977 @cachefor('contentdivergent')
978 def _computecontentdivergentset(repo):
971 979 """the set of revs that compete to be the final successors of some revision.
972 980 """
973 981 divergent = set()
974 982 obsstore = repo.obsstore
975 983 newermap = {}
976 984 for ctx in repo.set('(not public()) - obsolete()'):
977 985 mark = obsstore.predecessors.get(ctx.node(), ())
978 986 toprocess = set(mark)
979 987 seen = set()
980 988 while toprocess:
981 989 prec = toprocess.pop()[0]
982 990 if prec in seen:
983 991 continue # emergency cycle hanging prevention
984 992 seen.add(prec)
985 993 if prec not in newermap:
986 994 obsutil.successorssets(repo, prec, cache=newermap)
987 995 newer = [n for n in newermap[prec] if n]
988 996 if len(newer) > 1:
989 997 divergent.add(ctx.rev())
990 998 break
991 999 toprocess.update(obsstore.predecessors.get(prec, ()))
992 1000 return divergent
993 1001
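# Content-divergence in its simplest form: a predecessor rewritten into two
# distinct surviving successors-sets (toy data, not obsutil.successorssets):
def _isdivergent_demo(successorssets):
    return any(len([s for s in sets if s]) > 1
               for sets in successorssets.values())

assert _isdivergent_demo({'a': [('b',), ('c',)]})   # 'a' rewritten twice
assert not _isdivergent_demo({'a': [('b',)]})       # linear rewrite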
994 1002
995 1003 def createmarkers(repo, relations, flag=0, date=None, metadata=None,
996 1004 operation=None):
997 1005 """Add obsolete markers between changesets in a repo
998 1006
999 1007 <relations> must be an iterable of (<old>, (<new>, ...)[,{metadata}])
1000 1008 tuples. `old` and `news` are changectx objects. metadata is an optional
1001 1009 dictionary containing metadata for this marker only. It is merged with the
1002 1010 global metadata specified through the `metadata` argument of this function.
1003 1011
1004 1012 Trying to obsolete a public changeset will raise an exception.
1005 1013
1006 1014 Current user and date are used except if specified otherwise in the
1007 1015 metadata attribute.
1008 1016
1009 1017 This function operates within a transaction of its own, but does
1010 1018 not take any lock on the repo.
1011 1019 """
1012 1020 # prepare metadata
1013 1021 if metadata is None:
1014 1022 metadata = {}
1015 1023 if 'user' not in metadata:
1016 1024 metadata['user'] = repo.ui.username()
1017 1025 useoperation = repo.ui.configbool('experimental',
1018 1026 'stabilization.track-operation')
1019 1027 if useoperation and operation:
1020 1028 metadata['operation'] = operation
1021 1029 tr = repo.transaction('add-obsolescence-marker')
1022 1030 try:
1023 1031 markerargs = []
1024 1032 for rel in relations:
1025 1033 prec = rel[0]
1026 1034 sucs = rel[1]
1027 1035 localmetadata = metadata.copy()
1028 1036 if 2 < len(rel):
1029 1037 localmetadata.update(rel[2])
1030 1038
1031 1039 if not prec.mutable():
1032 1040 raise error.Abort(_("cannot obsolete public changeset: %s")
1033 1041 % prec,
1034 1042 hint="see 'hg help phases' for details")
1035 1043 nprec = prec.node()
1036 1044 nsucs = tuple(s.node() for s in sucs)
1037 1045 npare = None
1038 1046 if not nsucs:
1039 1047 npare = tuple(p.node() for p in prec.parents())
1040 1048 if nprec in nsucs:
1041 1049 raise error.Abort(_("changeset %s cannot obsolete itself")
1042 1050 % prec)
1043 1051
1044 1052 # Creating the marker causes the hidden cache to become invalid,
1045 1053 # which causes recomputation when we ask for prec.parents() above.
1046 1054 # Resulting in n^2 behavior. So let's prepare all of the args
1047 1055 # first, then create the markers.
1048 1056 markerargs.append((nprec, nsucs, npare, localmetadata))
1049 1057
1050 1058 for args in markerargs:
1051 1059 nprec, nsucs, npare, localmetadata = args
1052 1060 repo.obsstore.create(tr, nprec, nsucs, flag, parents=npare,
1053 1061 date=date, metadata=localmetadata,
1054 1062 ui=repo.ui)
1055 1063 repo.filteredrevcache.clear()
1056 1064 tr.close()
1057 1065 finally:
1058 1066 tr.release()
@@ -1,2139 +1,2139 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 obsutil,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 revsetlang,
28 28 scmutil,
29 29 smartset,
30 30 util,
31 31 )
32 32
33 33 # helpers for processing parsed tree
34 34 getsymbol = revsetlang.getsymbol
35 35 getstring = revsetlang.getstring
36 36 getinteger = revsetlang.getinteger
37 37 getboolean = revsetlang.getboolean
38 38 getlist = revsetlang.getlist
39 39 getrange = revsetlang.getrange
40 40 getargs = revsetlang.getargs
41 41 getargsdict = revsetlang.getargsdict
42 42
43 43 # constants used as an argument of match() and matchany()
44 44 anyorder = revsetlang.anyorder
45 45 defineorder = revsetlang.defineorder
46 46 followorder = revsetlang.followorder
47 47
48 48 baseset = smartset.baseset
49 49 generatorset = smartset.generatorset
50 50 spanset = smartset.spanset
51 51 fullreposet = smartset.fullreposet
52 52
53 53 # helpers
54 54
55 55 def getset(repo, subset, x):
56 56 if not x:
57 57 raise error.ParseError(_("missing argument"))
58 58 return methods[x[0]](repo, subset, *x[1:])
59 59
60 60 def _getrevsource(repo, r):
61 61 extra = repo[r].extra()
62 62 for label in ('source', 'transplant_source', 'rebase_source'):
63 63 if label in extra:
64 64 try:
65 65 return repo[extra[label]].rev()
66 66 except error.RepoLookupError:
67 67 pass
68 68 return None
69 69
70 70 # operator methods
71 71
72 72 def stringset(repo, subset, x):
73 73 x = scmutil.intrev(repo[x])
74 74 if (x in subset
75 75 or x == node.nullrev and isinstance(subset, fullreposet)):
76 76 return baseset([x])
77 77 return baseset()
78 78
79 79 def rangeset(repo, subset, x, y, order):
80 80 m = getset(repo, fullreposet(repo), x)
81 81 n = getset(repo, fullreposet(repo), y)
82 82
83 83 if not m or not n:
84 84 return baseset()
85 85 return _makerangeset(repo, subset, m.first(), n.last(), order)
86 86
87 87 def rangeall(repo, subset, x, order):
88 88 assert x is None
89 89 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
90 90
91 91 def rangepre(repo, subset, y, order):
92 92 # ':y' can't be rewritten to '0:y' since '0' may be hidden
93 93 n = getset(repo, fullreposet(repo), y)
94 94 if not n:
95 95 return baseset()
96 96 return _makerangeset(repo, subset, 0, n.last(), order)
97 97
98 98 def rangepost(repo, subset, x, order):
99 99 m = getset(repo, fullreposet(repo), x)
100 100 if not m:
101 101 return baseset()
102 102 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
103 103
104 104 def _makerangeset(repo, subset, m, n, order):
105 105 if m == n:
106 106 r = baseset([m])
107 107 elif n == node.wdirrev:
108 108 r = spanset(repo, m, len(repo)) + baseset([n])
109 109 elif m == node.wdirrev:
110 110 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
111 111 elif m < n:
112 112 r = spanset(repo, m, n + 1)
113 113 else:
114 114 r = spanset(repo, m, n - 1)
115 115
116 116 if order == defineorder:
117 117 return r & subset
118 118 else:
119 119 # carrying the sorting over when possible would be more efficient
120 120 return subset & r
121 121
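# Endpoint handling of `m:n` in isolation: ascending when m < n, descending
# otherwise (the wdir special cases are omitted):
def _makerange_demo(m, n):
    if m == n:
        return [m]
    return list(range(m, n + 1)) if m < n else list(range(m, n - 1, -1))

assert _makerange_demo(2, 5) == [2, 3, 4, 5]
assert _makerange_demo(5, 2) == [5, 4, 3, 2]   # reversed ranges walk backward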
122 122 def dagrange(repo, subset, x, y, order):
123 123 r = fullreposet(repo)
124 124 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
125 125 includepath=True)
126 126 return subset & xs
127 127
128 128 def andset(repo, subset, x, y, order):
129 129 return getset(repo, getset(repo, subset, x), y)
130 130
131 131 def differenceset(repo, subset, x, y, order):
132 132 return getset(repo, subset, x) - getset(repo, subset, y)
133 133
134 134 def _orsetlist(repo, subset, xs):
135 135 assert xs
136 136 if len(xs) == 1:
137 137 return getset(repo, subset, xs[0])
138 138 p = len(xs) // 2
139 139 a = _orsetlist(repo, subset, xs[:p])
140 140 b = _orsetlist(repo, subset, xs[p:])
141 141 return a + b
142 142
143 143 def orset(repo, subset, x, order):
144 144 xs = getlist(x)
145 145 if order == followorder:
146 146 # slow path to take the subset order
147 147 return subset & _orsetlist(repo, fullreposet(repo), xs)
148 148 else:
149 149 return _orsetlist(repo, subset, xs)
150 150
151 151 def notset(repo, subset, x, order):
152 152 return subset - getset(repo, subset, x)
153 153
154 154 def relationset(repo, subset, x, y, order):
155 155 raise error.ParseError(_("can't use a relation in this context"))
156 156
157 157 def relsubscriptset(repo, subset, x, y, z, order):
158 158 # this is a pretty basic implementation of the 'x#y[z]' operator, still
159 159 # experimental so undocumented. see the wiki for further ideas.
160 160 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
161 161 rel = getsymbol(y)
162 162 n = getinteger(z, _("relation subscript must be an integer"))
163 163
164 164 # TODO: perhaps this should be a table of relation functions
165 165 if rel in ('g', 'generations'):
166 166 # TODO: support range, rewrite tests, and drop startdepth argument
167 167 # from ancestors() and descendants() predicates
168 168 if n <= 0:
169 169 n = -n
170 170 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
171 171 else:
172 172 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
173 173
174 174 raise error.UnknownIdentifier(rel, ['generations'])
175 175
176 176 def subscriptset(repo, subset, x, y, order):
177 177 raise error.ParseError(_("can't use a subscript in this context"))
178 178
179 179 def listset(repo, subset, *xs):
180 180 raise error.ParseError(_("can't use a list in this context"),
181 181 hint=_('see hg help "revsets.x or y"'))
182 182
183 183 def keyvaluepair(repo, subset, k, v):
184 184 raise error.ParseError(_("can't use a key-value pair in this context"))
185 185
186 186 def func(repo, subset, a, b, order):
187 187 f = getsymbol(a)
188 188 if f in symbols:
189 189 func = symbols[f]
190 190 if getattr(func, '_takeorder', False):
191 191 return func(repo, subset, b, order)
192 192 return func(repo, subset, b)
193 193
194 194 keep = lambda fn: getattr(fn, '__doc__', None) is not None
195 195
196 196 syms = [s for (s, fn) in symbols.items() if keep(fn)]
197 197 raise error.UnknownIdentifier(f, syms)
198 198
199 199 # functions
200 200
201 201 # symbols are callables like:
202 202 # fn(repo, subset, x)
203 203 # with:
204 204 # repo - current repository instance
205 205 # subset - of revisions to be examined
206 206 # x - argument in tree form
207 207 symbols = {}
208 208
209 209 # symbols which can't be used for a DoS attack for any given input
210 210 # (e.g. those which accept regexes as plain strings shouldn't be included)
211 211 # functions that just return a lot of changesets (like all) don't count here
212 212 safesymbols = set()
213 213
214 214 predicate = registrar.revsetpredicate()
215 215
216 216 @predicate('_destupdate')
217 217 def _destupdate(repo, subset, x):
218 218 # experimental revset for update destination
219 219 args = getargsdict(x, 'limit', 'clean')
220 220 return subset & baseset([destutil.destupdate(repo, **args)[0]])
221 221
222 222 @predicate('_destmerge')
223 223 def _destmerge(repo, subset, x):
224 224 # experimental revset for merge destination
225 225 sourceset = None
226 226 if x is not None:
227 227 sourceset = getset(repo, fullreposet(repo), x)
228 228 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
229 229
230 230 @predicate('adds(pattern)', safe=True)
231 231 def adds(repo, subset, x):
232 232 """Changesets that add a file matching pattern.
233 233
234 234 The pattern without explicit kind like ``glob:`` is expected to be
235 235 relative to the current directory and match against a file or a
236 236 directory.
237 237 """
238 238 # i18n: "adds" is a keyword
239 239 pat = getstring(x, _("adds requires a pattern"))
240 240 return checkstatus(repo, subset, pat, 1)
241 241
242 242 @predicate('ancestor(*changeset)', safe=True)
243 243 def ancestor(repo, subset, x):
244 244 """A greatest common ancestor of the changesets.
245 245
246 246 Accepts 0 or more changesets.
247 247 Will return an empty list when passed no args.
248 248 The greatest common ancestor of a single changeset is that changeset.
249 249 """
250 250 # i18n: "ancestor" is a keyword
251 251 l = getlist(x)
252 252 rl = fullreposet(repo)
253 253 anc = None
254 254
255 255 # (getset(repo, rl, i) for i in l) generates a list of lists
256 256 for revs in (getset(repo, rl, i) for i in l):
257 257 for r in revs:
258 258 if anc is None:
259 259 anc = repo[r]
260 260 else:
261 261 anc = anc.ancestor(repo[r])
262 262
263 263 if anc is not None and anc.rev() in subset:
264 264 return baseset([anc.rev()])
265 265 return baseset()
266 266
267 267 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
268 268 stopdepth=None):
269 269 heads = getset(repo, fullreposet(repo), x)
270 270 if not heads:
271 271 return baseset()
272 272 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
273 273 return subset & s
274 274
275 275 @predicate('ancestors(set[, depth])', safe=True)
276 276 def ancestors(repo, subset, x):
277 277 """Changesets that are ancestors of changesets in set, including the
278 278 given changesets themselves.
279 279
280 280 If depth is specified, the result only includes changesets up to
281 281 the specified generation.
282 282 """
283 283 # startdepth is for internal use only until we can decide the UI
284 284 args = getargsdict(x, 'ancestors', 'set depth startdepth')
285 285 if 'set' not in args:
286 286 # i18n: "ancestors" is a keyword
287 287 raise error.ParseError(_('ancestors takes at least 1 argument'))
288 288 startdepth = stopdepth = None
289 289 if 'startdepth' in args:
290 290 n = getinteger(args['startdepth'],
291 291 "ancestors expects an integer startdepth")
292 292 if n < 0:
293 293 raise error.ParseError("negative startdepth")
294 294 startdepth = n
295 295 if 'depth' in args:
296 296 # i18n: "ancestors" is a keyword
297 297 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
298 298 if n < 0:
299 299 raise error.ParseError(_("negative depth"))
300 300 stopdepth = n + 1
301 301 return _ancestors(repo, subset, args['set'],
302 302 startdepth=startdepth, stopdepth=stopdepth)
303 303
304 304 @predicate('_firstancestors', safe=True)
305 305 def _firstancestors(repo, subset, x):
306 306 # ``_firstancestors(set)``
307 307 # Like ``ancestors(set)`` but follows only the first parents.
308 308 return _ancestors(repo, subset, x, followfirst=True)
309 309
310 310 def _childrenspec(repo, subset, x, n, order):
311 311 """Changesets that are the Nth child of a changeset
312 312 in set.
313 313 """
314 314 cs = set()
315 315 for r in getset(repo, fullreposet(repo), x):
316 316 for i in range(n):
317 317 c = repo[r].children()
318 318 if len(c) == 0:
319 319 break
320 320 if len(c) > 1:
321 321 raise error.RepoLookupError(
322 322 _("revision in set has more than one child"))
323 323 r = c[0].rev()
324 324 else:
325 325 cs.add(r)
326 326 return subset & cs
327 327
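# _childrenspec leans on Python's for/else: the else-branch runs only when
# the loop was never broken out of, i.e. when all n child steps succeeded.
# In miniature (ambiguity handled by giving up instead of raising):
def _nthchild_demo(children, r, n):
    for _i in range(n):
        cs = children.get(r, ())
        if len(cs) != 1:        # no child, or more than one: give up
            break
        r = cs[0]
    else:
        return r                # reached only if the loop completed
    return None

assert _nthchild_demo({1: (2,), 2: (3,)}, 1, 2) == 3
assert _nthchild_demo({1: (2,)}, 1, 2) is None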
328 328 def ancestorspec(repo, subset, x, n, order):
329 329 """``set~n``
330 330 Changesets that are the Nth ancestor (first parents only) of a changeset
331 331 in set.
332 332 """
333 333 n = getinteger(n, _("~ expects a number"))
334 334 if n < 0:
335 335 # children lookup
336 336 return _childrenspec(repo, subset, x, -n, order)
337 337 ps = set()
338 338 cl = repo.changelog
339 339 for r in getset(repo, fullreposet(repo), x):
340 340 for i in range(n):
341 341 try:
342 342 r = cl.parentrevs(r)[0]
343 343 except error.WdirUnsupported:
344 344 r = repo[r].parents()[0].rev()
345 345 ps.add(r)
346 346 return subset & ps
347 347
348 348 @predicate('author(string)', safe=True)
349 349 def author(repo, subset, x):
350 350 """Alias for ``user(string)``.
351 351 """
352 352 # i18n: "author" is a keyword
353 353 n = getstring(x, _("author requires a string"))
354 354 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
355 355 return subset.filter(lambda x: matcher(repo[x].user()),
356 356 condrepr=('<user %r>', n))
357 357
358 358 @predicate('bisect(string)', safe=True)
359 359 def bisect(repo, subset, x):
360 360 """Changesets marked in the specified bisect status:
361 361
362 362 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
363 363 - ``goods``, ``bads`` : csets topologically good/bad
364 364 - ``range`` : csets taking part in the bisection
365 365 - ``pruned`` : csets that are goods, bads or skipped
366 366 - ``untested`` : csets whose fate is yet unknown
367 367 - ``ignored`` : csets ignored due to DAG topology
368 368 - ``current`` : the cset currently being bisected
369 369 """
370 370 # i18n: "bisect" is a keyword
371 371 status = getstring(x, _("bisect requires a string")).lower()
372 372 state = set(hbisect.get(repo, status))
373 373 return subset & state
374 374
375 375 # Backward-compatibility
376 376 # - no help entry so that we do not advertise it any more
377 377 @predicate('bisected', safe=True)
378 378 def bisected(repo, subset, x):
379 379 return bisect(repo, subset, x)
380 380
381 381 @predicate('bookmark([name])', safe=True)
382 382 def bookmark(repo, subset, x):
383 383 """The named bookmark or all bookmarks.
384 384
385 385 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
386 386 """
387 387 # i18n: "bookmark" is a keyword
388 388 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
389 389 if args:
390 390 bm = getstring(args[0],
391 391 # i18n: "bookmark" is a keyword
392 392 _('the argument to bookmark must be a string'))
393 393 kind, pattern, matcher = util.stringmatcher(bm)
394 394 bms = set()
395 395 if kind == 'literal':
396 396 bmrev = repo._bookmarks.get(pattern, None)
397 397 if not bmrev:
398 398 raise error.RepoLookupError(_("bookmark '%s' does not exist")
399 399 % pattern)
400 400 bms.add(repo[bmrev].rev())
401 401 else:
402 402 matchrevs = set()
403 403 for name, bmrev in repo._bookmarks.iteritems():
404 404 if matcher(name):
405 405 matchrevs.add(bmrev)
406 406 if not matchrevs:
407 407 raise error.RepoLookupError(_("no bookmarks exist"
408 408 " that match '%s'") % pattern)
409 409 for bmrev in matchrevs:
410 410 bms.add(repo[bmrev].rev())
411 411 else:
412 412 bms = {repo[r].rev() for r in repo._bookmarks.values()}
413 413 bms -= {node.nullrev}
414 414 return subset & bms
415 415
416 416 @predicate('branch(string or set)', safe=True)
417 417 def branch(repo, subset, x):
418 418 """
419 419 All changesets belonging to the given branch or the branches of the given
420 420 changesets.
421 421
422 422 Pattern matching is supported for `string`. See
423 423 :hg:`help revisions.patterns`.
424 424 """
425 425 getbi = repo.revbranchcache().branchinfo
426 426 def getbranch(r):
427 427 try:
428 428 return getbi(r)[0]
429 429 except error.WdirUnsupported:
430 430 return repo[r].branch()
431 431
432 432 try:
433 433 b = getstring(x, '')
434 434 except error.ParseError:
435 435 # not a string, but another revspec, e.g. tip()
436 436 pass
437 437 else:
438 438 kind, pattern, matcher = util.stringmatcher(b)
439 439 if kind == 'literal':
440 440 # note: falls through to the revspec case if no branch with
441 441 # this name exists and pattern kind is not specified explicitly
442 442 if pattern in repo.branchmap():
443 443 return subset.filter(lambda r: matcher(getbranch(r)),
444 444 condrepr=('<branch %r>', b))
445 445 if b.startswith('literal:'):
446 446 raise error.RepoLookupError(_("branch '%s' does not exist")
447 447 % pattern)
448 448 else:
449 449 return subset.filter(lambda r: matcher(getbranch(r)),
450 450 condrepr=('<branch %r>', b))
451 451
452 452 s = getset(repo, fullreposet(repo), x)
453 453 b = set()
454 454 for r in s:
455 455 b.add(getbranch(r))
456 456 c = s.__contains__
457 457 return subset.filter(lambda r: c(r) or getbranch(r) in b,
458 458 condrepr=lambda: '<branch %r>' % sorted(b))
459 459
460 460 @predicate('bumped()', safe=True)
461 461 def bumped(repo, subset, x):
462 462 msg = ("'bumped()' is deprecated, "
463 463 "use 'phasedivergent()'")
464 464 repo.ui.deprecwarn(msg, '4.4')
465 465
466 466 return phasedivergent(repo, subset, x)
467 467
468 468 @predicate('phasedivergent()', safe=True)
469 469 def phasedivergent(repo, subset, x):
470 470 """Mutable changesets marked as successors of public changesets.
471 471
472 472 Only non-public and non-obsolete changesets can be `phasedivergent`.
473 473 """
474 474 # i18n: "phasedivergent" is a keyword
475 475 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
476 476 bumped = obsmod.getrevs(repo, 'bumped')
477 477 return subset & bumped
478 478
479 479 @predicate('bundle()', safe=True)
480 480 def bundle(repo, subset, x):
481 481 """Changesets in the bundle.
482 482
483 483 Bundle must be specified by the -R option."""
484 484
485 485 try:
486 486 bundlerevs = repo.changelog.bundlerevs
487 487 except AttributeError:
488 488 raise error.Abort(_("no bundle provided - specify with -R"))
489 489 return subset & bundlerevs
490 490
491 491 def checkstatus(repo, subset, pat, field):
492 492 hasset = matchmod.patkind(pat) == 'set'
493 493
494 494 mcache = [None]
495 495 def matches(x):
496 496 c = repo[x]
497 497 if not mcache[0] or hasset:
498 498 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
499 499 m = mcache[0]
500 500 fname = None
501 501 if not m.anypats() and len(m.files()) == 1:
502 502 fname = m.files()[0]
503 503 if fname is not None:
504 504 if fname not in c.files():
505 505 return False
506 506 else:
507 507 for f in c.files():
508 508 if m(f):
509 509 break
510 510 else:
511 511 return False
512 512 files = repo.status(c.p1().node(), c.node())[field]
513 513 if fname is not None:
514 514 if fname in files:
515 515 return True
516 516 else:
517 517 for f in files:
518 518 if m(f):
519 519 return True
520 520
521 521 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
522 522
523 523 def _children(repo, subset, parentset):
524 524 if not parentset:
525 525 return baseset()
526 526 cs = set()
527 527 pr = repo.changelog.parentrevs
528 528 minrev = parentset.min()
529 529 nullrev = node.nullrev
530 530 for r in subset:
531 531 if r <= minrev:
532 532 continue
533 533 p1, p2 = pr(r)
534 534 if p1 in parentset:
535 535 cs.add(r)
536 536 if p2 != nullrev and p2 in parentset:
537 537 cs.add(r)
538 538 return baseset(cs)
539 539
540 540 @predicate('children(set)', safe=True)
541 541 def children(repo, subset, x):
542 542 """Child changesets of changesets in set.
543 543 """
544 544 s = getset(repo, fullreposet(repo), x)
545 545 cs = _children(repo, subset, s)
546 546 return subset & cs
547 547
548 548 @predicate('closed()', safe=True)
549 549 def closed(repo, subset, x):
550 550 """Changeset is closed.
551 551 """
552 552 # i18n: "closed" is a keyword
553 553 getargs(x, 0, 0, _("closed takes no arguments"))
554 554 return subset.filter(lambda r: repo[r].closesbranch(),
555 555 condrepr='<branch closed>')
556 556
557 557 @predicate('contains(pattern)')
558 558 def contains(repo, subset, x):
559 559 """The revision's manifest contains a file matching pattern (but might not
560 560 modify it). See :hg:`help patterns` for information about file patterns.
561 561
562 562 The pattern without explicit kind like ``glob:`` is expected to be
563 563 relative to the current directory and match against a file exactly
564 564 for efficiency.
565 565 """
566 566 # i18n: "contains" is a keyword
567 567 pat = getstring(x, _("contains requires a pattern"))
568 568
569 569 def matches(x):
570 570 if not matchmod.patkind(pat):
571 571 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
572 572 if pats in repo[x]:
573 573 return True
574 574 else:
575 575 c = repo[x]
576 576 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
577 577 for f in c.manifest():
578 578 if m(f):
579 579 return True
580 580 return False
581 581
582 582 return subset.filter(matches, condrepr=('<contains %r>', pat))
583 583
584 584 @predicate('converted([id])', safe=True)
585 585 def converted(repo, subset, x):
586 586 """Changesets converted from the given identifier in the old repository if
587 587 present, or all converted changesets if no identifier is specified.
588 588 """
589 589
590 590 # There is exactly no chance of resolving the revision, so do a simple
591 591 # string compare and hope for the best
592 592
593 593 rev = None
594 594 # i18n: "converted" is a keyword
595 595 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
596 596 if l:
597 597 # i18n: "converted" is a keyword
598 598 rev = getstring(l[0], _('converted requires a revision'))
599 599
600 600 def _matchvalue(r):
601 601 source = repo[r].extra().get('convert_revision', None)
602 602 return source is not None and (rev is None or source.startswith(rev))
603 603
604 604 return subset.filter(lambda r: _matchvalue(r),
605 605 condrepr=('<converted %r>', rev))
606 606
607 607 @predicate('date(interval)', safe=True)
608 608 def date(repo, subset, x):
609 609 """Changesets within the interval, see :hg:`help dates`.
610 610 """
611 611 # i18n: "date" is a keyword
612 612 ds = getstring(x, _("date requires a string"))
613 613 dm = util.matchdate(ds)
614 614 return subset.filter(lambda x: dm(repo[x].date()[0]),
615 615 condrepr=('<date %r>', ds))
616 616
617 617 @predicate('desc(string)', safe=True)
618 618 def desc(repo, subset, x):
619 619 """Search commit message for string. The match is case-insensitive.
620 620
621 621 Pattern matching is supported for `string`. See
622 622 :hg:`help revisions.patterns`.
623 623 """
624 624 # i18n: "desc" is a keyword
625 625 ds = getstring(x, _("desc requires a string"))
626 626
627 627 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
628 628
629 629 return subset.filter(lambda r: matcher(repo[r].description()),
630 630 condrepr=('<desc %r>', ds))
631 631
632 632 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
633 633 stopdepth=None):
634 634 roots = getset(repo, fullreposet(repo), x)
635 635 if not roots:
636 636 return baseset()
637 637 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
638 638 return subset & s
639 639
640 640 @predicate('descendants(set[, depth])', safe=True)
641 641 def descendants(repo, subset, x):
642 642 """Changesets which are descendants of changesets in set, including the
643 643 given changesets themselves.
644 644
645 645 If depth is specified, the result only includes changesets up to
646 646 the specified generation.
647 647 """
648 648 # startdepth is for internal use only until we can decide the UI
649 649 args = getargsdict(x, 'descendants', 'set depth startdepth')
650 650 if 'set' not in args:
651 651 # i18n: "descendants" is a keyword
652 652 raise error.ParseError(_('descendants takes at least 1 argument'))
653 653 startdepth = stopdepth = None
654 654 if 'startdepth' in args:
655 655 n = getinteger(args['startdepth'],
656 656 "descendants expects an integer startdepth")
657 657 if n < 0:
658 658 raise error.ParseError("negative startdepth")
659 659 startdepth = n
660 660 if 'depth' in args:
661 661 # i18n: "descendants" is a keyword
662 662 n = getinteger(args['depth'], _("descendants expects an integer depth"))
663 663 if n < 0:
664 664 raise error.ParseError(_("negative depth"))
665 665 stopdepth = n + 1
666 666 return _descendants(repo, subset, args['set'],
667 667 startdepth=startdepth, stopdepth=stopdepth)
668 668
669 669 @predicate('_firstdescendants', safe=True)
670 670 def _firstdescendants(repo, subset, x):
671 671 # ``_firstdescendants(set)``
672 672 # Like ``descendants(set)`` but follows only the first parents.
673 673 return _descendants(repo, subset, x, followfirst=True)
674 674
675 675 @predicate('destination([set])', safe=True)
676 676 def destination(repo, subset, x):
677 677 """Changesets that were created by a graft, transplant or rebase operation,
678 678 with the given revisions specified as the source. Omitting the optional set
679 679 is the same as passing all().
680 680 """
681 681 if x is not None:
682 682 sources = getset(repo, fullreposet(repo), x)
683 683 else:
684 684 sources = fullreposet(repo)
685 685
686 686 dests = set()
687 687
688 688 # subset contains all of the possible destinations that can be returned, so
689 689 # iterate over them and see if their source(s) were provided in the arg set.
690 690 # Even if the immediate src of r is not in the arg set, src's source (or
691 691 # further back) may be. Scanning back further than the immediate src allows
692 692 # transitive transplants and rebases to yield the same results as transitive
693 693 # grafts.
694 694 for r in subset:
695 695 src = _getrevsource(repo, r)
696 696 lineage = None
697 697
698 698 while src is not None:
699 699 if lineage is None:
700 700 lineage = list()
701 701
702 702 lineage.append(r)
703 703
704 704 # The visited lineage is a match if the current source is in the arg
705 705 # set. Since every candidate dest is visited by way of iterating
706 706 # subset, any dests further back in the lineage will be tested by a
707 707 # different iteration over subset. Likewise, if the src was already
708 708 # selected, the current lineage can be selected without going back
709 709 # further.
710 710 if src in sources or src in dests:
711 711 dests.update(lineage)
712 712 break
713 713
714 714 r = src
715 715 src = _getrevsource(repo, r)
716 716
717 717 return subset.filter(dests.__contains__,
718 718 condrepr=lambda: '<destination %r>' % sorted(dests))
719 719
720 720 @predicate('divergent()', safe=True)
721 721 def divergent(repo, subset, x):
722 722 msg = ("'divergent()' is deprecated, "
723 723 "use 'contentdivergent()'")
724 724 repo.ui.deprecwarn(msg, '4.4')
725 725
726 726 return contentdivergent(repo, subset, x)
727 727
728 728 @predicate('contentdivergent()', safe=True)
729 729 def contentdivergent(repo, subset, x):
730 730 """
731 731 Final successors of changesets with an alternative set of final successors.
732 732 """
733 733 # i18n: "contentdivergent" is a keyword
734 734 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
735 divergent = obsmod.getrevs(repo, 'divergent')
736 return subset & divergent
735 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
736 return subset & contentdivergent
737 737
738 738 @predicate('extinct()', safe=True)
739 739 def extinct(repo, subset, x):
740 740 """Obsolete changesets with obsolete descendants only.
741 741 """
742 742 # i18n: "extinct" is a keyword
743 743 getargs(x, 0, 0, _("extinct takes no arguments"))
744 744 extincts = obsmod.getrevs(repo, 'extinct')
745 745 return subset & extincts
746 746
747 747 @predicate('extra(label, [value])', safe=True)
748 748 def extra(repo, subset, x):
749 749 """Changesets with the given label in the extra metadata, with the given
750 750 optional value.
751 751
752 752 Pattern matching is supported for `value`. See
753 753 :hg:`help revisions.patterns`.
754 754 """
755 755 args = getargsdict(x, 'extra', 'label value')
756 756 if 'label' not in args:
757 757 # i18n: "extra" is a keyword
758 758 raise error.ParseError(_('extra takes at least 1 argument'))
759 759 # i18n: "extra" is a keyword
760 760 label = getstring(args['label'], _('first argument to extra must be '
761 761 'a string'))
762 762 value = None
763 763
764 764 if 'value' in args:
765 765 # i18n: "extra" is a keyword
766 766 value = getstring(args['value'], _('second argument to extra must be '
767 767 'a string'))
768 768 kind, value, matcher = util.stringmatcher(value)
769 769
770 770 def _matchvalue(r):
771 771 extra = repo[r].extra()
772 772 return label in extra and (value is None or matcher(extra[label]))
773 773
774 774 return subset.filter(lambda r: _matchvalue(r),
775 775 condrepr=('<extra[%r] %r>', label, value))
776 776
777 777 @predicate('filelog(pattern)', safe=True)
778 778 def filelog(repo, subset, x):
779 779 """Changesets connected to the specified filelog.
780 780
781 781 For performance reasons, visits only revisions mentioned in the file-level
782 782 filelog, rather than filtering through all changesets (much faster, but
783 783 doesn't include deletes or duplicate changes). For a slower, more accurate
784 784 result, use ``file()``.
785 785
786 786 The pattern without explicit kind like ``glob:`` is expected to be
787 787 relative to the current directory and match against a file exactly
788 788 for efficiency.
789 789
790 790 If some linkrev points to revisions filtered by the current repoview, we'll
791 791 work around it to return a non-filtered value.
792 792 """
793 793
794 794 # i18n: "filelog" is a keyword
795 795 pat = getstring(x, _("filelog requires a pattern"))
796 796 s = set()
797 797 cl = repo.changelog
798 798
799 799 if not matchmod.patkind(pat):
800 800 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
801 801 files = [f]
802 802 else:
803 803 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
804 804 files = (f for f in repo[None] if m(f))
805 805
806 806 for f in files:
807 807 fl = repo.file(f)
808 808 known = {}
809 809 scanpos = 0
810 810 for fr in list(fl):
811 811 fn = fl.node(fr)
812 812 if fn in known:
813 813 s.add(known[fn])
814 814 continue
815 815
816 816 lr = fl.linkrev(fr)
817 817 if lr in cl:
818 818 s.add(lr)
819 819 elif scanpos is not None:
820 820 # lowest matching changeset is filtered, scan further
821 821 # ahead in changelog
822 822 start = max(lr, scanpos) + 1
823 823 scanpos = None
824 824 for r in cl.revs(start):
825 825 # minimize parsing of non-matching entries
826 826 if f in cl.revision(r) and f in cl.readfiles(r):
827 827 try:
828 828 # try to use manifest delta fastpath
829 829 n = repo[r].filenode(f)
830 830 if n not in known:
831 831 if n == fn:
832 832 s.add(r)
833 833 scanpos = r
834 834 break
835 835 else:
836 836 known[n] = r
837 837 except error.ManifestLookupError:
838 838 # deletion in changelog
839 839 continue
840 840
841 841 return subset & s
842 842
843 843 @predicate('first(set, [n])', safe=True, takeorder=True)
844 844 def first(repo, subset, x, order):
845 845 """An alias for limit().
846 846 """
847 847 return limit(repo, subset, x, order)
848 848
849 849 def _follow(repo, subset, x, name, followfirst=False):
850 850 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
851 851 "and an optional revset") % name)
852 852 c = repo['.']
853 853 if l:
854 854 x = getstring(l[0], _("%s expected a pattern") % name)
855 855 rev = None
856 856 if len(l) >= 2:
857 857 revs = getset(repo, fullreposet(repo), l[1])
858 858 if len(revs) != 1:
859 859 raise error.RepoLookupError(
860 860 _("%s expected one starting revision") % name)
861 861 rev = revs.last()
862 862 c = repo[rev]
863 863 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
864 864 ctx=repo[rev], default='path')
865 865
866 866 files = c.manifest().walk(matcher)
867 867
868 868 s = set()
869 869 for fname in files:
870 870 fctx = c[fname]
871 871 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
872 872 # include the revision responsible for the most recent version
873 873 s.add(fctx.introrev())
874 874 else:
875 875 s = dagop.revancestors(repo, baseset([c.rev()]), followfirst)
876 876
877 877 return subset & s
878 878
879 879 @predicate('follow([pattern[, startrev]])', safe=True)
880 880 def follow(repo, subset, x):
881 881 """
882 882 An alias for ``::.`` (ancestors of the working directory's first parent).
883 883 If pattern is specified, the histories of files matching given
884 884 pattern in the revision given by startrev are followed, including copies.
885 885 """
886 886 return _follow(repo, subset, x, 'follow')
887 887
888 888 @predicate('_followfirst', safe=True)
889 889 def _followfirst(repo, subset, x):
890 890 # ``followfirst([pattern[, startrev]])``
891 891 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
892 892 # of every revisions or files revisions.
893 893 return _follow(repo, subset, x, '_followfirst', followfirst=True)
894 894
895 895 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
896 896 safe=True)
897 897 def followlines(repo, subset, x):
898 898 """Changesets modifying `file` in line range ('fromline', 'toline').
899 899
900 900 Line range corresponds to 'file' content at 'startrev' and should hence be
901 901 consistent with file size. If startrev is not specified, working directory's
902 902 parent is used.
903 903
904 904 By default, ancestors of 'startrev' are returned. If 'descend' is True,
905 905 descendants of 'startrev' are returned though renames are (currently) not
906 906 followed in this direction.
907 907 """
908 908 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
909 909 if len(args['lines']) != 1:
910 910 raise error.ParseError(_("followlines requires a line range"))
911 911
912 912 rev = '.'
913 913 if 'startrev' in args:
914 914 revs = getset(repo, fullreposet(repo), args['startrev'])
915 915 if len(revs) != 1:
916 916 raise error.ParseError(
917 917 # i18n: "followlines" is a keyword
918 918 _("followlines expects exactly one revision"))
919 919 rev = revs.last()
920 920
921 921 pat = getstring(args['file'], _("followlines requires a pattern"))
922 922 if not matchmod.patkind(pat):
923 923 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
924 924 else:
925 925 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
926 926 files = [f for f in repo[rev] if m(f)]
927 927 if len(files) != 1:
928 928 # i18n: "followlines" is a keyword
929 929 raise error.ParseError(_("followlines expects exactly one file"))
930 930 fname = files[0]
931 931
932 932 # i18n: "followlines" is a keyword
933 933 lr = getrange(args['lines'][0], _("followlines expects a line range"))
934 934 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
935 935 for a in lr]
936 936 fromline, toline = util.processlinerange(fromline, toline)
937 937
938 938 fctx = repo[rev].filectx(fname)
939 939 descend = False
940 940 if 'descend' in args:
941 941 descend = getboolean(args['descend'],
942 942 # i18n: "descend" is a keyword
943 943 _("descend argument must be a boolean"))
944 944 if descend:
945 945 rs = generatorset(
946 946 (c.rev() for c, _linerange
947 947 in dagop.blockdescendants(fctx, fromline, toline)),
948 948 iterasc=True)
949 949 else:
950 950 rs = generatorset(
951 951 (c.rev() for c, _linerange
952 952 in dagop.blockancestors(fctx, fromline, toline)),
953 953 iterasc=False)
954 954 return subset & rs
955 955
956 956 @predicate('all()', safe=True)
957 957 def getall(repo, subset, x):
958 958 """All changesets, the same as ``0:tip``.
959 959 """
960 960 # i18n: "all" is a keyword
961 961 getargs(x, 0, 0, _("all takes no arguments"))
962 962 return subset & spanset(repo) # drop "null" if any
963 963
964 964 @predicate('grep(regex)')
965 965 def grep(repo, subset, x):
966 966 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
967 967 to ensure special escape characters are handled correctly. Unlike
968 968 ``keyword(string)``, the match is case-sensitive.
969 969 """
970 970 try:
971 971 # i18n: "grep" is a keyword
972 972 gr = re.compile(getstring(x, _("grep requires a string")))
973 973 except re.error as e:
974 974 raise error.ParseError(_('invalid match pattern: %s') % e)
975 975
976 976 def matches(x):
977 977 c = repo[x]
978 978 for e in c.files() + [c.user(), c.description()]:
979 979 if gr.search(e):
980 980 return True
981 981 return False
982 982
983 983 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
984 984
985 985 @predicate('_matchfiles', safe=True)
986 986 def _matchfiles(repo, subset, x):
987 987 # _matchfiles takes a revset list of prefixed arguments:
988 988 #
989 989 # [p:foo, i:bar, x:baz]
990 990 #
991 991 # builds a match object from them and filters subset. Allowed
992 992 # prefixes are 'p:' for regular patterns, 'i:' for include
993 993 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
994 994 # a revision identifier, or the empty string to reference the
995 995 # working directory, from which the match object is
996 996 # initialized. Use 'd:' to set the default matching mode, default
997 997 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
998 998
999 999 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1000 1000 pats, inc, exc = [], [], []
1001 1001 rev, default = None, None
1002 1002 for arg in l:
1003 1003 s = getstring(arg, "_matchfiles requires string arguments")
1004 1004 prefix, value = s[:2], s[2:]
1005 1005 if prefix == 'p:':
1006 1006 pats.append(value)
1007 1007 elif prefix == 'i:':
1008 1008 inc.append(value)
1009 1009 elif prefix == 'x:':
1010 1010 exc.append(value)
1011 1011 elif prefix == 'r:':
1012 1012 if rev is not None:
1013 1013 raise error.ParseError('_matchfiles expected at most one '
1014 1014 'revision')
1015 1015 if value != '': # empty means working directory; leave rev as None
1016 1016 rev = value
1017 1017 elif prefix == 'd:':
1018 1018 if default is not None:
1019 1019 raise error.ParseError('_matchfiles expected at most one '
1020 1020 'default mode')
1021 1021 default = value
1022 1022 else:
1023 1023 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1024 1024 if not default:
1025 1025 default = 'glob'
1026 1026
1027 1027 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1028 1028 exclude=exc, ctx=repo[rev], default=default)
1029 1029
1030 1030 # This directly reads the changelog data, as creating a changectx for all
1031 1031 # revisions is quite expensive.
1032 1032 getfiles = repo.changelog.readfiles
1033 1033 wdirrev = node.wdirrev
1034 1034 def matches(x):
1035 1035 if x == wdirrev:
1036 1036 files = repo[x].files()
1037 1037 else:
1038 1038 files = getfiles(x)
1039 1039 for f in files:
1040 1040 if m(f):
1041 1041 return True
1042 1042 return False
1043 1043
1044 1044 return subset.filter(matches,
1045 1045 condrepr=('<matchfiles patterns=%r, include=%r '
1046 1046 'exclude=%r, default=%r, rev=%r>',
1047 1047 pats, inc, exc, default, rev))
1048 1048
1049 1049 @predicate('file(pattern)', safe=True)
1050 1050 def hasfile(repo, subset, x):
1051 1051 """Changesets affecting files matched by pattern.
1052 1052
1053 1053 For a faster but less accurate result, consider using ``filelog()``
1054 1054 instead.
1055 1055
1056 1056 This predicate uses ``glob:`` as the default kind of pattern.
1057 1057 """
1058 1058 # i18n: "file" is a keyword
1059 1059 pat = getstring(x, _("file requires a pattern"))
1060 1060 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1061 1061
1062 1062 @predicate('head()', safe=True)
1063 1063 def head(repo, subset, x):
1064 1064 """Changeset is a named branch head.
1065 1065 """
1066 1066 # i18n: "head" is a keyword
1067 1067 getargs(x, 0, 0, _("head takes no arguments"))
1068 1068 hs = set()
1069 1069 cl = repo.changelog
1070 1070 for ls in repo.branchmap().itervalues():
1071 1071 hs.update(cl.rev(h) for h in ls)
1072 1072 return subset & baseset(hs)
1073 1073
1074 1074 @predicate('heads(set)', safe=True)
1075 1075 def heads(repo, subset, x):
1076 1076 """Members of set with no children in set.
1077 1077 """
1078 1078 s = getset(repo, subset, x)
1079 1079 ps = parents(repo, subset, x)
1080 1080 return s - ps
1081 1081
1082 1082 @predicate('hidden()', safe=True)
1083 1083 def hidden(repo, subset, x):
1084 1084 """Hidden changesets.
1085 1085 """
1086 1086 # i18n: "hidden" is a keyword
1087 1087 getargs(x, 0, 0, _("hidden takes no arguments"))
1088 1088 hiddenrevs = repoview.filterrevs(repo, 'visible')
1089 1089 return subset & hiddenrevs
1090 1090
1091 1091 @predicate('keyword(string)', safe=True)
1092 1092 def keyword(repo, subset, x):
1093 1093 """Search commit message, user name, and names of changed files for
1094 1094 string. The match is case-insensitive.
1095 1095
1096 1096 For a regular expression or case sensitive search of these fields, use
1097 1097 ``grep(regex)``.
1098 1098 """
1099 1099 # i18n: "keyword" is a keyword
1100 1100 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1101 1101
1102 1102 def matches(r):
1103 1103 c = repo[r]
1104 1104 return any(kw in encoding.lower(t)
1105 1105 for t in c.files() + [c.user(), c.description()])
1106 1106
1107 1107 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1108 1108
1109 1109 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True)
1110 1110 def limit(repo, subset, x, order):
1111 1111 """First n members of set, defaulting to 1, starting from offset.
1112 1112 """
1113 1113 args = getargsdict(x, 'limit', 'set n offset')
1114 1114 if 'set' not in args:
1115 1115 # i18n: "limit" is a keyword
1116 1116 raise error.ParseError(_("limit requires one to three arguments"))
1117 1117 # i18n: "limit" is a keyword
1118 1118 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1119 1119 if lim < 0:
1120 1120 raise error.ParseError(_("negative number to select"))
1121 1121 # i18n: "limit" is a keyword
1122 1122 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1123 1123 if ofs < 0:
1124 1124 raise error.ParseError(_("negative offset"))
1125 1125 os = getset(repo, fullreposet(repo), args['set'])
1126 1126 ls = os.slice(ofs, ofs + lim)
1127 1127 if order == followorder and lim > 1:
1128 1128 return subset & ls
1129 1129 return ls & subset
1130 1130
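# limit(set, n, offset) reduces to a slice of the ordered input; in list
# terms:
def _limit_demo(revs, n=1, offset=0):
    return revs[offset:offset + n]

assert _limit_demo([7, 8, 9, 10], n=2, offset=1) == [8, 9]
assert _limit_demo([7, 8, 9, 10]) == [7]    # defaults: the first element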
1131 1131 @predicate('last(set, [n])', safe=True, takeorder=True)
1132 1132 def last(repo, subset, x, order):
1133 1133 """Last n members of set, defaulting to 1.
1134 1134 """
1135 1135 # i18n: "last" is a keyword
1136 1136 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1137 1137 lim = 1
1138 1138 if len(l) == 2:
1139 1139 # i18n: "last" is a keyword
1140 1140 lim = getinteger(l[1], _("last expects a number"))
1141 1141 if lim < 0:
1142 1142 raise error.ParseError(_("negative number to select"))
1143 1143 os = getset(repo, fullreposet(repo), l[0])
1144 1144 os.reverse()
1145 1145 ls = os.slice(0, lim)
1146 1146 if order == followorder and lim > 1:
1147 1147 return subset & ls
1148 1148 ls.reverse()
1149 1149 return ls & subset
1150 1150
1151 1151 @predicate('max(set)', safe=True)
1152 1152 def maxrev(repo, subset, x):
1153 1153 """Changeset with highest revision number in set.
1154 1154 """
1155 1155 os = getset(repo, fullreposet(repo), x)
1156 1156 try:
1157 1157 m = os.max()
1158 1158 if m in subset:
1159 1159 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1160 1160 except ValueError:
1161 1161 # os.max() throws a ValueError when the collection is empty.
1162 1162 # Same as python's max().
1163 1163 pass
1164 1164 return baseset(datarepr=('<max %r, %r>', subset, os))
1165 1165
1166 1166 @predicate('merge()', safe=True)
1167 1167 def merge(repo, subset, x):
1168 1168 """Changeset is a merge changeset.
1169 1169 """
1170 1170 # i18n: "merge" is a keyword
1171 1171 getargs(x, 0, 0, _("merge takes no arguments"))
1172 1172 cl = repo.changelog
1173 1173 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1174 1174 condrepr='<merge>')
1175 1175
1176 1176 @predicate('branchpoint()', safe=True)
1177 1177 def branchpoint(repo, subset, x):
1178 1178 """Changesets with more than one child.
1179 1179 """
1180 1180 # i18n: "branchpoint" is a keyword
1181 1181 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1182 1182 cl = repo.changelog
1183 1183 if not subset:
1184 1184 return baseset()
1185 1185 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1186 1186 # (and if it is not, it should.)
1187 1187 baserev = min(subset)
1188 1188 parentscount = [0]*(len(repo) - baserev)
1189 1189 for r in cl.revs(start=baserev + 1):
1190 1190 for p in cl.parentrevs(r):
1191 1191 if p >= baserev:
1192 1192 parentscount[p - baserev] += 1
1193 1193 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1194 1194 condrepr='<branchpoint>')
1195 1195
1196 1196 @predicate('min(set)', safe=True)
1197 1197 def minrev(repo, subset, x):
1198 1198 """Changeset with lowest revision number in set.
1199 1199 """
1200 1200 os = getset(repo, fullreposet(repo), x)
1201 1201 try:
1202 1202 m = os.min()
1203 1203 if m in subset:
1204 1204 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1205 1205 except ValueError:
1206 1206 # os.min() throws a ValueError when the collection is empty.
1207 1207 # Same as python's min().
1208 1208 pass
1209 1209 return baseset(datarepr=('<min %r, %r>', subset, os))
1210 1210
1211 1211 @predicate('modifies(pattern)', safe=True)
1212 1212 def modifies(repo, subset, x):
1213 1213 """Changesets modifying files matched by pattern.
1214 1214
1215 1215 The pattern without explicit kind like ``glob:`` is expected to be
1216 1216 relative to the current directory and match against a file or a
1217 1217 directory.
1218 1218 """
1219 1219 # i18n: "modifies" is a keyword
1220 1220 pat = getstring(x, _("modifies requires a pattern"))
1221 1221 return checkstatus(repo, subset, pat, 0)
1222 1222
1223 1223 @predicate('named(namespace)')
1224 1224 def named(repo, subset, x):
1225 1225 """The changesets in a given namespace.
1226 1226
1227 1227 Pattern matching is supported for `namespace`. See
1228 1228 :hg:`help revisions.patterns`.
1229 1229 """
1230 1230 # i18n: "named" is a keyword
1231 1231 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1232 1232
1233 1233 ns = getstring(args[0],
1234 1234 # i18n: "named" is a keyword
1235 1235 _('the argument to named must be a string'))
1236 1236 kind, pattern, matcher = util.stringmatcher(ns)
1237 1237 namespaces = set()
1238 1238 if kind == 'literal':
1239 1239 if pattern not in repo.names:
1240 1240 raise error.RepoLookupError(_("namespace '%s' does not exist")
1241 1241 % ns)
1242 1242 namespaces.add(repo.names[pattern])
1243 1243 else:
1244 1244 for name, ns in repo.names.iteritems():
1245 1245 if matcher(name):
1246 1246 namespaces.add(ns)
1247 1247 if not namespaces:
1248 1248 raise error.RepoLookupError(_("no namespace exists"
1249 1249 " that matches '%s'") % pattern)
1250 1250
1251 1251 names = set()
1252 1252 for ns in namespaces:
1253 1253 for name in ns.listnames(repo):
1254 1254 if name not in ns.deprecated:
1255 1255 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1256 1256
1257 1257 names -= {node.nullrev}
1258 1258 return subset & names
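# Usage sketch: 'named(bookmarks)' selects every bookmarked revision,
# while a pattern such as 'named("re:b.*")' selects revisions from every
# namespace whose name matches the regular expression (e.g. bookmarks and
# branches).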
1259 1259
1260 1260 @predicate('id(string)', safe=True)
1261 1261 def node_(repo, subset, x):
1262 1262 """Revision non-ambiguously specified by the given hex string prefix.
1263 1263 """
1264 1264 # i18n: "id" is a keyword
1265 1265 l = getargs(x, 1, 1, _("id requires one argument"))
1266 1266 # i18n: "id" is a keyword
1267 1267 n = getstring(l[0], _("id requires a string"))
1268 1268 if len(n) == 40:
1269 1269 try:
1270 1270 rn = repo.changelog.rev(node.bin(n))
1271 1271 except error.WdirUnsupported:
1272 1272 rn = node.wdirrev
1273 1273 except (LookupError, TypeError):
1274 1274 rn = None
1275 1275 else:
1276 1276 rn = None
1277 1277 try:
1278 1278 pm = repo.changelog._partialmatch(n)
1279 1279 if pm is not None:
1280 1280 rn = repo.changelog.rev(pm)
1281 1281 except error.WdirUnsupported:
1282 1282 rn = node.wdirrev
1283 1283
1284 1284 if rn is None:
1285 1285 return baseset()
1286 1286 result = baseset([rn])
1287 1287 return result & subset
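# Sketch: a full 40-digit hex string is resolved exactly through
# cl.rev(bin(n)); anything shorter falls through to _partialmatch, so
# 'id(c3f1ca2)' finds the unique changeset whose hash starts with that
# (illustrative) prefix, or an empty set if none does.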
1288 1288
1289 1289 @predicate('obsolete()', safe=True)
1290 1290 def obsolete(repo, subset, x):
1291 1291 """Mutable changeset with a newer version."""
1292 1292 # i18n: "obsolete" is a keyword
1293 1293 getargs(x, 0, 0, _("obsolete takes no arguments"))
1294 1294 obsoletes = obsmod.getrevs(repo, 'obsolete')
1295 1295 return subset & obsoletes
1296 1296
1297 1297 @predicate('only(set, [set])', safe=True)
1298 1298 def only(repo, subset, x):
1299 1299 """Changesets that are ancestors of the first set that are not ancestors
1300 1300 of any other head in the repo. If a second set is specified, the result
1301 1301 is ancestors of the first set that are not ancestors of the second set
1302 1302 (i.e. ::<set1> - ::<set2>).
1303 1303 """
1304 1304 cl = repo.changelog
1305 1305 # i18n: "only" is a keyword
1306 1306 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1307 1307 include = getset(repo, fullreposet(repo), args[0])
1308 1308 if len(args) == 1:
1309 1309 if not include:
1310 1310 return baseset()
1311 1311
1312 1312 descendants = set(dagop.revdescendants(repo, include, False))
1313 1313 exclude = [rev for rev in cl.headrevs()
1314 1314 if rev not in descendants and rev not in include]
1315 1315 else:
1316 1316 exclude = getset(repo, fullreposet(repo), args[1])
1317 1317
1318 1318 results = set(cl.findmissingrevs(common=exclude, heads=include))
1319 1319 # XXX we should turn this into a baseset instead of a set, smartset may do
1320 1320 # some optimizations from the fact this is a baseset.
1321 1321 return subset & results
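# Equivalence sketch: with two arguments, 'only(A, B)' is the documented
# '::A - ::B'; for instance 'only(feature, default)' selects what the
# (illustrative) feature head adds on top of default.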
1322 1322
1323 1323 @predicate('origin([set])', safe=True)
1324 1324 def origin(repo, subset, x):
1325 1325 """
1326 1326 Changesets that were specified as a source for the grafts, transplants or
1327 1327 rebases that created the given revisions. Omitting the optional set is the
1328 1328 same as passing all(). If a changeset created by these operations is itself
1329 1329 specified as a source for one of these operations, only the source changeset
1330 1330 for the first operation is selected.
1331 1331 """
1332 1332 if x is not None:
1333 1333 dests = getset(repo, fullreposet(repo), x)
1334 1334 else:
1335 1335 dests = fullreposet(repo)
1336 1336
1337 1337 def _firstsrc(rev):
1338 1338 src = _getrevsource(repo, rev)
1339 1339 if src is None:
1340 1340 return None
1341 1341
1342 1342 while True:
1343 1343 prev = _getrevsource(repo, src)
1344 1344
1345 1345 if prev is None:
1346 1346 return src
1347 1347 src = prev
1348 1348
1349 1349 o = {_firstsrc(r) for r in dests}
1350 1350 o -= {None}
1351 1351 # XXX we should turn this into a baseset instead of a set, smartset may do
1352 1352 # some optimizations from the fact this is a baseset.
1353 1353 return subset & o
1354 1354
1355 1355 @predicate('outgoing([path])', safe=False)
1356 1356 def outgoing(repo, subset, x):
1357 1357 """Changesets not found in the specified destination repository, or the
1358 1358 default push location.
1359 1359 """
1360 1360 # Avoid cycles.
1361 1361 from . import (
1362 1362 discovery,
1363 1363 hg,
1364 1364 )
1365 1365 # i18n: "outgoing" is a keyword
1366 1366 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1367 1367 # i18n: "outgoing" is a keyword
1368 1368 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1369 1369 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1370 1370 dest, branches = hg.parseurl(dest)
1371 1371 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1372 1372 if revs:
1373 1373 revs = [repo.lookup(rev) for rev in revs]
1374 1374 other = hg.peer(repo, {}, dest)
1375 1375 repo.ui.pushbuffer()
1376 1376 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1377 1377 repo.ui.popbuffer()
1378 1378 cl = repo.changelog
1379 1379 o = {cl.rev(r) for r in outgoing.missing}
1380 1380 return subset & o
1381 1381
1382 1382 @predicate('p1([set])', safe=True)
1383 1383 def p1(repo, subset, x):
1384 1384 """First parent of changesets in set, or the working directory.
1385 1385 """
1386 1386 if x is None:
1387 1387 p = repo[x].p1().rev()
1388 1388 if p >= 0:
1389 1389 return subset & baseset([p])
1390 1390 return baseset()
1391 1391
1392 1392 ps = set()
1393 1393 cl = repo.changelog
1394 1394 for r in getset(repo, fullreposet(repo), x):
1395 1395 try:
1396 1396 ps.add(cl.parentrevs(r)[0])
1397 1397 except error.WdirUnsupported:
1398 1398 ps.add(repo[r].parents()[0].rev())
1399 1399 ps -= {node.nullrev}
1400 1400 # XXX we should turn this into a baseset instead of a set, smartset may do
1401 1401 # some optimizations from the fact this is a baseset.
1402 1402 return subset & ps
1403 1403
1404 1404 @predicate('p2([set])', safe=True)
1405 1405 def p2(repo, subset, x):
1406 1406 """Second parent of changesets in set, or the working directory.
1407 1407 """
1408 1408 if x is None:
1409 1409 ps = repo[x].parents()
1410 1410 try:
1411 1411 p = ps[1].rev()
1412 1412 if p >= 0:
1413 1413 return subset & baseset([p])
1414 1414 return baseset()
1415 1415 except IndexError:
1416 1416 return baseset()
1417 1417
1418 1418 ps = set()
1419 1419 cl = repo.changelog
1420 1420 for r in getset(repo, fullreposet(repo), x):
1421 1421 try:
1422 1422 ps.add(cl.parentrevs(r)[1])
1423 1423 except error.WdirUnsupported:
1424 1424 parents = repo[r].parents()
1425 1425 if len(parents) == 2:
1426 1426 ps.add(parents[1].rev())
1427 1427 ps -= {node.nullrev}
1428 1428 # XXX we should turn this into a baseset instead of a set, smartset may do
1429 1429 # some optimizations from the fact this is a baseset.
1430 1430 return subset & ps
1431 1431
1432 1432 def parentpost(repo, subset, x, order):
1433 1433 return p1(repo, subset, x)
1434 1434
1435 1435 @predicate('parents([set])', safe=True)
1436 1436 def parents(repo, subset, x):
1437 1437 """
1438 1438 The set of all parents for all changesets in set, or the working directory.
1439 1439 """
1440 1440 if x is None:
1441 1441 ps = set(p.rev() for p in repo[x].parents())
1442 1442 else:
1443 1443 ps = set()
1444 1444 cl = repo.changelog
1445 1445 up = ps.update
1446 1446 parentrevs = cl.parentrevs
1447 1447 for r in getset(repo, fullreposet(repo), x):
1448 1448 try:
1449 1449 up(parentrevs(r))
1450 1450 except error.WdirUnsupported:
1451 1451 up(p.rev() for p in repo[r].parents())
1452 1452 ps -= {node.nullrev}
1453 1453 return subset & ps
1454 1454
1455 1455 def _phase(repo, subset, *targets):
1456 1456 """helper to select all rev in <targets> phases"""
1457 1457 s = repo._phasecache.getrevset(repo, targets)
1458 1458 return subset & s
1459 1459
1460 1460 @predicate('draft()', safe=True)
1461 1461 def draft(repo, subset, x):
1462 1462 """Changeset in draft phase."""
1463 1463 # i18n: "draft" is a keyword
1464 1464 getargs(x, 0, 0, _("draft takes no arguments"))
1465 1465 target = phases.draft
1466 1466 return _phase(repo, subset, target)
1467 1467
1468 1468 @predicate('secret()', safe=True)
1469 1469 def secret(repo, subset, x):
1470 1470 """Changeset in secret phase."""
1471 1471 # i18n: "secret" is a keyword
1472 1472 getargs(x, 0, 0, _("secret takes no arguments"))
1473 1473 target = phases.secret
1474 1474 return _phase(repo, subset, target)
1475 1475
1476 1476 def parentspec(repo, subset, x, n, order):
1477 1477 """``set^0``
1478 1478 The set.
1479 1479 ``set^1`` (or ``set^``), ``set^2``
1480 1480 First or second parent, respectively, of all changesets in set.
1481 1481 """
1482 1482 try:
1483 1483 n = int(n[1])
1484 1484 if n not in (0, 1, 2):
1485 1485 raise ValueError
1486 1486 except (TypeError, ValueError):
1487 1487 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1488 1488 ps = set()
1489 1489 cl = repo.changelog
1490 1490 for r in getset(repo, fullreposet(repo), x):
1491 1491 if n == 0:
1492 1492 ps.add(r)
1493 1493 elif n == 1:
1494 1494 try:
1495 1495 ps.add(cl.parentrevs(r)[0])
1496 1496 except error.WdirUnsupported:
1497 1497 ps.add(repo[r].parents()[0].rev())
1498 1498 else:
1499 1499 try:
1500 1500 parents = cl.parentrevs(r)
1501 1501 if parents[1] != node.nullrev:
1502 1502 ps.add(parents[1])
1503 1503 except error.WdirUnsupported:
1504 1504 parents = repo[r].parents()
1505 1505 if len(parents) == 2:
1506 1506 ps.add(parents[1].rev())
1507 1507 return subset & ps
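# Sketch: this backs the '^' suffix, so 'tip^' and 'tip^1' name the first
# parent of tip, 'tip^2' its second parent (empty for a non-merge), and
# 'tip^0' tip itself.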
1508 1508
1509 1509 @predicate('present(set)', safe=True)
1510 1510 def present(repo, subset, x):
1511 1511 """An empty set, if any revision in set isn't found; otherwise,
1512 1512 all revisions in set.
1513 1513
1514 1514 If any of the specified revisions is not present in the local repository,
1515 1515 the query is normally aborted. But this predicate allows the query
1516 1516 to continue even in such cases.
1517 1517 """
1518 1518 try:
1519 1519 return getset(repo, subset, x)
1520 1520 except error.RepoLookupError:
1521 1521 return baseset()
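# Sketch: 'present(0123456789ab)' (an illustrative, unresolvable id)
# evaluates to an empty set instead of aborting the whole query, which
# makes the predicate useful in aliases shared across repositories.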
1522 1522
1523 1523 # for internal use
1524 1524 @predicate('_notpublic', safe=True)
1525 1525 def _notpublic(repo, subset, x):
1526 1526 getargs(x, 0, 0, "_notpublic takes no arguments")
1527 1527 return _phase(repo, subset, phases.draft, phases.secret)
1528 1528
1529 1529 @predicate('public()', safe=True)
1530 1530 def public(repo, subset, x):
1531 1531 """Changeset in public phase."""
1532 1532 # i18n: "public" is a keyword
1533 1533 getargs(x, 0, 0, _("public takes no arguments"))
1534 1534 phase = repo._phasecache.phase
1535 1535 target = phases.public
1536 1536 condition = lambda r: phase(repo, r) == target
1537 1537 return subset.filter(condition, condrepr=('<phase %r>', target),
1538 1538 cache=False)
1539 1539
1540 1540 @predicate('remote([id [,path]])', safe=False)
1541 1541 def remote(repo, subset, x):
1542 1542 """Local revision that corresponds to the given identifier in a
1543 1543 remote repository, if present. Here, the '.' identifier is a
1544 1544 synonym for the current local branch.
1545 1545 """
1546 1546
1547 1547 from . import hg # avoid start-up nasties
1548 1548 # i18n: "remote" is a keyword
1549 1549 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1550 1550
1551 1551 q = '.'
1552 1552 if len(l) > 0:
1553 1553 # i18n: "remote" is a keyword
1554 1554 q = getstring(l[0], _("remote requires a string id"))
1555 1555 if q == '.':
1556 1556 q = repo['.'].branch()
1557 1557
1558 1558 dest = ''
1559 1559 if len(l) > 1:
1560 1560 # i18n: "remote" is a keyword
1561 1561 dest = getstring(l[1], _("remote requires a repository path"))
1562 1562 dest = repo.ui.expandpath(dest or 'default')
1563 1563 dest, branches = hg.parseurl(dest)
1564 1564 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1565 1565 if revs:
1566 1566 revs = [repo.lookup(rev) for rev in revs]
1567 1567 other = hg.peer(repo, {}, dest)
1568 1568 n = other.lookup(q)
1569 1569 if n in repo:
1570 1570 r = repo[n].rev()
1571 1571 if r in subset:
1572 1572 return baseset([r])
1573 1573 return baseset()
1574 1574
1575 1575 @predicate('removes(pattern)', safe=True)
1576 1576 def removes(repo, subset, x):
1577 1577 """Changesets which remove files matching pattern.
1578 1578
1579 1579 The pattern without explicit kind like ``glob:`` is expected to be
1580 1580 relative to the current directory and match against a file or a
1581 1581 directory.
1582 1582 """
1583 1583 # i18n: "removes" is a keyword
1584 1584 pat = getstring(x, _("removes requires a pattern"))
1585 1585 return checkstatus(repo, subset, pat, 2)
1586 1586
1587 1587 @predicate('rev(number)', safe=True)
1588 1588 def rev(repo, subset, x):
1589 1589 """Revision with the given numeric identifier.
1590 1590 """
1591 1591 # i18n: "rev" is a keyword
1592 1592 l = getargs(x, 1, 1, _("rev requires one argument"))
1593 1593 try:
1594 1594 # i18n: "rev" is a keyword
1595 1595 l = int(getstring(l[0], _("rev requires a number")))
1596 1596 except (TypeError, ValueError):
1597 1597 # i18n: "rev" is a keyword
1598 1598 raise error.ParseError(_("rev expects a number"))
1599 1599 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1600 1600 return baseset()
1601 1601 return subset & baseset([l])
1602 1602
1603 1603 @predicate('matching(revision [, field])', safe=True)
1604 1604 def matching(repo, subset, x):
1605 1605 """Changesets in which a given set of fields match the set of fields in the
1606 1606 selected revision or set.
1607 1607
1608 1608 To match more than one field pass the list of fields to match separated
1609 1609 by spaces (e.g. ``author description``).
1610 1610
1611 1611 Valid fields are most regular revision fields and some special fields.
1612 1612
1613 1613 Regular revision fields are ``description``, ``author``, ``branch``,
1614 1614 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1615 1615 and ``diff``.
1616 1616 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1617 1617 contents of the revision. Two revisions matching their ``diff`` will
1618 1618 also match their ``files``.
1619 1619
1620 1620 Special fields are ``summary`` and ``metadata``:
1621 1621 ``summary`` matches the first line of the description.
1622 1622 ``metadata`` is equivalent to matching ``description user date``
1623 1623 (i.e. it matches the main metadata fields).
1624 1624
1625 1625 ``metadata`` is the default field which is used when no fields are
1626 1626 specified. You can match more than one field at a time.
1627 1627 """
1628 1628 # i18n: "matching" is a keyword
1629 1629 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1630 1630
1631 1631 revs = getset(repo, fullreposet(repo), l[0])
1632 1632
1633 1633 fieldlist = ['metadata']
1634 1634 if len(l) > 1:
1635 1635 fieldlist = getstring(l[1],
1636 1636 # i18n: "matching" is a keyword
1637 1637 _("matching requires a string "
1638 1638 "as its second argument")).split()
1639 1639
1640 1640 # Make sure that there are no repeated fields,
1641 1641 # expand the 'special' 'metadata' field type
1642 1642 # and check the 'files' whenever we check the 'diff'
1643 1643 fields = []
1644 1644 for field in fieldlist:
1645 1645 if field == 'metadata':
1646 1646 fields += ['user', 'description', 'date']
1647 1647 elif field == 'diff':
1648 1648 # a revision matching the diff must also match the files
1649 1649 # since matching the diff is very costly, make sure to
1650 1650 # also match the files first
1651 1651 fields += ['files', 'diff']
1652 1652 else:
1653 1653 if field == 'author':
1654 1654 field = 'user'
1655 1655 fields.append(field)
1656 1656 fields = set(fields)
1657 1657 if 'summary' in fields and 'description' in fields:
1658 1658 # If a revision matches its description it also matches its summary
1659 1659 fields.discard('summary')
1660 1660
1661 1661 # We may want to match more than one field
1662 1662 # Not all fields take the same amount of time to be matched
1663 1663 # Sort the selected fields in order of increasing matching cost
1664 1664 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1665 1665 'files', 'description', 'substate', 'diff']
1666 1666 def fieldkeyfunc(f):
1667 1667 try:
1668 1668 return fieldorder.index(f)
1669 1669 except ValueError:
1670 1670 # assume an unknown field is very costly
1671 1671 return len(fieldorder)
1672 1672 fields = list(fields)
1673 1673 fields.sort(key=fieldkeyfunc)
1674 1674
1675 1675 # Each field will be matched with its own "getfield" function
1676 1676 # which will be added to the getfieldfuncs array of functions
1677 1677 getfieldfuncs = []
1678 1678 _funcs = {
1679 1679 'user': lambda r: repo[r].user(),
1680 1680 'branch': lambda r: repo[r].branch(),
1681 1681 'date': lambda r: repo[r].date(),
1682 1682 'description': lambda r: repo[r].description(),
1683 1683 'files': lambda r: repo[r].files(),
1684 1684 'parents': lambda r: repo[r].parents(),
1685 1685 'phase': lambda r: repo[r].phase(),
1686 1686 'substate': lambda r: repo[r].substate,
1687 1687 'summary': lambda r: repo[r].description().splitlines()[0],
1688 1688 'diff': lambda r: list(repo[r].diff(git=True)),
1689 1689 }
1690 1690 for info in fields:
1691 1691 getfield = _funcs.get(info, None)
1692 1692 if getfield is None:
1693 1693 raise error.ParseError(
1694 1694 # i18n: "matching" is a keyword
1695 1695 _("unexpected field name passed to matching: %s") % info)
1696 1696 getfieldfuncs.append(getfield)
1697 1697 # convert the getfield array of functions into a "getinfo" function
1698 1698 # which returns an array of field values (or a single value if there
1699 1699 # is only one field to match)
1700 1700 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1701 1701
1702 1702 def matches(x):
1703 1703 for rev in revs:
1704 1704 target = getinfo(rev)
1705 1705 match = True
1706 1706 for n, f in enumerate(getfieldfuncs):
1707 1707 if target[n] != f(x):
1708 1708 match = False
1709 1709 if match:
1710 1710 return True
1711 1711 return False
1712 1712
1713 1713 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
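# Usage sketch: 'matching(tip, "author date")' maps 'author' to 'user',
# orders the fields by the cost table above (user before date), and keeps
# every revision whose user and date both equal tip's.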
1714 1714
1715 1715 @predicate('reverse(set)', safe=True, takeorder=True)
1716 1716 def reverse(repo, subset, x, order):
1717 1717 """Reverse order of set.
1718 1718 """
1719 1719 l = getset(repo, subset, x)
1720 1720 if order == defineorder:
1721 1721 l.reverse()
1722 1722 return l
1723 1723
1724 1724 @predicate('roots(set)', safe=True)
1725 1725 def roots(repo, subset, x):
1726 1726 """Changesets in set with no parent changeset in set.
1727 1727 """
1728 1728 s = getset(repo, fullreposet(repo), x)
1729 1729 parents = repo.changelog.parentrevs
1730 1730 def filter(r):
1731 1731 for p in parents(r):
1732 1732 if 0 <= p and p in s:
1733 1733 return False
1734 1734 return True
1735 1735 return subset & s.filter(filter, condrepr='<roots>')
1736 1736
1737 1737 _sortkeyfuncs = {
1738 1738 'rev': lambda c: c.rev(),
1739 1739 'branch': lambda c: c.branch(),
1740 1740 'desc': lambda c: c.description(),
1741 1741 'user': lambda c: c.user(),
1742 1742 'author': lambda c: c.user(),
1743 1743 'date': lambda c: c.date()[0],
1744 1744 }
1745 1745
1746 1746 def _getsortargs(x):
1747 1747 """Parse sort options into (set, [(key, reverse)], opts)"""
1748 1748 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1749 1749 if 'set' not in args:
1750 1750 # i18n: "sort" is a keyword
1751 1751 raise error.ParseError(_('sort requires one or two arguments'))
1752 1752 keys = "rev"
1753 1753 if 'keys' in args:
1754 1754 # i18n: "sort" is a keyword
1755 1755 keys = getstring(args['keys'], _("sort spec must be a string"))
1756 1756
1757 1757 keyflags = []
1758 1758 for k in keys.split():
1759 1759 fk = k
1760 1760 reverse = (k[0] == '-')
1761 1761 if reverse:
1762 1762 k = k[1:]
1763 1763 if k not in _sortkeyfuncs and k != 'topo':
1764 1764 raise error.ParseError(_("unknown sort key %r") % fk)
1765 1765 keyflags.append((k, reverse))
1766 1766
1767 1767 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1768 1768 # i18n: "topo" is a keyword
1769 1769 raise error.ParseError(_('topo sort order cannot be combined '
1770 1770 'with other sort keys'))
1771 1771
1772 1772 opts = {}
1773 1773 if 'topo.firstbranch' in args:
1774 1774 if any(k == 'topo' for k, reverse in keyflags):
1775 1775 opts['topo.firstbranch'] = args['topo.firstbranch']
1776 1776 else:
1777 1777 # i18n: "topo" and "topo.firstbranch" are keywords
1778 1778 raise error.ParseError(_('topo.firstbranch can only be used '
1779 1779 'when using the topo sort key'))
1780 1780
1781 1781 return args['set'], keyflags, opts
1782 1782
1783 1783 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1784 1784 def sort(repo, subset, x, order):
1785 1785 """Sort set by keys. The default sort order is ascending, specify a key
1786 1786 as ``-key`` to sort in descending order.
1787 1787
1788 1788 The keys can be:
1789 1789
1790 1790 - ``rev`` for the revision number,
1791 1791 - ``branch`` for the branch name,
1792 1792 - ``desc`` for the commit message (description),
1793 1793 - ``user`` for user name (``author`` can be used as an alias),
1794 1794 - ``date`` for the commit date,
1795 1795 - ``topo`` for a reverse topological sort
1796 1796
1797 1797 The ``topo`` sort order cannot be combined with other sort keys. This sort
1798 1798 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1799 1799 specifies what topological branches to prioritize in the sort.
1800 1800
1801 1801 """
1802 1802 s, keyflags, opts = _getsortargs(x)
1803 1803 revs = getset(repo, subset, s)
1804 1804
1805 1805 if not keyflags or order != defineorder:
1806 1806 return revs
1807 1807 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1808 1808 revs.sort(reverse=keyflags[0][1])
1809 1809 return revs
1810 1810 elif keyflags[0][0] == "topo":
1811 1811 firstbranch = ()
1812 1812 if 'topo.firstbranch' in opts:
1813 1813 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1814 1814 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
1815 1815 firstbranch),
1816 1816 istopo=True)
1817 1817 if keyflags[0][1]:
1818 1818 revs.reverse()
1819 1819 return revs
1820 1820
1821 1821 # sort() is guaranteed to be stable
1822 1822 ctxs = [repo[r] for r in revs]
1823 1823 for k, reverse in reversed(keyflags):
1824 1824 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1825 1825 return baseset([c.rev() for c in ctxs])
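# Usage sketch: 'sort(all(), "-date user")' yields newest-first order with
# ties broken by ascending user name: the loop above applies the keys
# right-to-left, and because Python's sort is stable the leftmost key,
# applied last, becomes the primary one.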
1826 1826
1827 1827 @predicate('subrepo([pattern])')
1828 1828 def subrepo(repo, subset, x):
1829 1829 """Changesets that add, modify or remove the given subrepo. If no subrepo
1830 1830 pattern is named, any subrepo changes are returned.
1831 1831 """
1832 1832 # i18n: "subrepo" is a keyword
1833 1833 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1834 1834 pat = None
1835 1835 if len(args) != 0:
1836 1836 pat = getstring(args[0], _("subrepo requires a pattern"))
1837 1837
1838 1838 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1839 1839
1840 1840 def submatches(names):
1841 1841 k, p, m = util.stringmatcher(pat)
1842 1842 for name in names:
1843 1843 if m(name):
1844 1844 yield name
1845 1845
1846 1846 def matches(x):
1847 1847 c = repo[x]
1848 1848 s = repo.status(c.p1().node(), c.node(), match=m)
1849 1849
1850 1850 if pat is None:
1851 1851 return s.added or s.modified or s.removed
1852 1852
1853 1853 if s.added:
1854 1854 return any(submatches(c.substate.keys()))
1855 1855
1856 1856 if s.modified:
1857 1857 subs = set(c.p1().substate.keys())
1858 1858 subs.update(c.substate.keys())
1859 1859
1860 1860 for path in submatches(subs):
1861 1861 if c.p1().substate.get(path) != c.substate.get(path):
1862 1862 return True
1863 1863
1864 1864 if s.removed:
1865 1865 return any(submatches(c.p1().substate.keys()))
1866 1866
1867 1867 return False
1868 1868
1869 1869 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1870 1870
1871 1871 def _mapbynodefunc(repo, s, f):
1872 1872 """(repo, smartset, [node] -> [node]) -> smartset
1873 1873
1874 1874 Helper method to map a smartset to another smartset given a function only
1875 1875 talking about nodes. Handles converting between rev numbers and nodes, and
1876 1876 filtering.
1877 1877 """
1878 1878 cl = repo.unfiltered().changelog
1879 1879 torev = cl.rev
1880 1880 tonode = cl.node
1881 1881 nodemap = cl.nodemap
1882 1882 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
1883 1883 return smartset.baseset(result - repo.changelog.filteredrevs)
1884 1884
1885 1885 @predicate('successors(set)', safe=True)
1886 1886 def successors(repo, subset, x):
1887 1887 """All successors for set, including the given set themselves"""
1888 1888 s = getset(repo, fullreposet(repo), x)
1889 1889 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
1890 1890 d = _mapbynodefunc(repo, s, f)
1891 1891 return subset & d
1892 1892
1893 1893 def _substringmatcher(pattern, casesensitive=True):
1894 1894 kind, pattern, matcher = util.stringmatcher(pattern,
1895 1895 casesensitive=casesensitive)
1896 1896 if kind == 'literal':
1897 1897 if not casesensitive:
1898 1898 pattern = encoding.lower(pattern)
1899 1899 matcher = lambda s: pattern in encoding.lower(s)
1900 1900 else:
1901 1901 matcher = lambda s: pattern in s
1902 1902 return kind, pattern, matcher
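# Sketch of the matcher kinds this helper returns (inputs illustrative):
#
#   k, p, m = _substringmatcher('bob')               # literal substring
#   m('Bob <bob@example.com>')                       # -> True
#   k, p, m = _substringmatcher('BOB', casesensitive=False)
#   m('Bob <bob@example.com>')                       # -> True (case-folded)
#   k, p, m = _substringmatcher('re:bob$')           # regex is delegated
#   m('commits by bob')                              # -> truthy match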
1903 1903
1904 1904 @predicate('tag([name])', safe=True)
1905 1905 def tag(repo, subset, x):
1906 1906 """The specified tag by name, or all tagged revisions if no name is given.
1907 1907
1908 1908 Pattern matching is supported for `name`. See
1909 1909 :hg:`help revisions.patterns`.
1910 1910 """
1911 1911 # i18n: "tag" is a keyword
1912 1912 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1913 1913 cl = repo.changelog
1914 1914 if args:
1915 1915 pattern = getstring(args[0],
1916 1916 # i18n: "tag" is a keyword
1917 1917 _('the argument to tag must be a string'))
1918 1918 kind, pattern, matcher = util.stringmatcher(pattern)
1919 1919 if kind == 'literal':
1920 1920 # avoid resolving all tags
1921 1921 tn = repo._tagscache.tags.get(pattern, None)
1922 1922 if tn is None:
1923 1923 raise error.RepoLookupError(_("tag '%s' does not exist")
1924 1924 % pattern)
1925 1925 s = {repo[tn].rev()}
1926 1926 else:
1927 1927 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
1928 1928 else:
1929 1929 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
1930 1930 return subset & s
1931 1931
1932 1932 @predicate('tagged', safe=True)
1933 1933 def tagged(repo, subset, x):
1934 1934 return tag(repo, subset, x)
1935 1935
1936 1936 @predicate('unstable()', safe=True)
1937 1937 def unstable(repo, subset, x):
1938 1938 msg = ("'unstable()' is deprecated, "
1939 1939 "use 'orphan()'")
1940 1940 repo.ui.deprecwarn(msg, '4.4')
1941 1941
1942 1942 return orphan(repo, subset, x)
1943 1943
1944 1944 @predicate('orphan()', safe=True)
1945 1945 def orphan(repo, subset, x):
1946 1946 """Non-obsolete changesets with obsolete ancestors.
1947 1947 """
1948 1948 # i18n: "orphan" is a keyword
1949 1949 getargs(x, 0, 0, _("orphan takes no arguments"))
1950 1950 orphan = obsmod.getrevs(repo, 'orphan')
1951 1951 return subset & orphan
1952 1952
1953 1953
1954 1954 @predicate('user(string)', safe=True)
1955 1955 def user(repo, subset, x):
1956 1956 """User name contains string. The match is case-insensitive.
1957 1957
1958 1958 Pattern matching is supported for `string`. See
1959 1959 :hg:`help revisions.patterns`.
1960 1960 """
1961 1961 return author(repo, subset, x)
1962 1962
1963 1963 @predicate('wdir()', safe=True)
1964 1964 def wdir(repo, subset, x):
1965 1965 """Working directory. (EXPERIMENTAL)"""
1966 1966 # i18n: "wdir" is a keyword
1967 1967 getargs(x, 0, 0, _("wdir takes no arguments"))
1968 1968 if node.wdirrev in subset or isinstance(subset, fullreposet):
1969 1969 return baseset([node.wdirrev])
1970 1970 return baseset()
1971 1971
1972 1972 def _orderedlist(repo, subset, x):
1973 1973 s = getstring(x, "internal error")
1974 1974 if not s:
1975 1975 return baseset()
1976 1976 # remove duplicates here. it's difficult for caller to deduplicate sets
1977 1977 # because different symbols can point to the same rev.
1978 1978 cl = repo.changelog
1979 1979 ls = []
1980 1980 seen = set()
1981 1981 for t in s.split('\0'):
1982 1982 try:
1983 1983 # fast path for integer revision
1984 1984 r = int(t)
1985 1985 if str(r) != t or r not in cl:
1986 1986 raise ValueError
1987 1987 revs = [r]
1988 1988 except ValueError:
1989 1989 revs = stringset(repo, subset, t)
1990 1990
1991 1991 for r in revs:
1992 1992 if r in seen:
1993 1993 continue
1994 1994 if (r in subset
1995 1995 or r == node.nullrev and isinstance(subset, fullreposet)):
1996 1996 ls.append(r)
1997 1997 seen.add(r)
1998 1998 return baseset(ls)
1999 1999
2000 2000 # for internal use
2001 2001 @predicate('_list', safe=True, takeorder=True)
2002 2002 def _list(repo, subset, x, order):
2003 2003 if order == followorder:
2004 2004 # slow path to take the subset order
2005 2005 return subset & _orderedlist(repo, fullreposet(repo), x)
2006 2006 else:
2007 2007 return _orderedlist(repo, subset, x)
2008 2008
2009 2009 def _orderedintlist(repo, subset, x):
2010 2010 s = getstring(x, "internal error")
2011 2011 if not s:
2012 2012 return baseset()
2013 2013 ls = [int(r) for r in s.split('\0')]
2014 2014 s = subset
2015 2015 return baseset([r for r in ls if r in s])
2016 2016
2017 2017 # for internal use
2018 2018 @predicate('_intlist', safe=True, takeorder=True)
2019 2019 def _intlist(repo, subset, x, order):
2020 2020 if order == followorder:
2021 2021 # slow path to take the subset order
2022 2022 return subset & _orderedintlist(repo, fullreposet(repo), x)
2023 2023 else:
2024 2024 return _orderedintlist(repo, subset, x)
2025 2025
2026 2026 def _orderedhexlist(repo, subset, x):
2027 2027 s = getstring(x, "internal error")
2028 2028 if not s:
2029 2029 return baseset()
2030 2030 cl = repo.changelog
2031 2031 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2032 2032 s = subset
2033 2033 return baseset([r for r in ls if r in s])
2034 2034
2035 2035 # for internal use
2036 2036 @predicate('_hexlist', safe=True, takeorder=True)
2037 2037 def _hexlist(repo, subset, x, order):
2038 2038 if order == followorder:
2039 2039 # slow path to take the subset order
2040 2040 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2041 2041 else:
2042 2042 return _orderedhexlist(repo, subset, x)
2043 2043
2044 2044 methods = {
2045 2045 "range": rangeset,
2046 2046 "rangeall": rangeall,
2047 2047 "rangepre": rangepre,
2048 2048 "rangepost": rangepost,
2049 2049 "dagrange": dagrange,
2050 2050 "string": stringset,
2051 2051 "symbol": stringset,
2052 2052 "and": andset,
2053 2053 "or": orset,
2054 2054 "not": notset,
2055 2055 "difference": differenceset,
2056 2056 "relation": relationset,
2057 2057 "relsubscript": relsubscriptset,
2058 2058 "subscript": subscriptset,
2059 2059 "list": listset,
2060 2060 "keyvalue": keyvaluepair,
2061 2061 "func": func,
2062 2062 "ancestor": ancestorspec,
2063 2063 "parent": parentspec,
2064 2064 "parentpost": parentpost,
2065 2065 }
2066 2066
2067 2067 def posttreebuilthook(tree, repo):
2068 2068 # hook for extensions to execute code on the optimized tree
2069 2069 pass
2070 2070
2071 2071 def match(ui, spec, repo=None, order=defineorder):
2072 2072 """Create a matcher for a single revision spec
2073 2073
2074 2074 If order=followorder, a matcher takes the ordering specified by the input
2075 2075 set.
2076 2076 """
2077 2077 return matchany(ui, [spec], repo=repo, order=order)
2078 2078
2079 2079 def matchany(ui, specs, repo=None, order=defineorder, localalias=None):
2080 2080 """Create a matcher that will include any revisions matching one of the
2081 2081 given specs
2082 2082
2083 2083 If order=followorder, a matcher takes the ordering specified by the input
2084 2084 set.
2085 2085
2086 2086 If localalias is not None, it is a dict {name: definitionstring}. It takes
2087 2087 precedence over [revsetalias] config section.
2088 2088 """
2089 2089 if not specs:
2090 2090 def mfunc(repo, subset=None):
2091 2091 return baseset()
2092 2092 return mfunc
2093 2093 if not all(specs):
2094 2094 raise error.ParseError(_("empty query"))
2095 2095 lookup = None
2096 2096 if repo:
2097 2097 lookup = repo.__contains__
2098 2098 if len(specs) == 1:
2099 2099 tree = revsetlang.parse(specs[0], lookup)
2100 2100 else:
2101 2101 tree = ('or',
2102 2102 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2103 2103
2104 2104 aliases = []
2105 2105 warn = None
2106 2106 if ui:
2107 2107 aliases.extend(ui.configitems('revsetalias'))
2108 2108 warn = ui.warn
2109 2109 if localalias:
2110 2110 aliases.extend(localalias.items())
2111 2111 if aliases:
2112 2112 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2113 2113 tree = revsetlang.foldconcat(tree)
2114 2114 tree = revsetlang.analyze(tree, order)
2115 2115 tree = revsetlang.optimize(tree)
2116 2116 posttreebuilthook(tree, repo)
2117 2117 return makematcher(tree)
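# Usage sketch (names illustrative): build one matcher for several specs
# plus a throwaway alias, then evaluate it; localalias wins over any
# [revsetalias] entry of the same name:
#
#   m = matchany(ui, ['tip', 'mine'], repo=repo,
#                localalias={'mine': 'user("alice")'})
#   revs = m(repo)   # smartset of revisions matching either spec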
2118 2118
2119 2119 def makematcher(tree):
2120 2120 """Create a matcher from an evaluatable tree"""
2121 2121 def mfunc(repo, subset=None):
2122 2122 if subset is None:
2123 2123 subset = fullreposet(repo)
2124 2124 return getset(repo, subset, tree)
2125 2125 return mfunc
2126 2126
2127 2127 def loadpredicate(ui, extname, registrarobj):
2128 2128 """Load revset predicates from specified registrarobj
2129 2129 """
2130 2130 for name, func in registrarobj._table.iteritems():
2131 2131 symbols[name] = func
2132 2132 if func._safe:
2133 2133 safesymbols.add(name)
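# Extension sketch (hedged): third-party code usually declares predicates
# through the registrar module and lets the extension loader hand the
# registrar object to loadpredicate(); names below are illustrative:
#
#   from mercurial import registrar
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('evenrevs()', safe=True)
#   def evenrevs(repo, subset, x):
#       # the parsed-but-unused argument tree arrives in x
#       return subset.filter(lambda r: r % 2 == 0)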
2134 2134
2135 2135 # load built-in predicates explicitly to setup safesymbols
2136 2136 loadpredicate(None, None, predicate)
2137 2137
2138 2138 # tell hggettext to extract docstrings from these functions:
2139 2139 i18nfunctions = symbols.values()