revset: add i18n comments to error messages for followlines predicate...
FUJIWARA Katsunori
r32086:2a2744df stable
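For context, followlines() — the predicate whose error messages this change annotates — takes a file, a fromline:toline range, and optional startrev/descend arguments (see its declaration in the diff below). A minimal usage sketch from the command line; the file path and line range are illustrative only and not part of this changeset:

    hg log -r 'followlines(mercurial/revset.py, 905:917)'
    hg log -r 'followlines(mercurial/revset.py, 905:917, startrev=tip, descend=True)'

Malformed arguments trigger the error strings touched here (e.g. "followlines expects exactly one revision", "descend argument must be a boolean"); the added "# i18n:" comments tell translators that the quoted words are revset keywords and should not be translated.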
@@ -1,2290 +1,2294
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 pathutil,
23 23 phases,
24 24 registrar,
25 25 repoview,
26 26 revsetlang,
27 27 smartset,
28 28 util,
29 29 )
30 30
31 31 # helpers for processing parsed tree
32 32 getsymbol = revsetlang.getsymbol
33 33 getstring = revsetlang.getstring
34 34 getinteger = revsetlang.getinteger
35 35 getboolean = revsetlang.getboolean
36 36 getlist = revsetlang.getlist
37 37 getrange = revsetlang.getrange
38 38 getargs = revsetlang.getargs
39 39 getargsdict = revsetlang.getargsdict
40 40
41 41 # constants used as an argument of match() and matchany()
42 42 anyorder = revsetlang.anyorder
43 43 defineorder = revsetlang.defineorder
44 44 followorder = revsetlang.followorder
45 45
46 46 baseset = smartset.baseset
47 47 generatorset = smartset.generatorset
48 48 spanset = smartset.spanset
49 49 fullreposet = smartset.fullreposet
50 50
51 51 def _revancestors(repo, revs, followfirst):
52 52 """Like revlog.ancestors(), but supports followfirst."""
53 53 if followfirst:
54 54 cut = 1
55 55 else:
56 56 cut = None
57 57 cl = repo.changelog
58 58
59 59 def iterate():
60 60 revs.sort(reverse=True)
61 61 irevs = iter(revs)
62 62 h = []
63 63
64 64 inputrev = next(irevs, None)
65 65 if inputrev is not None:
66 66 heapq.heappush(h, -inputrev)
67 67
68 68 seen = set()
69 69 while h:
70 70 current = -heapq.heappop(h)
71 71 if current == inputrev:
72 72 inputrev = next(irevs, None)
73 73 if inputrev is not None:
74 74 heapq.heappush(h, -inputrev)
75 75 if current not in seen:
76 76 seen.add(current)
77 77 yield current
78 78 for parent in cl.parentrevs(current)[:cut]:
79 79 if parent != node.nullrev:
80 80 heapq.heappush(h, -parent)
81 81
82 82 return generatorset(iterate(), iterasc=False)
83 83
84 84 def _revdescendants(repo, revs, followfirst):
85 85 """Like revlog.descendants() but supports followfirst."""
86 86 if followfirst:
87 87 cut = 1
88 88 else:
89 89 cut = None
90 90
91 91 def iterate():
92 92 cl = repo.changelog
93 93 # XXX this should be 'parentset.min()' assuming 'parentset' is a
94 94 # smartset (and if it is not, it should.)
95 95 first = min(revs)
96 96 nullrev = node.nullrev
97 97 if first == nullrev:
98 98 # Are there nodes with a null first parent and a non-null
99 99 # second one? Maybe. Do we care? Probably not.
100 100 for i in cl:
101 101 yield i
102 102 else:
103 103 seen = set(revs)
104 104 for i in cl.revs(first + 1):
105 105 for x in cl.parentrevs(i)[:cut]:
106 106 if x != nullrev and x in seen:
107 107 seen.add(i)
108 108 yield i
109 109 break
110 110
111 111 return generatorset(iterate(), iterasc=True)
112 112
113 113 def _reachablerootspure(repo, minroot, roots, heads, includepath):
114 114 """return (heads(::<roots> and ::<heads>))
115 115
116 116 If includepath is True, return (<roots>::<heads>)."""
117 117 if not roots:
118 118 return []
119 119 parentrevs = repo.changelog.parentrevs
120 120 roots = set(roots)
121 121 visit = list(heads)
122 122 reachable = set()
123 123 seen = {}
124 124 # prefetch all the things! (because python is slow)
125 125 reached = reachable.add
126 126 dovisit = visit.append
127 127 nextvisit = visit.pop
128 128 # open-code the post-order traversal due to the tiny size of
129 129 # sys.getrecursionlimit()
130 130 while visit:
131 131 rev = nextvisit()
132 132 if rev in roots:
133 133 reached(rev)
134 134 if not includepath:
135 135 continue
136 136 parents = parentrevs(rev)
137 137 seen[rev] = parents
138 138 for parent in parents:
139 139 if parent >= minroot and parent not in seen:
140 140 dovisit(parent)
141 141 if not reachable:
142 142 return baseset()
143 143 if not includepath:
144 144 return reachable
145 145 for rev in sorted(seen):
146 146 for parent in seen[rev]:
147 147 if parent in reachable:
148 148 reached(rev)
149 149 return reachable
150 150
151 151 def reachableroots(repo, roots, heads, includepath=False):
152 152 """return (heads(::<roots> and ::<heads>))
153 153
154 154 If includepath is True, return (<roots>::<heads>)."""
155 155 if not roots:
156 156 return baseset()
157 157 minroot = roots.min()
158 158 roots = list(roots)
159 159 heads = list(heads)
160 160 try:
161 161 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
162 162 except AttributeError:
163 163 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
164 164 revs = baseset(revs)
165 165 revs.sort()
166 166 return revs
167 167
168 168 # helpers
169 169
170 170 def getset(repo, subset, x):
171 171 if not x:
172 172 raise error.ParseError(_("missing argument"))
173 173 return methods[x[0]](repo, subset, *x[1:])
174 174
175 175 def _getrevsource(repo, r):
176 176 extra = repo[r].extra()
177 177 for label in ('source', 'transplant_source', 'rebase_source'):
178 178 if label in extra:
179 179 try:
180 180 return repo[extra[label]].rev()
181 181 except error.RepoLookupError:
182 182 pass
183 183 return None
184 184
185 185 # operator methods
186 186
187 187 def stringset(repo, subset, x):
188 188 x = repo[x].rev()
189 189 if (x in subset
190 190 or x == node.nullrev and isinstance(subset, fullreposet)):
191 191 return baseset([x])
192 192 return baseset()
193 193
194 194 def rangeset(repo, subset, x, y, order):
195 195 m = getset(repo, fullreposet(repo), x)
196 196 n = getset(repo, fullreposet(repo), y)
197 197
198 198 if not m or not n:
199 199 return baseset()
200 200 return _makerangeset(repo, subset, m.first(), n.last(), order)
201 201
202 202 def rangeall(repo, subset, x, order):
203 203 assert x is None
204 204 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
205 205
206 206 def rangepre(repo, subset, y, order):
207 207 # ':y' can't be rewritten to '0:y' since '0' may be hidden
208 208 n = getset(repo, fullreposet(repo), y)
209 209 if not n:
210 210 return baseset()
211 211 return _makerangeset(repo, subset, 0, n.last(), order)
212 212
213 213 def rangepost(repo, subset, x, order):
214 214 m = getset(repo, fullreposet(repo), x)
215 215 if not m:
216 216 return baseset()
217 217 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
218 218
219 219 def _makerangeset(repo, subset, m, n, order):
220 220 if m == n:
221 221 r = baseset([m])
222 222 elif n == node.wdirrev:
223 223 r = spanset(repo, m, len(repo)) + baseset([n])
224 224 elif m == node.wdirrev:
225 225 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
226 226 elif m < n:
227 227 r = spanset(repo, m, n + 1)
228 228 else:
229 229 r = spanset(repo, m, n - 1)
230 230
231 231 if order == defineorder:
232 232 return r & subset
233 233 else:
234 234 # carrying the sorting over when possible would be more efficient
235 235 return subset & r
236 236
237 237 def dagrange(repo, subset, x, y, order):
238 238 r = fullreposet(repo)
239 239 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
240 240 includepath=True)
241 241 return subset & xs
242 242
243 243 def andset(repo, subset, x, y, order):
244 244 return getset(repo, getset(repo, subset, x), y)
245 245
246 246 def differenceset(repo, subset, x, y, order):
247 247 return getset(repo, subset, x) - getset(repo, subset, y)
248 248
249 249 def _orsetlist(repo, subset, xs):
250 250 assert xs
251 251 if len(xs) == 1:
252 252 return getset(repo, subset, xs[0])
253 253 p = len(xs) // 2
254 254 a = _orsetlist(repo, subset, xs[:p])
255 255 b = _orsetlist(repo, subset, xs[p:])
256 256 return a + b
257 257
258 258 def orset(repo, subset, x, order):
259 259 xs = getlist(x)
260 260 if order == followorder:
261 261 # slow path to take the subset order
262 262 return subset & _orsetlist(repo, fullreposet(repo), xs)
263 263 else:
264 264 return _orsetlist(repo, subset, xs)
265 265
266 266 def notset(repo, subset, x, order):
267 267 return subset - getset(repo, subset, x)
268 268
269 269 def listset(repo, subset, *xs):
270 270 raise error.ParseError(_("can't use a list in this context"),
271 271 hint=_('see hg help "revsets.x or y"'))
272 272
273 273 def keyvaluepair(repo, subset, k, v):
274 274 raise error.ParseError(_("can't use a key-value pair in this context"))
275 275
276 276 def func(repo, subset, a, b, order):
277 277 f = getsymbol(a)
278 278 if f in symbols:
279 279 func = symbols[f]
280 280 if getattr(func, '_takeorder', False):
281 281 return func(repo, subset, b, order)
282 282 return func(repo, subset, b)
283 283
284 284 keep = lambda fn: getattr(fn, '__doc__', None) is not None
285 285
286 286 syms = [s for (s, fn) in symbols.items() if keep(fn)]
287 287 raise error.UnknownIdentifier(f, syms)
288 288
289 289 # functions
290 290
291 291 # symbols are callables like:
292 292 # fn(repo, subset, x)
293 293 # with:
294 294 # repo - current repository instance
295 295 # subset - of revisions to be examined
296 296 # x - argument in tree form
297 297 symbols = {}
298 298
299 299 # symbols which can't be used for a DoS attack for any given input
300 300 # (e.g. those which accept regexes as plain strings shouldn't be included)
301 301 # functions that just return a lot of changesets (like all) don't count here
302 302 safesymbols = set()
303 303
304 304 predicate = registrar.revsetpredicate()
305 305
306 306 @predicate('_destupdate')
307 307 def _destupdate(repo, subset, x):
308 308 # experimental revset for update destination
309 309 args = getargsdict(x, 'limit', 'clean')
310 310 return subset & baseset([destutil.destupdate(repo, **args)[0]])
311 311
312 312 @predicate('_destmerge')
313 313 def _destmerge(repo, subset, x):
314 314 # experimental revset for merge destination
315 315 sourceset = None
316 316 if x is not None:
317 317 sourceset = getset(repo, fullreposet(repo), x)
318 318 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
319 319
320 320 @predicate('adds(pattern)', safe=True)
321 321 def adds(repo, subset, x):
322 322 """Changesets that add a file matching pattern.
323 323
324 324 The pattern without explicit kind like ``glob:`` is expected to be
325 325 relative to the current directory and match against a file or a
326 326 directory.
327 327 """
328 328 # i18n: "adds" is a keyword
329 329 pat = getstring(x, _("adds requires a pattern"))
330 330 return checkstatus(repo, subset, pat, 1)
331 331
332 332 @predicate('ancestor(*changeset)', safe=True)
333 333 def ancestor(repo, subset, x):
334 334 """A greatest common ancestor of the changesets.
335 335
336 336 Accepts 0 or more changesets.
337 337 Will return empty list when passed no args.
338 338 Greatest common ancestor of a single changeset is that changeset.
339 339 """
340 340 # i18n: "ancestor" is a keyword
341 341 l = getlist(x)
342 342 rl = fullreposet(repo)
343 343 anc = None
344 344
345 345 # (getset(repo, rl, i) for i in l) generates a list of lists
346 346 for revs in (getset(repo, rl, i) for i in l):
347 347 for r in revs:
348 348 if anc is None:
349 349 anc = repo[r]
350 350 else:
351 351 anc = anc.ancestor(repo[r])
352 352
353 353 if anc is not None and anc.rev() in subset:
354 354 return baseset([anc.rev()])
355 355 return baseset()
356 356
357 357 def _ancestors(repo, subset, x, followfirst=False):
358 358 heads = getset(repo, fullreposet(repo), x)
359 359 if not heads:
360 360 return baseset()
361 361 s = _revancestors(repo, heads, followfirst)
362 362 return subset & s
363 363
364 364 @predicate('ancestors(set)', safe=True)
365 365 def ancestors(repo, subset, x):
366 366 """Changesets that are ancestors of a changeset in set.
367 367 """
368 368 return _ancestors(repo, subset, x)
369 369
370 370 @predicate('_firstancestors', safe=True)
371 371 def _firstancestors(repo, subset, x):
372 372 # ``_firstancestors(set)``
373 373 # Like ``ancestors(set)`` but follows only the first parents.
374 374 return _ancestors(repo, subset, x, followfirst=True)
375 375
376 376 def ancestorspec(repo, subset, x, n, order):
377 377 """``set~n``
378 378 Changesets that are the Nth ancestor (first parents only) of a changeset
379 379 in set.
380 380 """
381 381 n = getinteger(n, _("~ expects a number"))
382 382 ps = set()
383 383 cl = repo.changelog
384 384 for r in getset(repo, fullreposet(repo), x):
385 385 for i in range(n):
386 386 r = cl.parentrevs(r)[0]
387 387 ps.add(r)
388 388 return subset & ps
389 389
390 390 @predicate('author(string)', safe=True)
391 391 def author(repo, subset, x):
392 392 """Alias for ``user(string)``.
393 393 """
394 394 # i18n: "author" is a keyword
395 395 n = getstring(x, _("author requires a string"))
396 396 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
397 397 return subset.filter(lambda x: matcher(repo[x].user()),
398 398 condrepr=('<user %r>', n))
399 399
400 400 @predicate('bisect(string)', safe=True)
401 401 def bisect(repo, subset, x):
402 402 """Changesets marked in the specified bisect status:
403 403
404 404 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
405 405 - ``goods``, ``bads`` : csets topologically good/bad
406 406 - ``range`` : csets taking part in the bisection
407 407 - ``pruned`` : csets that are goods, bads or skipped
408 408 - ``untested`` : csets whose fate is yet unknown
409 409 - ``ignored`` : csets ignored due to DAG topology
410 410 - ``current`` : the cset currently being bisected
411 411 """
412 412 # i18n: "bisect" is a keyword
413 413 status = getstring(x, _("bisect requires a string")).lower()
414 414 state = set(hbisect.get(repo, status))
415 415 return subset & state
416 416
417 417 # Backward-compatibility
418 418 # - no help entry so that we do not advertise it any more
419 419 @predicate('bisected', safe=True)
420 420 def bisected(repo, subset, x):
421 421 return bisect(repo, subset, x)
422 422
423 423 @predicate('bookmark([name])', safe=True)
424 424 def bookmark(repo, subset, x):
425 425 """The named bookmark or all bookmarks.
426 426
427 427 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
428 428 """
429 429 # i18n: "bookmark" is a keyword
430 430 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
431 431 if args:
432 432 bm = getstring(args[0],
433 433 # i18n: "bookmark" is a keyword
434 434 _('the argument to bookmark must be a string'))
435 435 kind, pattern, matcher = util.stringmatcher(bm)
436 436 bms = set()
437 437 if kind == 'literal':
438 438 bmrev = repo._bookmarks.get(pattern, None)
439 439 if not bmrev:
440 440 raise error.RepoLookupError(_("bookmark '%s' does not exist")
441 441 % pattern)
442 442 bms.add(repo[bmrev].rev())
443 443 else:
444 444 matchrevs = set()
445 445 for name, bmrev in repo._bookmarks.iteritems():
446 446 if matcher(name):
447 447 matchrevs.add(bmrev)
448 448 if not matchrevs:
449 449 raise error.RepoLookupError(_("no bookmarks exist"
450 450 " that match '%s'") % pattern)
451 451 for bmrev in matchrevs:
452 452 bms.add(repo[bmrev].rev())
453 453 else:
454 454 bms = set([repo[r].rev()
455 455 for r in repo._bookmarks.values()])
456 456 bms -= set([node.nullrev])
457 457 return subset & bms
458 458
459 459 @predicate('branch(string or set)', safe=True)
460 460 def branch(repo, subset, x):
461 461 """
462 462 All changesets belonging to the given branch or the branches of the given
463 463 changesets.
464 464
465 465 Pattern matching is supported for `string`. See
466 466 :hg:`help revisions.patterns`.
467 467 """
468 468 getbi = repo.revbranchcache().branchinfo
469 469
470 470 try:
471 471 b = getstring(x, '')
472 472 except error.ParseError:
473 473 # not a string, but another revspec, e.g. tip()
474 474 pass
475 475 else:
476 476 kind, pattern, matcher = util.stringmatcher(b)
477 477 if kind == 'literal':
478 478 # note: falls through to the revspec case if no branch with
479 479 # this name exists and pattern kind is not specified explicitly
480 480 if pattern in repo.branchmap():
481 481 return subset.filter(lambda r: matcher(getbi(r)[0]),
482 482 condrepr=('<branch %r>', b))
483 483 if b.startswith('literal:'):
484 484 raise error.RepoLookupError(_("branch '%s' does not exist")
485 485 % pattern)
486 486 else:
487 487 return subset.filter(lambda r: matcher(getbi(r)[0]),
488 488 condrepr=('<branch %r>', b))
489 489
490 490 s = getset(repo, fullreposet(repo), x)
491 491 b = set()
492 492 for r in s:
493 493 b.add(getbi(r)[0])
494 494 c = s.__contains__
495 495 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
496 496 condrepr=lambda: '<branch %r>' % sorted(b))
497 497
498 498 @predicate('bumped()', safe=True)
499 499 def bumped(repo, subset, x):
500 500 """Mutable changesets marked as successors of public changesets.
501 501
502 502 Only non-public and non-obsolete changesets can be `bumped`.
503 503 """
504 504 # i18n: "bumped" is a keyword
505 505 getargs(x, 0, 0, _("bumped takes no arguments"))
506 506 bumped = obsmod.getrevs(repo, 'bumped')
507 507 return subset & bumped
508 508
509 509 @predicate('bundle()', safe=True)
510 510 def bundle(repo, subset, x):
511 511 """Changesets in the bundle.
512 512
513 513 Bundle must be specified by the -R option."""
514 514
515 515 try:
516 516 bundlerevs = repo.changelog.bundlerevs
517 517 except AttributeError:
518 518 raise error.Abort(_("no bundle provided - specify with -R"))
519 519 return subset & bundlerevs
520 520
521 521 def checkstatus(repo, subset, pat, field):
522 522 hasset = matchmod.patkind(pat) == 'set'
523 523
524 524 mcache = [None]
525 525 def matches(x):
526 526 c = repo[x]
527 527 if not mcache[0] or hasset:
528 528 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
529 529 m = mcache[0]
530 530 fname = None
531 531 if not m.anypats() and len(m.files()) == 1:
532 532 fname = m.files()[0]
533 533 if fname is not None:
534 534 if fname not in c.files():
535 535 return False
536 536 else:
537 537 for f in c.files():
538 538 if m(f):
539 539 break
540 540 else:
541 541 return False
542 542 files = repo.status(c.p1().node(), c.node())[field]
543 543 if fname is not None:
544 544 if fname in files:
545 545 return True
546 546 else:
547 547 for f in files:
548 548 if m(f):
549 549 return True
550 550
551 551 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
552 552
553 553 def _children(repo, subset, parentset):
554 554 if not parentset:
555 555 return baseset()
556 556 cs = set()
557 557 pr = repo.changelog.parentrevs
558 558 minrev = parentset.min()
559 559 nullrev = node.nullrev
560 560 for r in subset:
561 561 if r <= minrev:
562 562 continue
563 563 p1, p2 = pr(r)
564 564 if p1 in parentset:
565 565 cs.add(r)
566 566 if p2 != nullrev and p2 in parentset:
567 567 cs.add(r)
568 568 return baseset(cs)
569 569
570 570 @predicate('children(set)', safe=True)
571 571 def children(repo, subset, x):
572 572 """Child changesets of changesets in set.
573 573 """
574 574 s = getset(repo, fullreposet(repo), x)
575 575 cs = _children(repo, subset, s)
576 576 return subset & cs
577 577
578 578 @predicate('closed()', safe=True)
579 579 def closed(repo, subset, x):
580 580 """Changeset is closed.
581 581 """
582 582 # i18n: "closed" is a keyword
583 583 getargs(x, 0, 0, _("closed takes no arguments"))
584 584 return subset.filter(lambda r: repo[r].closesbranch(),
585 585 condrepr='<branch closed>')
586 586
587 587 @predicate('contains(pattern)')
588 588 def contains(repo, subset, x):
589 589 """The revision's manifest contains a file matching pattern (but might not
590 590 modify it). See :hg:`help patterns` for information about file patterns.
591 591
592 592 The pattern without explicit kind like ``glob:`` is expected to be
593 593 relative to the current directory and match against a file exactly
594 594 for efficiency.
595 595 """
596 596 # i18n: "contains" is a keyword
597 597 pat = getstring(x, _("contains requires a pattern"))
598 598
599 599 def matches(x):
600 600 if not matchmod.patkind(pat):
601 601 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
602 602 if pats in repo[x]:
603 603 return True
604 604 else:
605 605 c = repo[x]
606 606 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
607 607 for f in c.manifest():
608 608 if m(f):
609 609 return True
610 610 return False
611 611
612 612 return subset.filter(matches, condrepr=('<contains %r>', pat))
613 613
614 614 @predicate('converted([id])', safe=True)
615 615 def converted(repo, subset, x):
616 616 """Changesets converted from the given identifier in the old repository if
617 617 present, or all converted changesets if no identifier is specified.
618 618 """
619 619
620 620 # There is exactly no chance of resolving the revision, so do a simple
621 621 # string compare and hope for the best
622 622
623 623 rev = None
624 624 # i18n: "converted" is a keyword
625 625 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
626 626 if l:
627 627 # i18n: "converted" is a keyword
628 628 rev = getstring(l[0], _('converted requires a revision'))
629 629
630 630 def _matchvalue(r):
631 631 source = repo[r].extra().get('convert_revision', None)
632 632 return source is not None and (rev is None or source.startswith(rev))
633 633
634 634 return subset.filter(lambda r: _matchvalue(r),
635 635 condrepr=('<converted %r>', rev))
636 636
637 637 @predicate('date(interval)', safe=True)
638 638 def date(repo, subset, x):
639 639 """Changesets within the interval, see :hg:`help dates`.
640 640 """
641 641 # i18n: "date" is a keyword
642 642 ds = getstring(x, _("date requires a string"))
643 643 dm = util.matchdate(ds)
644 644 return subset.filter(lambda x: dm(repo[x].date()[0]),
645 645 condrepr=('<date %r>', ds))
646 646
647 647 @predicate('desc(string)', safe=True)
648 648 def desc(repo, subset, x):
649 649 """Search commit message for string. The match is case-insensitive.
650 650
651 651 Pattern matching is supported for `string`. See
652 652 :hg:`help revisions.patterns`.
653 653 """
654 654 # i18n: "desc" is a keyword
655 655 ds = getstring(x, _("desc requires a string"))
656 656
657 657 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
658 658
659 659 return subset.filter(lambda r: matcher(repo[r].description()),
660 660 condrepr=('<desc %r>', ds))
661 661
662 662 def _descendants(repo, subset, x, followfirst=False):
663 663 roots = getset(repo, fullreposet(repo), x)
664 664 if not roots:
665 665 return baseset()
666 666 s = _revdescendants(repo, roots, followfirst)
667 667
668 668 # Both sets need to be ascending in order to lazily return the union
669 669 # in the correct order.
670 670 base = subset & roots
671 671 desc = subset & s
672 672 result = base + desc
673 673 if subset.isascending():
674 674 result.sort()
675 675 elif subset.isdescending():
676 676 result.sort(reverse=True)
677 677 else:
678 678 result = subset & result
679 679 return result
680 680
681 681 @predicate('descendants(set)', safe=True)
682 682 def descendants(repo, subset, x):
683 683 """Changesets which are descendants of changesets in set.
684 684 """
685 685 return _descendants(repo, subset, x)
686 686
687 687 @predicate('_firstdescendants', safe=True)
688 688 def _firstdescendants(repo, subset, x):
689 689 # ``_firstdescendants(set)``
690 690 # Like ``descendants(set)`` but follows only the first parents.
691 691 return _descendants(repo, subset, x, followfirst=True)
692 692
693 693 @predicate('destination([set])', safe=True)
694 694 def destination(repo, subset, x):
695 695 """Changesets that were created by a graft, transplant or rebase operation,
696 696 with the given revisions specified as the source. Omitting the optional set
697 697 is the same as passing all().
698 698 """
699 699 if x is not None:
700 700 sources = getset(repo, fullreposet(repo), x)
701 701 else:
702 702 sources = fullreposet(repo)
703 703
704 704 dests = set()
705 705
706 706 # subset contains all of the possible destinations that can be returned, so
707 707 # iterate over them and see if their source(s) were provided in the arg set.
708 708 # Even if the immediate src of r is not in the arg set, src's source (or
709 709 # further back) may be. Scanning back further than the immediate src allows
710 710 # transitive transplants and rebases to yield the same results as transitive
711 711 # grafts.
712 712 for r in subset:
713 713 src = _getrevsource(repo, r)
714 714 lineage = None
715 715
716 716 while src is not None:
717 717 if lineage is None:
718 718 lineage = list()
719 719
720 720 lineage.append(r)
721 721
722 722 # The visited lineage is a match if the current source is in the arg
723 723 # set. Since every candidate dest is visited by way of iterating
724 724 # subset, any dests further back in the lineage will be tested by a
725 725 # different iteration over subset. Likewise, if the src was already
726 726 # selected, the current lineage can be selected without going back
727 727 # further.
728 728 if src in sources or src in dests:
729 729 dests.update(lineage)
730 730 break
731 731
732 732 r = src
733 733 src = _getrevsource(repo, r)
734 734
735 735 return subset.filter(dests.__contains__,
736 736 condrepr=lambda: '<destination %r>' % sorted(dests))
737 737
738 738 @predicate('divergent()', safe=True)
739 739 def divergent(repo, subset, x):
740 740 """
741 741 Final successors of changesets with an alternative set of final successors.
742 742 """
743 743 # i18n: "divergent" is a keyword
744 744 getargs(x, 0, 0, _("divergent takes no arguments"))
745 745 divergent = obsmod.getrevs(repo, 'divergent')
746 746 return subset & divergent
747 747
748 748 @predicate('extinct()', safe=True)
749 749 def extinct(repo, subset, x):
750 750 """Obsolete changesets with obsolete descendants only.
751 751 """
752 752 # i18n: "extinct" is a keyword
753 753 getargs(x, 0, 0, _("extinct takes no arguments"))
754 754 extincts = obsmod.getrevs(repo, 'extinct')
755 755 return subset & extincts
756 756
757 757 @predicate('extra(label, [value])', safe=True)
758 758 def extra(repo, subset, x):
759 759 """Changesets with the given label in the extra metadata, with the given
760 760 optional value.
761 761
762 762 Pattern matching is supported for `value`. See
763 763 :hg:`help revisions.patterns`.
764 764 """
765 765 args = getargsdict(x, 'extra', 'label value')
766 766 if 'label' not in args:
767 767 # i18n: "extra" is a keyword
768 768 raise error.ParseError(_('extra takes at least 1 argument'))
769 769 # i18n: "extra" is a keyword
770 770 label = getstring(args['label'], _('first argument to extra must be '
771 771 'a string'))
772 772 value = None
773 773
774 774 if 'value' in args:
775 775 # i18n: "extra" is a keyword
776 776 value = getstring(args['value'], _('second argument to extra must be '
777 777 'a string'))
778 778 kind, value, matcher = util.stringmatcher(value)
779 779
780 780 def _matchvalue(r):
781 781 extra = repo[r].extra()
782 782 return label in extra and (value is None or matcher(extra[label]))
783 783
784 784 return subset.filter(lambda r: _matchvalue(r),
785 785 condrepr=('<extra[%r] %r>', label, value))
786 786
787 787 @predicate('filelog(pattern)', safe=True)
788 788 def filelog(repo, subset, x):
789 789 """Changesets connected to the specified filelog.
790 790
791 791 For performance reasons, visits only revisions mentioned in the file-level
792 792 filelog, rather than filtering through all changesets (much faster, but
793 793 doesn't include deletes or duplicate changes). For a slower, more accurate
794 794 result, use ``file()``.
795 795
796 796 The pattern without explicit kind like ``glob:`` is expected to be
797 797 relative to the current directory and match against a file exactly
798 798 for efficiency.
799 799
800 800 If some linkrev points to revisions filtered by the current repoview, we'll
801 801 work around it to return a non-filtered value.
802 802 """
803 803
804 804 # i18n: "filelog" is a keyword
805 805 pat = getstring(x, _("filelog requires a pattern"))
806 806 s = set()
807 807 cl = repo.changelog
808 808
809 809 if not matchmod.patkind(pat):
810 810 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
811 811 files = [f]
812 812 else:
813 813 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
814 814 files = (f for f in repo[None] if m(f))
815 815
816 816 for f in files:
817 817 fl = repo.file(f)
818 818 known = {}
819 819 scanpos = 0
820 820 for fr in list(fl):
821 821 fn = fl.node(fr)
822 822 if fn in known:
823 823 s.add(known[fn])
824 824 continue
825 825
826 826 lr = fl.linkrev(fr)
827 827 if lr in cl:
828 828 s.add(lr)
829 829 elif scanpos is not None:
830 830 # lowest matching changeset is filtered, scan further
831 831 # ahead in changelog
832 832 start = max(lr, scanpos) + 1
833 833 scanpos = None
834 834 for r in cl.revs(start):
835 835 # minimize parsing of non-matching entries
836 836 if f in cl.revision(r) and f in cl.readfiles(r):
837 837 try:
838 838 # try to use manifest delta fastpath
839 839 n = repo[r].filenode(f)
840 840 if n not in known:
841 841 if n == fn:
842 842 s.add(r)
843 843 scanpos = r
844 844 break
845 845 else:
846 846 known[n] = r
847 847 except error.ManifestLookupError:
848 848 # deletion in changelog
849 849 continue
850 850
851 851 return subset & s
852 852
853 853 @predicate('first(set, [n])', safe=True)
854 854 def first(repo, subset, x):
855 855 """An alias for limit().
856 856 """
857 857 return limit(repo, subset, x)
858 858
859 859 def _follow(repo, subset, x, name, followfirst=False):
860 860 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
861 861 "and an optional revset") % name)
862 862 c = repo['.']
863 863 if l:
864 864 x = getstring(l[0], _("%s expected a pattern") % name)
865 865 rev = None
866 866 if len(l) >= 2:
867 867 revs = getset(repo, fullreposet(repo), l[1])
868 868 if len(revs) != 1:
869 869 raise error.RepoLookupError(
870 870 _("%s expected one starting revision") % name)
871 871 rev = revs.last()
872 872 c = repo[rev]
873 873 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
874 874 ctx=repo[rev], default='path')
875 875
876 876 files = c.manifest().walk(matcher)
877 877
878 878 s = set()
879 879 for fname in files:
880 880 fctx = c[fname]
881 881 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
882 882 # include the revision responsible for the most recent version
883 883 s.add(fctx.introrev())
884 884 else:
885 885 s = _revancestors(repo, baseset([c.rev()]), followfirst)
886 886
887 887 return subset & s
888 888
889 889 @predicate('follow([pattern[, startrev]])', safe=True)
890 890 def follow(repo, subset, x):
891 891 """
892 892 An alias for ``::.`` (ancestors of the working directory's first parent).
893 893 If pattern is specified, the histories of files matching given
894 894 pattern in the revision given by startrev are followed, including copies.
895 895 """
896 896 return _follow(repo, subset, x, 'follow')
897 897
898 898 @predicate('_followfirst', safe=True)
899 899 def _followfirst(repo, subset, x):
900 900 # ``followfirst([pattern[, startrev]])``
901 901 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
902 902 # of every revision or file revision.
903 903 return _follow(repo, subset, x, '_followfirst', followfirst=True)
904 904
905 905 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
906 906 safe=True)
907 907 def followlines(repo, subset, x):
908 908 """Changesets modifying `file` in line range ('fromline', 'toline').
909 909
910 910 Line range corresponds to 'file' content at 'startrev' and should hence be
911 911 consistent with file size. If startrev is not specified, working directory's
912 912 parent is used.
913 913
914 914 By default, ancestors of 'startrev' are returned. If 'descend' is True,
915 915 descendants of 'startrev' are returned though renames are (currently) not
916 916 followed in this direction.
917 917 """
918 918 from . import context # avoid circular import issues
919 919
920 920 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
921 921 if len(args['lines']) != 1:
922 922 raise error.ParseError(_("followlines requires a line range"))
923 923
924 924 rev = '.'
925 925 if 'startrev' in args:
926 926 revs = getset(repo, fullreposet(repo), args['startrev'])
927 927 if len(revs) != 1:
928 928 raise error.ParseError(
929 # i18n: "followlines" is a keyword
929 930 _("followlines expects exactly one revision"))
930 931 rev = revs.last()
931 932
932 933 pat = getstring(args['file'], _("followlines requires a pattern"))
933 934 if not matchmod.patkind(pat):
934 935 fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
935 936 else:
936 937 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
937 938 files = [f for f in repo[rev] if m(f)]
938 939 if len(files) != 1:
940 # i18n: "followlines" is a keyword
939 941 raise error.ParseError(_("followlines expects exactly one file"))
940 942 fname = files[0]
941 943
944 # i18n: "followlines" is a keyword
942 945 lr = getrange(args['lines'][0], _("followlines expects a line range"))
943 946 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
944 947 for a in lr]
945 948 fromline, toline = util.processlinerange(fromline, toline)
946 949
947 950 fctx = repo[rev].filectx(fname)
948 951 descend = False
949 952 if 'descend' in args:
950 953 descend = getboolean(args['descend'],
951 _("'descend' argument must be a boolean"))
954 # i18n: "descend" is a keyword
955 _("descend argument must be a boolean"))
952 956 if descend:
953 957 rs = generatorset(
954 958 (c.rev() for c, _linerange
955 959 in context.blockdescendants(fctx, fromline, toline)),
956 960 iterasc=True)
957 961 else:
958 962 rs = generatorset(
959 963 (c.rev() for c, _linerange
960 964 in context.blockancestors(fctx, fromline, toline)),
961 965 iterasc=False)
962 966 return subset & rs
963 967
964 968 @predicate('all()', safe=True)
965 969 def getall(repo, subset, x):
966 970 """All changesets, the same as ``0:tip``.
967 971 """
968 972 # i18n: "all" is a keyword
969 973 getargs(x, 0, 0, _("all takes no arguments"))
970 974 return subset & spanset(repo) # drop "null" if any
971 975
972 976 @predicate('grep(regex)')
973 977 def grep(repo, subset, x):
974 978 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
975 979 to ensure special escape characters are handled correctly. Unlike
976 980 ``keyword(string)``, the match is case-sensitive.
977 981 """
978 982 try:
979 983 # i18n: "grep" is a keyword
980 984 gr = re.compile(getstring(x, _("grep requires a string")))
981 985 except re.error as e:
982 986 raise error.ParseError(_('invalid match pattern: %s') % e)
983 987
984 988 def matches(x):
985 989 c = repo[x]
986 990 for e in c.files() + [c.user(), c.description()]:
987 991 if gr.search(e):
988 992 return True
989 993 return False
990 994
991 995 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
992 996
993 997 @predicate('_matchfiles', safe=True)
994 998 def _matchfiles(repo, subset, x):
995 999 # _matchfiles takes a revset list of prefixed arguments:
996 1000 #
997 1001 # [p:foo, i:bar, x:baz]
998 1002 #
999 1003 # builds a match object from them and filters subset. Allowed
1000 1004 # prefixes are 'p:' for regular patterns, 'i:' for include
1001 1005 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1002 1006 # a revision identifier, or the empty string to reference the
1003 1007 # working directory, from which the match object is
1004 1008 # initialized. Use 'd:' to set the default matching mode, default
1005 1009 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1006 1010
1007 1011 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1008 1012 pats, inc, exc = [], [], []
1009 1013 rev, default = None, None
1010 1014 for arg in l:
1011 1015 s = getstring(arg, "_matchfiles requires string arguments")
1012 1016 prefix, value = s[:2], s[2:]
1013 1017 if prefix == 'p:':
1014 1018 pats.append(value)
1015 1019 elif prefix == 'i:':
1016 1020 inc.append(value)
1017 1021 elif prefix == 'x:':
1018 1022 exc.append(value)
1019 1023 elif prefix == 'r:':
1020 1024 if rev is not None:
1021 1025 raise error.ParseError('_matchfiles expected at most one '
1022 1026 'revision')
1023 1027 if value != '': # empty means working directory; leave rev as None
1024 1028 rev = value
1025 1029 elif prefix == 'd:':
1026 1030 if default is not None:
1027 1031 raise error.ParseError('_matchfiles expected at most one '
1028 1032 'default mode')
1029 1033 default = value
1030 1034 else:
1031 1035 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1032 1036 if not default:
1033 1037 default = 'glob'
1034 1038
1035 1039 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1036 1040 exclude=exc, ctx=repo[rev], default=default)
1037 1041
1038 1042 # This directly reads the changelog data as creating changectx for all
1039 1043 # revisions is quite expensive.
1040 1044 getfiles = repo.changelog.readfiles
1041 1045 wdirrev = node.wdirrev
1042 1046 def matches(x):
1043 1047 if x == wdirrev:
1044 1048 files = repo[x].files()
1045 1049 else:
1046 1050 files = getfiles(x)
1047 1051 for f in files:
1048 1052 if m(f):
1049 1053 return True
1050 1054 return False
1051 1055
1052 1056 return subset.filter(matches,
1053 1057 condrepr=('<matchfiles patterns=%r, include=%r '
1054 1058 'exclude=%r, default=%r, rev=%r>',
1055 1059 pats, inc, exc, default, rev))
1056 1060
1057 1061 @predicate('file(pattern)', safe=True)
1058 1062 def hasfile(repo, subset, x):
1059 1063 """Changesets affecting files matched by pattern.
1060 1064
1061 1065 For a faster but less accurate result, consider using ``filelog()``
1062 1066 instead.
1063 1067
1064 1068 This predicate uses ``glob:`` as the default kind of pattern.
1065 1069 """
1066 1070 # i18n: "file" is a keyword
1067 1071 pat = getstring(x, _("file requires a pattern"))
1068 1072 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1069 1073
1070 1074 @predicate('head()', safe=True)
1071 1075 def head(repo, subset, x):
1072 1076 """Changeset is a named branch head.
1073 1077 """
1074 1078 # i18n: "head" is a keyword
1075 1079 getargs(x, 0, 0, _("head takes no arguments"))
1076 1080 hs = set()
1077 1081 cl = repo.changelog
1078 1082 for ls in repo.branchmap().itervalues():
1079 1083 hs.update(cl.rev(h) for h in ls)
1080 1084 return subset & baseset(hs)
1081 1085
1082 1086 @predicate('heads(set)', safe=True)
1083 1087 def heads(repo, subset, x):
1084 1088 """Members of set with no children in set.
1085 1089 """
1086 1090 s = getset(repo, subset, x)
1087 1091 ps = parents(repo, subset, x)
1088 1092 return s - ps
1089 1093
1090 1094 @predicate('hidden()', safe=True)
1091 1095 def hidden(repo, subset, x):
1092 1096 """Hidden changesets.
1093 1097 """
1094 1098 # i18n: "hidden" is a keyword
1095 1099 getargs(x, 0, 0, _("hidden takes no arguments"))
1096 1100 hiddenrevs = repoview.filterrevs(repo, 'visible')
1097 1101 return subset & hiddenrevs
1098 1102
1099 1103 @predicate('keyword(string)', safe=True)
1100 1104 def keyword(repo, subset, x):
1101 1105 """Search commit message, user name, and names of changed files for
1102 1106 string. The match is case-insensitive.
1103 1107
1104 1108 For a regular expression or case sensitive search of these fields, use
1105 1109 ``grep(regex)``.
1106 1110 """
1107 1111 # i18n: "keyword" is a keyword
1108 1112 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1109 1113
1110 1114 def matches(r):
1111 1115 c = repo[r]
1112 1116 return any(kw in encoding.lower(t)
1113 1117 for t in c.files() + [c.user(), c.description()])
1114 1118
1115 1119 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1116 1120
1117 1121 @predicate('limit(set[, n[, offset]])', safe=True)
1118 1122 def limit(repo, subset, x):
1119 1123 """First n members of set, defaulting to 1, starting from offset.
1120 1124 """
1121 1125 args = getargsdict(x, 'limit', 'set n offset')
1122 1126 if 'set' not in args:
1123 1127 # i18n: "limit" is a keyword
1124 1128 raise error.ParseError(_("limit requires one to three arguments"))
1125 1129 # i18n: "limit" is a keyword
1126 1130 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1127 1131 # i18n: "limit" is a keyword
1128 1132 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1129 1133 if ofs < 0:
1130 1134 raise error.ParseError(_("negative offset"))
1131 1135 os = getset(repo, fullreposet(repo), args['set'])
1132 1136 result = []
1133 1137 it = iter(os)
1134 1138 for x in xrange(ofs):
1135 1139 y = next(it, None)
1136 1140 if y is None:
1137 1141 break
1138 1142 for x in xrange(lim):
1139 1143 y = next(it, None)
1140 1144 if y is None:
1141 1145 break
1142 1146 elif y in subset:
1143 1147 result.append(y)
1144 1148 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1145 1149 lim, ofs, subset, os))
1146 1150
1147 1151 @predicate('last(set, [n])', safe=True)
1148 1152 def last(repo, subset, x):
1149 1153 """Last n members of set, defaulting to 1.
1150 1154 """
1151 1155 # i18n: "last" is a keyword
1152 1156 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1153 1157 lim = 1
1154 1158 if len(l) == 2:
1155 1159 # i18n: "last" is a keyword
1156 1160 lim = getinteger(l[1], _("last expects a number"))
1157 1161 os = getset(repo, fullreposet(repo), l[0])
1158 1162 os.reverse()
1159 1163 result = []
1160 1164 it = iter(os)
1161 1165 for x in xrange(lim):
1162 1166 y = next(it, None)
1163 1167 if y is None:
1164 1168 break
1165 1169 elif y in subset:
1166 1170 result.append(y)
1167 1171 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1168 1172
1169 1173 @predicate('max(set)', safe=True)
1170 1174 def maxrev(repo, subset, x):
1171 1175 """Changeset with highest revision number in set.
1172 1176 """
1173 1177 os = getset(repo, fullreposet(repo), x)
1174 1178 try:
1175 1179 m = os.max()
1176 1180 if m in subset:
1177 1181 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1178 1182 except ValueError:
1179 1183 # os.max() throws a ValueError when the collection is empty.
1180 1184 # Same as python's max().
1181 1185 pass
1182 1186 return baseset(datarepr=('<max %r, %r>', subset, os))
1183 1187
1184 1188 @predicate('merge()', safe=True)
1185 1189 def merge(repo, subset, x):
1186 1190 """Changeset is a merge changeset.
1187 1191 """
1188 1192 # i18n: "merge" is a keyword
1189 1193 getargs(x, 0, 0, _("merge takes no arguments"))
1190 1194 cl = repo.changelog
1191 1195 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1192 1196 condrepr='<merge>')
1193 1197
1194 1198 @predicate('branchpoint()', safe=True)
1195 1199 def branchpoint(repo, subset, x):
1196 1200 """Changesets with more than one child.
1197 1201 """
1198 1202 # i18n: "branchpoint" is a keyword
1199 1203 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1200 1204 cl = repo.changelog
1201 1205 if not subset:
1202 1206 return baseset()
1203 1207 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1204 1208 # (and if it is not, it should.)
1205 1209 baserev = min(subset)
1206 1210 parentscount = [0]*(len(repo) - baserev)
1207 1211 for r in cl.revs(start=baserev + 1):
1208 1212 for p in cl.parentrevs(r):
1209 1213 if p >= baserev:
1210 1214 parentscount[p - baserev] += 1
1211 1215 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1212 1216 condrepr='<branchpoint>')
1213 1217
1214 1218 @predicate('min(set)', safe=True)
1215 1219 def minrev(repo, subset, x):
1216 1220 """Changeset with lowest revision number in set.
1217 1221 """
1218 1222 os = getset(repo, fullreposet(repo), x)
1219 1223 try:
1220 1224 m = os.min()
1221 1225 if m in subset:
1222 1226 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1223 1227 except ValueError:
1224 1228 # os.min() throws a ValueError when the collection is empty.
1225 1229 # Same as python's min().
1226 1230 pass
1227 1231 return baseset(datarepr=('<min %r, %r>', subset, os))
1228 1232
1229 1233 @predicate('modifies(pattern)', safe=True)
1230 1234 def modifies(repo, subset, x):
1231 1235 """Changesets modifying files matched by pattern.
1232 1236
1233 1237 The pattern without explicit kind like ``glob:`` is expected to be
1234 1238 relative to the current directory and match against a file or a
1235 1239 directory.
1236 1240 """
1237 1241 # i18n: "modifies" is a keyword
1238 1242 pat = getstring(x, _("modifies requires a pattern"))
1239 1243 return checkstatus(repo, subset, pat, 0)
1240 1244
1241 1245 @predicate('named(namespace)')
1242 1246 def named(repo, subset, x):
1243 1247 """The changesets in a given namespace.
1244 1248
1245 1249 Pattern matching is supported for `namespace`. See
1246 1250 :hg:`help revisions.patterns`.
1247 1251 """
1248 1252 # i18n: "named" is a keyword
1249 1253 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1250 1254
1251 1255 ns = getstring(args[0],
1252 1256 # i18n: "named" is a keyword
1253 1257 _('the argument to named must be a string'))
1254 1258 kind, pattern, matcher = util.stringmatcher(ns)
1255 1259 namespaces = set()
1256 1260 if kind == 'literal':
1257 1261 if pattern not in repo.names:
1258 1262 raise error.RepoLookupError(_("namespace '%s' does not exist")
1259 1263 % ns)
1260 1264 namespaces.add(repo.names[pattern])
1261 1265 else:
1262 1266 for name, ns in repo.names.iteritems():
1263 1267 if matcher(name):
1264 1268 namespaces.add(ns)
1265 1269 if not namespaces:
1266 1270 raise error.RepoLookupError(_("no namespace exists"
1267 1271 " that match '%s'") % pattern)
1268 1272
1269 1273 names = set()
1270 1274 for ns in namespaces:
1271 1275 for name in ns.listnames(repo):
1272 1276 if name not in ns.deprecated:
1273 1277 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1274 1278
1275 1279 names -= set([node.nullrev])
1276 1280 return subset & names
1277 1281
1278 1282 @predicate('id(string)', safe=True)
1279 1283 def node_(repo, subset, x):
1280 1284 """Revision non-ambiguously specified by the given hex string prefix.
1281 1285 """
1282 1286 # i18n: "id" is a keyword
1283 1287 l = getargs(x, 1, 1, _("id requires one argument"))
1284 1288 # i18n: "id" is a keyword
1285 1289 n = getstring(l[0], _("id requires a string"))
1286 1290 if len(n) == 40:
1287 1291 try:
1288 1292 rn = repo.changelog.rev(node.bin(n))
1289 1293 except (LookupError, TypeError):
1290 1294 rn = None
1291 1295 else:
1292 1296 rn = None
1293 1297 pm = repo.changelog._partialmatch(n)
1294 1298 if pm is not None:
1295 1299 rn = repo.changelog.rev(pm)
1296 1300
1297 1301 if rn is None:
1298 1302 return baseset()
1299 1303 result = baseset([rn])
1300 1304 return result & subset
1301 1305
1302 1306 @predicate('obsolete()', safe=True)
1303 1307 def obsolete(repo, subset, x):
1304 1308 """Mutable changeset with a newer version."""
1305 1309 # i18n: "obsolete" is a keyword
1306 1310 getargs(x, 0, 0, _("obsolete takes no arguments"))
1307 1311 obsoletes = obsmod.getrevs(repo, 'obsolete')
1308 1312 return subset & obsoletes
1309 1313
1310 1314 @predicate('only(set, [set])', safe=True)
1311 1315 def only(repo, subset, x):
1312 1316 """Changesets that are ancestors of the first set that are not ancestors
1313 1317 of any other head in the repo. If a second set is specified, the result
1314 1318 is ancestors of the first set that are not ancestors of the second set
1315 1319 (i.e. ::<set1> - ::<set2>).
1316 1320 """
1317 1321 cl = repo.changelog
1318 1322 # i18n: "only" is a keyword
1319 1323 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1320 1324 include = getset(repo, fullreposet(repo), args[0])
1321 1325 if len(args) == 1:
1322 1326 if not include:
1323 1327 return baseset()
1324 1328
1325 1329 descendants = set(_revdescendants(repo, include, False))
1326 1330 exclude = [rev for rev in cl.headrevs()
1327 1331 if not rev in descendants and not rev in include]
1328 1332 else:
1329 1333 exclude = getset(repo, fullreposet(repo), args[1])
1330 1334
1331 1335 results = set(cl.findmissingrevs(common=exclude, heads=include))
1332 1336 # XXX we should turn this into a baseset instead of a set, smartset may do
1333 1337 # some optimizations from the fact this is a baseset.
1334 1338 return subset & results
1335 1339
1336 1340 @predicate('origin([set])', safe=True)
1337 1341 def origin(repo, subset, x):
1338 1342 """
1339 1343 Changesets that were specified as a source for the grafts, transplants or
1340 1344 rebases that created the given revisions. Omitting the optional set is the
1341 1345 same as passing all(). If a changeset created by these operations is itself
1342 1346 specified as a source for one of these operations, only the source changeset
1343 1347 for the first operation is selected.
1344 1348 """
1345 1349 if x is not None:
1346 1350 dests = getset(repo, fullreposet(repo), x)
1347 1351 else:
1348 1352 dests = fullreposet(repo)
1349 1353
1350 1354 def _firstsrc(rev):
1351 1355 src = _getrevsource(repo, rev)
1352 1356 if src is None:
1353 1357 return None
1354 1358
1355 1359 while True:
1356 1360 prev = _getrevsource(repo, src)
1357 1361
1358 1362 if prev is None:
1359 1363 return src
1360 1364 src = prev
1361 1365
1362 1366 o = set([_firstsrc(r) for r in dests])
1363 1367 o -= set([None])
1364 1368 # XXX we should turn this into a baseset instead of a set, smartset may do
1365 1369 # some optimizations from the fact this is a baseset.
1366 1370 return subset & o
1367 1371
1368 1372 @predicate('outgoing([path])', safe=False)
1369 1373 def outgoing(repo, subset, x):
1370 1374 """Changesets not found in the specified destination repository, or the
1371 1375 default push location.
1372 1376 """
1373 1377 # Avoid cycles.
1374 1378 from . import (
1375 1379 discovery,
1376 1380 hg,
1377 1381 )
1378 1382 # i18n: "outgoing" is a keyword
1379 1383 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1380 1384 # i18n: "outgoing" is a keyword
1381 1385 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1382 1386 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1383 1387 dest, branches = hg.parseurl(dest)
1384 1388 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1385 1389 if revs:
1386 1390 revs = [repo.lookup(rev) for rev in revs]
1387 1391 other = hg.peer(repo, {}, dest)
1388 1392 repo.ui.pushbuffer()
1389 1393 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1390 1394 repo.ui.popbuffer()
1391 1395 cl = repo.changelog
1392 1396 o = set([cl.rev(r) for r in outgoing.missing])
1393 1397 return subset & o
1394 1398
1395 1399 @predicate('p1([set])', safe=True)
1396 1400 def p1(repo, subset, x):
1397 1401 """First parent of changesets in set, or the working directory.
1398 1402 """
1399 1403 if x is None:
1400 1404 p = repo[x].p1().rev()
1401 1405 if p >= 0:
1402 1406 return subset & baseset([p])
1403 1407 return baseset()
1404 1408
1405 1409 ps = set()
1406 1410 cl = repo.changelog
1407 1411 for r in getset(repo, fullreposet(repo), x):
1408 1412 ps.add(cl.parentrevs(r)[0])
1409 1413 ps -= set([node.nullrev])
1410 1414 # XXX we should turn this into a baseset instead of a set, smartset may do
1411 1415 # some optimizations from the fact this is a baseset.
1412 1416 return subset & ps
1413 1417
1414 1418 @predicate('p2([set])', safe=True)
1415 1419 def p2(repo, subset, x):
1416 1420 """Second parent of changesets in set, or the working directory.
1417 1421 """
1418 1422 if x is None:
1419 1423 ps = repo[x].parents()
1420 1424 try:
1421 1425 p = ps[1].rev()
1422 1426 if p >= 0:
1423 1427 return subset & baseset([p])
1424 1428 return baseset()
1425 1429 except IndexError:
1426 1430 return baseset()
1427 1431
1428 1432 ps = set()
1429 1433 cl = repo.changelog
1430 1434 for r in getset(repo, fullreposet(repo), x):
1431 1435 ps.add(cl.parentrevs(r)[1])
1432 1436 ps -= set([node.nullrev])
1433 1437 # XXX we should turn this into a baseset instead of a set, smartset may do
1434 1438 # some optimizations from the fact this is a baseset.
1435 1439 return subset & ps
1436 1440
1437 1441 def parentpost(repo, subset, x, order):
1438 1442 return p1(repo, subset, x)
1439 1443
1440 1444 @predicate('parents([set])', safe=True)
1441 1445 def parents(repo, subset, x):
1442 1446 """
1443 1447 The set of all parents for all changesets in set, or the working directory.
1444 1448 """
1445 1449 if x is None:
1446 1450 ps = set(p.rev() for p in repo[x].parents())
1447 1451 else:
1448 1452 ps = set()
1449 1453 cl = repo.changelog
1450 1454 up = ps.update
1451 1455 parentrevs = cl.parentrevs
1452 1456 for r in getset(repo, fullreposet(repo), x):
1453 1457 if r == node.wdirrev:
1454 1458 up(p.rev() for p in repo[r].parents())
1455 1459 else:
1456 1460 up(parentrevs(r))
1457 1461 ps -= set([node.nullrev])
1458 1462 return subset & ps
1459 1463
1460 1464 def _phase(repo, subset, *targets):
1461 1465 """helper to select all rev in <targets> phases"""
1462 1466 s = repo._phasecache.getrevset(repo, targets)
1463 1467 return subset & s
1464 1468
1465 1469 @predicate('draft()', safe=True)
1466 1470 def draft(repo, subset, x):
1467 1471 """Changeset in draft phase."""
1468 1472 # i18n: "draft" is a keyword
1469 1473 getargs(x, 0, 0, _("draft takes no arguments"))
1470 1474 target = phases.draft
1471 1475 return _phase(repo, subset, target)
1472 1476
1473 1477 @predicate('secret()', safe=True)
1474 1478 def secret(repo, subset, x):
1475 1479 """Changeset in secret phase."""
1476 1480 # i18n: "secret" is a keyword
1477 1481 getargs(x, 0, 0, _("secret takes no arguments"))
1478 1482 target = phases.secret
1479 1483 return _phase(repo, subset, target)
1480 1484
1481 1485 def parentspec(repo, subset, x, n, order):
1482 1486 """``set^0``
1483 1487 The set.
1484 1488 ``set^1`` (or ``set^``), ``set^2``
1485 1489 First or second parent, respectively, of all changesets in set.
1486 1490 """
1487 1491 try:
1488 1492 n = int(n[1])
1489 1493 if n not in (0, 1, 2):
1490 1494 raise ValueError
1491 1495 except (TypeError, ValueError):
1492 1496 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1493 1497 ps = set()
1494 1498 cl = repo.changelog
1495 1499 for r in getset(repo, fullreposet(repo), x):
1496 1500 if n == 0:
1497 1501 ps.add(r)
1498 1502 elif n == 1:
1499 1503 ps.add(cl.parentrevs(r)[0])
1500 1504 elif n == 2:
1501 1505 parents = cl.parentrevs(r)
1502 1506 if parents[1] != node.nullrev:
1503 1507 ps.add(parents[1])
1504 1508 return subset & ps
1505 1509
1506 1510 @predicate('present(set)', safe=True)
1507 1511 def present(repo, subset, x):
1508 1512 """An empty set, if any revision in set isn't found; otherwise,
1509 1513 all revisions in set.
1510 1514
1511 1515 If any of specified revisions is not present in the local repository,
1512 1516 the query is normally aborted. But this predicate allows the query
1513 1517 to continue even in such cases.
1514 1518 """
1515 1519 try:
1516 1520 return getset(repo, subset, x)
1517 1521 except error.RepoLookupError:
1518 1522 return baseset()
1519 1523
1520 1524 # for internal use
1521 1525 @predicate('_notpublic', safe=True)
1522 1526 def _notpublic(repo, subset, x):
1523 1527 getargs(x, 0, 0, "_notpublic takes no arguments")
1524 1528 return _phase(repo, subset, phases.draft, phases.secret)
1525 1529
1526 1530 @predicate('public()', safe=True)
1527 1531 def public(repo, subset, x):
1528 1532 """Changeset in public phase."""
1529 1533 # i18n: "public" is a keyword
1530 1534 getargs(x, 0, 0, _("public takes no arguments"))
1531 1535 phase = repo._phasecache.phase
1532 1536 target = phases.public
1533 1537 condition = lambda r: phase(repo, r) == target
1534 1538 return subset.filter(condition, condrepr=('<phase %r>', target),
1535 1539 cache=False)
1536 1540
1537 1541 @predicate('remote([id [,path]])', safe=False)
1538 1542 def remote(repo, subset, x):
1539 1543 """Local revision that corresponds to the given identifier in a
1540 1544 remote repository, if present. Here, the '.' identifier is a
1541 1545 synonym for the current local branch.
1542 1546 """
1543 1547
1544 1548 from . import hg # avoid start-up nasties
1545 1549 # i18n: "remote" is a keyword
1546 1550 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1547 1551
1548 1552 q = '.'
1549 1553 if len(l) > 0:
1550 1554 # i18n: "remote" is a keyword
1551 1555 q = getstring(l[0], _("remote requires a string id"))
1552 1556 if q == '.':
1553 1557 q = repo['.'].branch()
1554 1558
1555 1559 dest = ''
1556 1560 if len(l) > 1:
1557 1561 # i18n: "remote" is a keyword
1558 1562 dest = getstring(l[1], _("remote requires a repository path"))
1559 1563 dest = repo.ui.expandpath(dest or 'default')
1560 1564 dest, branches = hg.parseurl(dest)
1561 1565 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1562 1566 if revs:
1563 1567 revs = [repo.lookup(rev) for rev in revs]
1564 1568 other = hg.peer(repo, {}, dest)
1565 1569 n = other.lookup(q)
1566 1570 if n in repo:
1567 1571 r = repo[n].rev()
1568 1572 if r in subset:
1569 1573 return baseset([r])
1570 1574 return baseset()
1571 1575
1572 1576 @predicate('removes(pattern)', safe=True)
1573 1577 def removes(repo, subset, x):
1574 1578 """Changesets which remove files matching pattern.
1575 1579
1576 1580 The pattern without explicit kind like ``glob:`` is expected to be
1577 1581 relative to the current directory and match against a file or a
1578 1582 directory.
1579 1583 """
1580 1584 # i18n: "removes" is a keyword
1581 1585 pat = getstring(x, _("removes requires a pattern"))
1582 1586 return checkstatus(repo, subset, pat, 2)
1583 1587
1584 1588 @predicate('rev(number)', safe=True)
1585 1589 def rev(repo, subset, x):
1586 1590 """Revision with the given numeric identifier.
1587 1591 """
1588 1592 # i18n: "rev" is a keyword
1589 1593 l = getargs(x, 1, 1, _("rev requires one argument"))
1590 1594 try:
1591 1595 # i18n: "rev" is a keyword
1592 1596 l = int(getstring(l[0], _("rev requires a number")))
1593 1597 except (TypeError, ValueError):
1594 1598 # i18n: "rev" is a keyword
1595 1599 raise error.ParseError(_("rev expects a number"))
1596 1600 if l not in repo.changelog and l != node.nullrev:
1597 1601 return baseset()
1598 1602 return subset & baseset([l])
1599 1603
1600 1604 @predicate('matching(revision [, field])', safe=True)
1601 1605 def matching(repo, subset, x):
1602 1606 """Changesets in which a given set of fields match the set of fields in the
1603 1607 selected revision or set.
1604 1608
1605 1609 To match more than one field, pass the list of fields to match separated
1606 1610 by spaces (e.g. ``author description``).
1607 1611
1608 1612 Valid fields are most regular revision fields and some special fields.
1609 1613
1610 1614 Regular revision fields are ``description``, ``author``, ``branch``,
1611 1615 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1612 1616 and ``diff``.
1613 1617 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1614 1618 contents of the revision. Two revisions matching their ``diff`` will
1615 1619 also match their ``files``.
1616 1620
1617 1621 Special fields are ``summary`` and ``metadata``:
1618 1622 ``summary`` matches the first line of the description.
1619 1623 ``metadata`` is equivalent to matching ``description user date``
1620 1624 (i.e. it matches the main metadata fields).
1621 1625
1622 1626 ``metadata`` is the default field which is used when no fields are
1623 1627 specified. You can match more than one field at a time.
1624 1628 """
1625 1629 # i18n: "matching" is a keyword
1626 1630 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1627 1631
1628 1632 revs = getset(repo, fullreposet(repo), l[0])
1629 1633
1630 1634 fieldlist = ['metadata']
1631 1635 if len(l) > 1:
1632 1636 fieldlist = getstring(l[1],
1633 1637 # i18n: "matching" is a keyword
1634 1638 _("matching requires a string "
1635 1639 "as its second argument")).split()
1636 1640
1637 1641 # Make sure that there are no repeated fields,
1638 1642 # expand the 'special' 'metadata' field type
1639 1643 # and check the 'files' whenever we check the 'diff'
1640 1644 fields = []
1641 1645 for field in fieldlist:
1642 1646 if field == 'metadata':
1643 1647 fields += ['user', 'description', 'date']
1644 1648 elif field == 'diff':
1645 1649 # a revision matching the diff must also match the files
1646 1650 # since matching the diff is very costly, make sure to
1647 1651 # also match the files first
1648 1652 fields += ['files', 'diff']
1649 1653 else:
1650 1654 if field == 'author':
1651 1655 field = 'user'
1652 1656 fields.append(field)
1653 1657 fields = set(fields)
1654 1658 if 'summary' in fields and 'description' in fields:
1655 1659 # If a revision matches its description it also matches its summary
1656 1660 fields.discard('summary')
1657 1661
1658 1662 # We may want to match more than one field
1659 1663 # Not all fields take the same amount of time to be matched
1660 1664 # Sort the selected fields in order of increasing matching cost
1661 1665 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1662 1666 'files', 'description', 'substate', 'diff']
1663 1667 def fieldkeyfunc(f):
1664 1668 try:
1665 1669 return fieldorder.index(f)
1666 1670 except ValueError:
1667 1671 # assume an unknown field is very costly
1668 1672 return len(fieldorder)
1669 1673 fields = list(fields)
1670 1674 fields.sort(key=fieldkeyfunc)
1671 1675
1672 1676 # Each field will be matched with its own "getfield" function
1673 1677 # which will be added to the getfieldfuncs array of functions
1674 1678 getfieldfuncs = []
1675 1679 _funcs = {
1676 1680 'user': lambda r: repo[r].user(),
1677 1681 'branch': lambda r: repo[r].branch(),
1678 1682 'date': lambda r: repo[r].date(),
1679 1683 'description': lambda r: repo[r].description(),
1680 1684 'files': lambda r: repo[r].files(),
1681 1685 'parents': lambda r: repo[r].parents(),
1682 1686 'phase': lambda r: repo[r].phase(),
1683 1687 'substate': lambda r: repo[r].substate,
1684 1688 'summary': lambda r: repo[r].description().splitlines()[0],
1685 1689 'diff': lambda r: list(repo[r].diff(git=True)),
1686 1690 }
1687 1691 for info in fields:
1688 1692 getfield = _funcs.get(info, None)
1689 1693 if getfield is None:
1690 1694 raise error.ParseError(
1691 1695 # i18n: "matching" is a keyword
1692 1696 _("unexpected field name passed to matching: %s") % info)
1693 1697 getfieldfuncs.append(getfield)
1694 1698 # convert the getfield array of functions into a "getinfo" function
1695 1699 # which returns an array of field values (or a single value if there
1696 1700 # is only one field to match)
1697 1701 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1698 1702
1699 1703 def matches(x):
1700 1704 for rev in revs:
1701 1705 target = getinfo(rev)
1702 1706 match = True
1703 1707 for n, f in enumerate(getfieldfuncs):
1704 1708 if target[n] != f(x):
1705 1709 match = False
1706 1710 if match:
1707 1711 return True
1708 1712 return False
1709 1713
1710 1714 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
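# Typical usage, as revset expressions (a sketch; 'tip' is just an example
# starting revision):
#
#   matching(tip)                  - same user, description and date as tip
#   matching(tip, "author date")   - same author and commit date as tip
#   matching(tip, "files diff")    - same file list and content changes as tip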
1711 1715
1712 1716 @predicate('reverse(set)', safe=True, takeorder=True)
1713 1717 def reverse(repo, subset, x, order):
1714 1718 """Reverse order of set.
1715 1719 """
1716 1720 l = getset(repo, subset, x)
1717 1721 if order == defineorder:
1718 1722 l.reverse()
1719 1723 return l
1720 1724
1721 1725 @predicate('roots(set)', safe=True)
1722 1726 def roots(repo, subset, x):
1723 1727 """Changesets in set with no parent changeset in set.
1724 1728 """
1725 1729 s = getset(repo, fullreposet(repo), x)
1726 1730 parents = repo.changelog.parentrevs
1727 1731 def filter(r):
1728 1732 for p in parents(r):
1729 1733 if 0 <= p and p in s:
1730 1734 return False
1731 1735 return True
1732 1736 return subset & s.filter(filter, condrepr='<roots>')
1733 1737
1734 1738 _sortkeyfuncs = {
1735 1739 'rev': lambda c: c.rev(),
1736 1740 'branch': lambda c: c.branch(),
1737 1741 'desc': lambda c: c.description(),
1738 1742 'user': lambda c: c.user(),
1739 1743 'author': lambda c: c.user(),
1740 1744 'date': lambda c: c.date()[0],
1741 1745 }
1742 1746
1743 1747 def _getsortargs(x):
1744 1748 """Parse sort options into (set, [(key, reverse)], opts)"""
1745 1749 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1746 1750 if 'set' not in args:
1747 1751 # i18n: "sort" is a keyword
1748 1752 raise error.ParseError(_('sort requires one or two arguments'))
1749 1753 keys = "rev"
1750 1754 if 'keys' in args:
1751 1755 # i18n: "sort" is a keyword
1752 1756 keys = getstring(args['keys'], _("sort spec must be a string"))
1753 1757
1754 1758 keyflags = []
1755 1759 for k in keys.split():
1756 1760 fk = k
1757 1761 reverse = (k[0] == '-')
1758 1762 if reverse:
1759 1763 k = k[1:]
1760 1764 if k not in _sortkeyfuncs and k != 'topo':
1761 1765 raise error.ParseError(_("unknown sort key %r") % fk)
1762 1766 keyflags.append((k, reverse))
1763 1767
1764 1768 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1765 1769 # i18n: "topo" is a keyword
1766 1770 raise error.ParseError(_('topo sort order cannot be combined '
1767 1771 'with other sort keys'))
1768 1772
1769 1773 opts = {}
1770 1774 if 'topo.firstbranch' in args:
1771 1775 if any(k == 'topo' for k, reverse in keyflags):
1772 1776 opts['topo.firstbranch'] = args['topo.firstbranch']
1773 1777 else:
1774 1778 # i18n: "topo" and "topo.firstbranch" are keywords
1775 1779 raise error.ParseError(_('topo.firstbranch can only be used '
1776 1780 'when using the topo sort key'))
1777 1781
1778 1782 return args['set'], keyflags, opts
1779 1783
1780 1784 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
1781 1785 def sort(repo, subset, x, order):
1782 1786 """Sort set by keys. The default sort order is ascending, specify a key
1783 1787 as ``-key`` to sort in descending order.
1784 1788
1785 1789 The keys can be:
1786 1790
1787 1791 - ``rev`` for the revision number,
1788 1792 - ``branch`` for the branch name,
1789 1793 - ``desc`` for the commit message (description),
1790 1794 - ``user`` for user name (``author`` can be used as an alias),
1791 1795 - ``date`` for the commit date,
1792 1796 - ``topo`` for a reverse topological sort
1793 1797
1794 1798 The ``topo`` sort order cannot be combined with other sort keys. This sort
1795 1799 takes one optional argument, ``topo.firstbranch``, which accepts a revset
1796 1800 specifying which topological branches to prioritize in the sort.
1797 1801
1798 1802 """
1799 1803 s, keyflags, opts = _getsortargs(x)
1800 1804 revs = getset(repo, subset, s)
1801 1805
1802 1806 if not keyflags or order != defineorder:
1803 1807 return revs
1804 1808 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1805 1809 revs.sort(reverse=keyflags[0][1])
1806 1810 return revs
1807 1811 elif keyflags[0][0] == "topo":
1808 1812 firstbranch = ()
1809 1813 if 'topo.firstbranch' in opts:
1810 1814 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1811 1815 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1812 1816 istopo=True)
1813 1817 if keyflags[0][1]:
1814 1818 revs.reverse()
1815 1819 return revs
1816 1820
1817 1821 # sort() is guaranteed to be stable
1818 1822 ctxs = [repo[r] for r in revs]
1819 1823 for k, reverse in reversed(keyflags):
1820 1824 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1821 1825 return baseset([c.rev() for c in ctxs])
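# Typical usage, as revset expressions (a sketch; the 'default' branch name
# is only an example):
#
#   sort(all(), date)          - ascending by commit date
#   sort(all(), "-date user")  - newest first, ties broken by user name
#   sort(all(), topo, topo.firstbranch=branch(default))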
1822 1826
1823 1827 def _toposort(revs, parentsfunc, firstbranch=()):
1824 1828 """Yield revisions from heads to roots one (topo) branch at a time.
1825 1829
1826 1830 This function aims to be used by a graph generator that wishes to minimize
1827 1831 the number of parallel branches and their interleaving.
1828 1832
1829 1833 Example iteration order (numbers show the "true" order in a changelog):
1830 1834
1831 1835 o 4
1832 1836 |
1833 1837 o 1
1834 1838 |
1835 1839 | o 3
1836 1840 | |
1837 1841 | o 2
1838 1842 |/
1839 1843 o 0
1840 1844
1841 1845 Note that the ancestors of merges are understood by the current
1842 1846 algorithm to be on the same branch. This means no reordering will
1843 1847 occur behind a merge.
1844 1848 """
1845 1849
1846 1850 ### Quick summary of the algorithm
1847 1851 #
1848 1852 # This function is based around a "retention" principle. We keep revisions
1849 1853 # in memory until we are ready to emit a whole branch that immediately
1850 1854 # "merges" into an existing one. This reduces the number of parallel
1851 1855 # branches with interleaved revisions.
1852 1856 #
1853 1857 # During iteration revs are split into two groups:
1854 1858 # A) revisions already emitted
1855 1859 # B) revisions in "retention"; they are stored as distinct subgroups.
1856 1860 #
1857 1861 # For each REV, we apply the following logic:
1858 1862 #
1859 1863 # 1) if REV is a parent of (A), we will emit it. If there is a
1860 1864 # retention group ((B) above) that is blocked on REV being
1861 1865 # available, we emit all the revisions out of that retention
1862 1866 # group first.
1863 1867 #
1864 1868 # 2) otherwise, we search for a subgroup in (B) waiting for REV to become
1865 1869 # available; if such a subgroup exists, we add REV to it, and the subgroup
1866 1870 # is then waiting for REV.parents() to become available.
1867 1871 #
1868 1872 # 3) finally, if no such subgroup exists in (B), we create a new one.
1869 1873 #
1870 1874 #
1871 1875 # To bootstrap the algorithm, we emit the tipmost revision (which
1872 1876 # puts it in group (A) from above).
1873 1877
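# Worked example (a sketch; parentsfunc stubbed with a plain dict for the
# graph shown in the docstring, where 4 and 3 are heads and 0 is the root):
#
#   parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#   -> [4, 1, 3, 2, 0]    # the 4-1 branch is emitted before the 3-2 branch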
1874 1878 revs.sort(reverse=True)
1875 1879
1876 1880 # Set of parents of revisions that have been emitted. They can be considered
1877 1881 # unblocked, as the graph generator is already aware of them, so there is no
1878 1882 # need to delay the revisions that reference them.
1879 1883 #
1880 1884 # If someone wants to prioritize a branch over the others, pre-filling this
1881 1885 # set will force all other branches to wait until this branch is ready to be
1882 1886 # emitted.
1883 1887 unblocked = set(firstbranch)
1884 1888
1885 1889 # list of groups waiting to be displayed, each group is defined by:
1886 1890 #
1887 1891 # (revs: lists of revs waiting to be displayed,
1888 1892 # blocked: set of revs that cannot be displayed before those in 'revs')
1889 1893 #
1890 1894 # The second value ('blocked') corresponds to the parents of any revision in
1891 1895 # the group ('revs') that are not themselves contained in the group. The main
1892 1896 # idea of this algorithm is to delay the emission of any revision as much as
1893 1897 # possible: the revs in a group are displayed only at the moment we are about
1894 1898 # to display their parents.
1895 1899 #
1896 1900 # This first implementation is smart until it encounters a merge: it will
1897 1901 # emit revs as soon as any parent is about to be emitted and can grow an
1898 1902 # arbitrary number of revs in 'blocked'. In practice this means we properly
1899 1903 # retain new branches but give up on any special ordering for ancestors
1900 1904 # of merges. The implementation can be improved to handle this better.
1901 1905 #
1902 1906 # The first subgroup is special. It corresponds to all the revisions that
1903 1907 # were already emitted. Its 'revs' list is expected to be empty, and its
1904 1908 # 'blocked' set contains the parent revisions of already emitted revisions.
1905 1909 #
1906 1910 # You could pre-seed the <parents> set of groups[0] with specific
1907 1911 # changesets to select what the first emitted branch should be.
1908 1912 groups = [([], unblocked)]
1909 1913 pendingheap = []
1910 1914 pendingset = set()
1911 1915
1912 1916 heapq.heapify(pendingheap)
1913 1917 heappop = heapq.heappop
1914 1918 heappush = heapq.heappush
1915 1919 for currentrev in revs:
1916 1920 # The heap pops the smallest element, but we want the highest, so we invert
1917 1921 if currentrev not in pendingset:
1918 1922 heappush(pendingheap, -currentrev)
1919 1923 pendingset.add(currentrev)
1920 1924 # iterate on pending revs until the current rev has been
1921 1925 # processed.
1922 1926 rev = None
1923 1927 while rev != currentrev:
1924 1928 rev = -heappop(pendingheap)
1925 1929 pendingset.remove(rev)
1926 1930
1927 1931 # Look for a blocked subgroup waiting for the current revision.
1928 1932 matching = [i for i, g in enumerate(groups) if rev in g[1]]
1929 1933
1930 1934 if matching:
1931 1935 # The main idea is to gather together all sets that are blocked
1932 1936 # on the same revision.
1933 1937 #
1934 1938 # Groups are merged when a common blocking ancestor is
1935 1939 # observed. For example, given two groups:
1936 1940 #
1937 1941 # revs [5, 4] waiting for 1
1938 1942 # revs [3, 2] waiting for 1
1939 1943 #
1940 1944 # These two groups will be merged when we process
1941 1945 # 1. In theory, we could have merged the groups when
1942 1946 # we added 2 to the group it is now in (we could have
1943 1947 # noticed the groups were both blocked on 1 then), but
1944 1948 # the way it works now makes the algorithm simpler.
1945 1949 #
1946 1950 # We also always keep the oldest subgroup first. We can
1947 1951 # probably improve the behavior by having the longest set
1948 1952 # first. That way, graph algorithms could minimise the length
1949 1953 # of parallel lines in their drawing. This is currently not done.
1950 1954 targetidx = matching.pop(0)
1951 1955 trevs, tparents = groups[targetidx]
1952 1956 for i in matching:
1953 1957 gr = groups[i]
1954 1958 trevs.extend(gr[0])
1955 1959 tparents |= gr[1]
1956 1960 # delete all merged subgroups (except the one we kept)
1957 1961 # (starting from the last subgroup for performance and
1958 1962 # sanity reasons)
1959 1963 for i in reversed(matching):
1960 1964 del groups[i]
1961 1965 else:
1962 1966 # This is a new head. We create a new subgroup for it.
1963 1967 targetidx = len(groups)
1964 1968 groups.append(([], set([rev])))
1965 1969
1966 1970 gr = groups[targetidx]
1967 1971
1968 1972 # We now add the current nodes to this subgroup. This is done
1969 1973 # after the subgroup merging because all elements from a subgroup
1970 1974 # that relied on this rev must precede it.
1971 1975 #
1972 1976 # We also update the <parents> set to include the parents of the
1973 1977 # new nodes.
1974 1978 if rev == currentrev: # only display stuff in rev
1975 1979 gr[0].append(rev)
1976 1980 gr[1].remove(rev)
1977 1981 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
1978 1982 gr[1].update(parents)
1979 1983 for p in parents:
1980 1984 if p not in pendingset:
1981 1985 pendingset.add(p)
1982 1986 heappush(pendingheap, -p)
1983 1987
1984 1988 # Look for a subgroup to display
1985 1989 #
1986 1990 # When unblocked is empty (if clause), we were not waiting for any
1987 1991 # revisions during the first iteration (if no priority was given) or
1988 1992 # if we emitted a whole disconnected set of the graph (reached a
1989 1993 # root). In that case we arbitrarily take the oldest known
1990 1994 # subgroup. The heuristic could probably be better.
1991 1995 #
1992 1996 # Otherwise (elif clause) if the subgroup is blocked on
1993 1997 # a revision we just emitted, we can safely emit it as
1994 1998 # well.
1995 1999 if not unblocked:
1996 2000 if len(groups) > 1: # display other subset
1997 2001 targetidx = 1
1998 2002 gr = groups[1]
1999 2003 elif not gr[1] & unblocked:
2000 2004 gr = None
2001 2005
2002 2006 if gr is not None:
2003 2007 # update the set of awaited revisions with the one from the
2004 2008 # subgroup
2005 2009 unblocked |= gr[1]
2006 2010 # output all revisions in the subgroup
2007 2011 for r in gr[0]:
2008 2012 yield r
2009 2013 # delete the subgroup we just output,
2010 2014 # unless it is groups[0], in which case we just empty it.
2011 2015 if targetidx:
2012 2016 del groups[targetidx]
2013 2017 else:
2014 2018 gr[0][:] = []
2015 2019 # Check if we have some subgroup waiting for revisions we are not going to
2016 2020 # iterate over
2017 2021 for g in groups:
2018 2022 for r in g[0]:
2019 2023 yield r
2020 2024
2021 2025 @predicate('subrepo([pattern])')
2022 2026 def subrepo(repo, subset, x):
2023 2027 """Changesets that add, modify or remove the given subrepo. If no subrepo
2024 2028 pattern is named, any subrepo changes are returned.
2025 2029 """
2026 2030 # i18n: "subrepo" is a keyword
2027 2031 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2028 2032 pat = None
2029 2033 if len(args) != 0:
2030 2034 pat = getstring(args[0], _("subrepo requires a pattern"))
2031 2035
2032 2036 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2033 2037
2034 2038 def submatches(names):
2035 2039 k, p, m = util.stringmatcher(pat)
2036 2040 for name in names:
2037 2041 if m(name):
2038 2042 yield name
2039 2043
2040 2044 def matches(x):
2041 2045 c = repo[x]
2042 2046 s = repo.status(c.p1().node(), c.node(), match=m)
2043 2047
2044 2048 if pat is None:
2045 2049 return s.added or s.modified or s.removed
2046 2050
2047 2051 if s.added:
2048 2052 return any(submatches(c.substate.keys()))
2049 2053
2050 2054 if s.modified:
2051 2055 subs = set(c.p1().substate.keys())
2052 2056 subs.update(c.substate.keys())
2053 2057
2054 2058 for path in submatches(subs):
2055 2059 if c.p1().substate.get(path) != c.substate.get(path):
2056 2060 return True
2057 2061
2058 2062 if s.removed:
2059 2063 return any(submatches(c.p1().substate.keys()))
2060 2064
2061 2065 return False
2062 2066
2063 2067 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2064 2068
2065 2069 def _substringmatcher(pattern, casesensitive=True):
2066 2070 kind, pattern, matcher = util.stringmatcher(pattern,
2067 2071 casesensitive=casesensitive)
2068 2072 if kind == 'literal':
2069 2073 if not casesensitive:
2070 2074 pattern = encoding.lower(pattern)
2071 2075 matcher = lambda s: pattern in encoding.lower(s)
2072 2076 else:
2073 2077 matcher = lambda s: pattern in s
2074 2078 return kind, pattern, matcher
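# For example (a sketch of the case-insensitive literal path above):
#
#   kind, pat, m = _substringmatcher('Alice', casesensitive=False)
#   m('ALICE <alice@example.com>')   # -> True, both sides are lowercased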
2075 2079
2076 2080 @predicate('tag([name])', safe=True)
2077 2081 def tag(repo, subset, x):
2078 2082 """The specified tag by name, or all tagged revisions if no name is given.
2079 2083
2080 2084 Pattern matching is supported for `name`. See
2081 2085 :hg:`help revisions.patterns`.
2082 2086 """
2083 2087 # i18n: "tag" is a keyword
2084 2088 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2085 2089 cl = repo.changelog
2086 2090 if args:
2087 2091 pattern = getstring(args[0],
2088 2092 # i18n: "tag" is a keyword
2089 2093 _('the argument to tag must be a string'))
2090 2094 kind, pattern, matcher = util.stringmatcher(pattern)
2091 2095 if kind == 'literal':
2092 2096 # avoid resolving all tags
2093 2097 tn = repo._tagscache.tags.get(pattern, None)
2094 2098 if tn is None:
2095 2099 raise error.RepoLookupError(_("tag '%s' does not exist")
2096 2100 % pattern)
2097 2101 s = set([repo[tn].rev()])
2098 2102 else:
2099 2103 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2100 2104 else:
2101 2105 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2102 2106 return subset & s
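# Typical usage, as revset expressions (a sketch; the tag names are only
# examples):
#
#   tag("1.0")        - the revision carrying the literal tag "1.0"
#   tag("re:^1\.")    - revisions whose tag matches the regular expression
#   tag()             - every tagged revision (the synthetic "tip" is excluded)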
2103 2107
2104 2108 @predicate('tagged', safe=True)
2105 2109 def tagged(repo, subset, x):
2106 2110 return tag(repo, subset, x)
2107 2111
2108 2112 @predicate('unstable()', safe=True)
2109 2113 def unstable(repo, subset, x):
2110 2114 """Non-obsolete changesets with obsolete ancestors.
2111 2115 """
2112 2116 # i18n: "unstable" is a keyword
2113 2117 getargs(x, 0, 0, _("unstable takes no arguments"))
2114 2118 unstables = obsmod.getrevs(repo, 'unstable')
2115 2119 return subset & unstables
2116 2120
2117 2121
2118 2122 @predicate('user(string)', safe=True)
2119 2123 def user(repo, subset, x):
2120 2124 """User name contains string. The match is case-insensitive.
2121 2125
2122 2126 Pattern matching is supported for `string`. See
2123 2127 :hg:`help revisions.patterns`.
2124 2128 """
2125 2129 return author(repo, subset, x)
2126 2130
2127 2131 @predicate('wdir', safe=True)
2128 2132 def wdir(repo, subset, x):
2129 2133 """Working directory. (EXPERIMENTAL)"""
2130 2134 # i18n: "wdir" is a keyword
2131 2135 getargs(x, 0, 0, _("wdir takes no arguments"))
2132 2136 if node.wdirrev in subset or isinstance(subset, fullreposet):
2133 2137 return baseset([node.wdirrev])
2134 2138 return baseset()
2135 2139
2136 2140 def _orderedlist(repo, subset, x):
2137 2141 s = getstring(x, "internal error")
2138 2142 if not s:
2139 2143 return baseset()
2140 2144 # Remove duplicates here; it's difficult for the caller to deduplicate sets
2141 2145 # because different symbols can point to the same rev.
2142 2146 cl = repo.changelog
2143 2147 ls = []
2144 2148 seen = set()
2145 2149 for t in s.split('\0'):
2146 2150 try:
2147 2151 # fast path for integer revision
2148 2152 r = int(t)
2149 2153 if str(r) != t or r not in cl:
2150 2154 raise ValueError
2151 2155 revs = [r]
2152 2156 except ValueError:
2153 2157 revs = stringset(repo, subset, t)
2154 2158
2155 2159 for r in revs:
2156 2160 if r in seen:
2157 2161 continue
2158 2162 if (r in subset
2159 2163 or r == node.nullrev and isinstance(subset, fullreposet)):
2160 2164 ls.append(r)
2161 2165 seen.add(r)
2162 2166 return baseset(ls)
2163 2167
2164 2168 # for internal use
2165 2169 @predicate('_list', safe=True, takeorder=True)
2166 2170 def _list(repo, subset, x, order):
2167 2171 if order == followorder:
2168 2172 # slow path to take the subset order
2169 2173 return subset & _orderedlist(repo, fullreposet(repo), x)
2170 2174 else:
2171 2175 return _orderedlist(repo, subset, x)
2172 2176
2173 2177 def _orderedintlist(repo, subset, x):
2174 2178 s = getstring(x, "internal error")
2175 2179 if not s:
2176 2180 return baseset()
2177 2181 ls = [int(r) for r in s.split('\0')]
2178 2182 s = subset
2179 2183 return baseset([r for r in ls if r in s])
2180 2184
2181 2185 # for internal use
2182 2186 @predicate('_intlist', safe=True, takeorder=True)
2183 2187 def _intlist(repo, subset, x, order):
2184 2188 if order == followorder:
2185 2189 # slow path to take the subset order
2186 2190 return subset & _orderedintlist(repo, fullreposet(repo), x)
2187 2191 else:
2188 2192 return _orderedintlist(repo, subset, x)
2189 2193
2190 2194 def _orderedhexlist(repo, subset, x):
2191 2195 s = getstring(x, "internal error")
2192 2196 if not s:
2193 2197 return baseset()
2194 2198 cl = repo.changelog
2195 2199 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2196 2200 s = subset
2197 2201 return baseset([r for r in ls if r in s])
2198 2202
2199 2203 # for internal use
2200 2204 @predicate('_hexlist', safe=True, takeorder=True)
2201 2205 def _hexlist(repo, subset, x, order):
2202 2206 if order == followorder:
2203 2207 # slow path to take the subset order
2204 2208 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2205 2209 else:
2206 2210 return _orderedhexlist(repo, subset, x)
2207 2211
2208 2212 methods = {
2209 2213 "range": rangeset,
2210 2214 "rangeall": rangeall,
2211 2215 "rangepre": rangepre,
2212 2216 "rangepost": rangepost,
2213 2217 "dagrange": dagrange,
2214 2218 "string": stringset,
2215 2219 "symbol": stringset,
2216 2220 "and": andset,
2217 2221 "or": orset,
2218 2222 "not": notset,
2219 2223 "difference": differenceset,
2220 2224 "list": listset,
2221 2225 "keyvalue": keyvaluepair,
2222 2226 "func": func,
2223 2227 "ancestor": ancestorspec,
2224 2228 "parent": parentspec,
2225 2229 "parentpost": parentpost,
2226 2230 }
2227 2231
2228 2232 def posttreebuilthook(tree, repo):
2229 2233 # hook for extensions to execute code on the optimized tree
2230 2234 pass
2231 2235
2232 2236 def match(ui, spec, repo=None, order=defineorder):
2233 2237 """Create a matcher for a single revision spec
2234 2238
2235 2239 If order=followorder, the matcher takes the ordering specified by the input
2236 2240 set.
2237 2241 """
2238 2242 return matchany(ui, [spec], repo=repo, order=order)
2239 2243
2240 2244 def matchany(ui, specs, repo=None, order=defineorder):
2241 2245 """Create a matcher that will include any revisions matching one of the
2242 2246 given specs
2243 2247
2244 2248 If order=followorder, the matcher takes the ordering specified by the input
2245 2249 set.
2246 2250 """
2247 2251 if not specs:
2248 2252 def mfunc(repo, subset=None):
2249 2253 return baseset()
2250 2254 return mfunc
2251 2255 if not all(specs):
2252 2256 raise error.ParseError(_("empty query"))
2253 2257 lookup = None
2254 2258 if repo:
2255 2259 lookup = repo.__contains__
2256 2260 if len(specs) == 1:
2257 2261 tree = revsetlang.parse(specs[0], lookup)
2258 2262 else:
2259 2263 tree = ('or',
2260 2264 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2261 2265
2262 2266 if ui:
2263 2267 tree = revsetlang.expandaliases(ui, tree)
2264 2268 tree = revsetlang.foldconcat(tree)
2265 2269 tree = revsetlang.analyze(tree, order)
2266 2270 tree = revsetlang.optimize(tree)
2267 2271 posttreebuilthook(tree, repo)
2268 2272 return makematcher(tree)
2269 2273
2270 2274 def makematcher(tree):
2271 2275 """Create a matcher from an evaluatable tree"""
2272 2276 def mfunc(repo, subset=None):
2273 2277 if subset is None:
2274 2278 subset = fullreposet(repo)
2275 2279 return getset(repo, subset, tree)
2276 2280 return mfunc
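# Sketch of how these entry points are typically driven (assumes a loaded
# `repo` and `ui`; the spec string is only an example):
#
#   m = match(ui, 'draft() and user("alice")', repo=repo)
#   revs = m(repo)                             # smartset over the full repo
#   revs = m(repo, subset=baseset([1, 2, 3]))  # evaluate against a subset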
2277 2281
2278 2282 def loadpredicate(ui, extname, registrarobj):
2279 2283 """Load revset predicates from specified registrarobj
2280 2284 """
2281 2285 for name, func in registrarobj._table.iteritems():
2282 2286 symbols[name] = func
2283 2287 if func._safe:
2284 2288 safesymbols.add(name)
2285 2289
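# Sketch of the extension side feeding this loader (the predicate name is
# hypothetical, for illustration only):
#
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('examplepredicate()', safe=True)
#   def examplepredicate(repo, subset, x):
#       return subset
#
# loadpredicate() is then invoked with that registrar object, making the new
# symbol available to revset expressions.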
2286 2290 # load built-in predicates explicitly to setup safesymbols
2287 2291 loadpredicate(None, None, predicate)
2288 2292
2289 2293 # tell hggettext to extract docstrings from these functions:
2290 2294 i18nfunctions = symbols.values()
@@ -1,831 +1,831
1 1 $ HGMERGE=true; export HGMERGE
2 2
3 3 init
4 4
5 5 $ hg init repo
6 6 $ cd repo
7 7
8 8 commit
9 9
10 10 $ echo 'a' > a
11 11 $ hg ci -A -m test -u nobody -d '1 0'
12 12 adding a
13 13
14 14 annotate -c
15 15
16 16 $ hg annotate -c a
17 17 8435f90966e4: a
18 18
19 19 annotate -cl
20 20
21 21 $ hg annotate -cl a
22 22 8435f90966e4:1: a
23 23
24 24 annotate -d
25 25
26 26 $ hg annotate -d a
27 27 Thu Jan 01 00:00:01 1970 +0000: a
28 28
29 29 annotate -n
30 30
31 31 $ hg annotate -n a
32 32 0: a
33 33
34 34 annotate -nl
35 35
36 36 $ hg annotate -nl a
37 37 0:1: a
38 38
39 39 annotate -u
40 40
41 41 $ hg annotate -u a
42 42 nobody: a
43 43
44 44 annotate -cdnu
45 45
46 46 $ hg annotate -cdnu a
47 47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48 48
49 49 annotate -cdnul
50 50
51 51 $ hg annotate -cdnul a
52 52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53 53
54 54 annotate (JSON)
55 55
56 56 $ hg annotate -Tjson a
57 57 [
58 58 {
59 59 "line": "a\n",
60 60 "rev": 0
61 61 }
62 62 ]
63 63
64 64 $ hg annotate -Tjson -cdfnul a
65 65 [
66 66 {
67 67 "date": [1.0, 0],
68 68 "file": "a",
69 69 "line": "a\n",
70 70 "line_number": 1,
71 71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
72 72 "rev": 0,
73 73 "user": "nobody"
74 74 }
75 75 ]
76 76
77 77 $ cat <<EOF >>a
78 78 > a
79 79 > a
80 80 > EOF
81 81 $ hg ci -ma1 -d '1 0'
82 82 $ hg cp a b
83 83 $ hg ci -mb -d '1 0'
84 84 $ cat <<EOF >> b
85 85 > b4
86 86 > b5
87 87 > b6
88 88 > EOF
89 89 $ hg ci -mb2 -d '2 0'
90 90
91 91 annotate -n b
92 92
93 93 $ hg annotate -n b
94 94 0: a
95 95 1: a
96 96 1: a
97 97 3: b4
98 98 3: b5
99 99 3: b6
100 100
101 101 annotate --no-follow b
102 102
103 103 $ hg annotate --no-follow b
104 104 2: a
105 105 2: a
106 106 2: a
107 107 3: b4
108 108 3: b5
109 109 3: b6
110 110
111 111 annotate -nl b
112 112
113 113 $ hg annotate -nl b
114 114 0:1: a
115 115 1:2: a
116 116 1:3: a
117 117 3:4: b4
118 118 3:5: b5
119 119 3:6: b6
120 120
121 121 annotate -nf b
122 122
123 123 $ hg annotate -nf b
124 124 0 a: a
125 125 1 a: a
126 126 1 a: a
127 127 3 b: b4
128 128 3 b: b5
129 129 3 b: b6
130 130
131 131 annotate -nlf b
132 132
133 133 $ hg annotate -nlf b
134 134 0 a:1: a
135 135 1 a:2: a
136 136 1 a:3: a
137 137 3 b:4: b4
138 138 3 b:5: b5
139 139 3 b:6: b6
140 140
141 141 $ hg up -C 2
142 142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 143 $ cat <<EOF >> b
144 144 > b4
145 145 > c
146 146 > b5
147 147 > EOF
148 148 $ hg ci -mb2.1 -d '2 0'
149 149 created new head
150 150 $ hg merge
151 151 merging b
152 152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
153 153 (branch merge, don't forget to commit)
154 154 $ hg ci -mmergeb -d '3 0'
155 155
156 156 annotate after merge
157 157
158 158 $ hg annotate -nf b
159 159 0 a: a
160 160 1 a: a
161 161 1 a: a
162 162 3 b: b4
163 163 4 b: c
164 164 3 b: b5
165 165
166 166 annotate after merge with -l
167 167
168 168 $ hg annotate -nlf b
169 169 0 a:1: a
170 170 1 a:2: a
171 171 1 a:3: a
172 172 3 b:4: b4
173 173 4 b:5: c
174 174 3 b:5: b5
175 175
176 176 $ hg up -C 1
177 177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
178 178 $ hg cp a b
179 179 $ cat <<EOF > b
180 180 > a
181 181 > z
182 182 > a
183 183 > EOF
184 184 $ hg ci -mc -d '3 0'
185 185 created new head
186 186 $ hg merge
187 187 merging b
188 188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
189 189 (branch merge, don't forget to commit)
190 190 $ cat <<EOF >> b
191 191 > b4
192 192 > c
193 193 > b5
194 194 > EOF
195 195 $ echo d >> b
196 196 $ hg ci -mmerge2 -d '4 0'
197 197
198 198 annotate after rename merge
199 199
200 200 $ hg annotate -nf b
201 201 0 a: a
202 202 6 b: z
203 203 1 a: a
204 204 3 b: b4
205 205 4 b: c
206 206 3 b: b5
207 207 7 b: d
208 208
209 209 annotate after rename merge with -l
210 210
211 211 $ hg annotate -nlf b
212 212 0 a:1: a
213 213 6 b:2: z
214 214 1 a:3: a
215 215 3 b:4: b4
216 216 4 b:5: c
217 217 3 b:5: b5
218 218 7 b:7: d
219 219
220 220 Issue2807: alignment of line numbers with -l
221 221
222 222 $ echo more >> b
223 223 $ hg ci -mmore -d '5 0'
224 224 $ echo more >> b
225 225 $ hg ci -mmore -d '6 0'
226 226 $ echo more >> b
227 227 $ hg ci -mmore -d '7 0'
228 228 $ hg annotate -nlf b
229 229 0 a: 1: a
230 230 6 b: 2: z
231 231 1 a: 3: a
232 232 3 b: 4: b4
233 233 4 b: 5: c
234 234 3 b: 5: b5
235 235 7 b: 7: d
236 236 8 b: 8: more
237 237 9 b: 9: more
238 238 10 b:10: more
239 239
240 240 linkrev vs rev
241 241
242 242 $ hg annotate -r tip -n a
243 243 0: a
244 244 1: a
245 245 1: a
246 246
247 247 linkrev vs rev with -l
248 248
249 249 $ hg annotate -r tip -nl a
250 250 0:1: a
251 251 1:2: a
252 252 1:3: a
253 253
254 254 Issue589: "undelete" sequence leads to crash
255 255
256 256 annotate was crashing when trying to --follow something
257 257
258 258 like A -> B -> A
259 259
260 260 generate ABA rename configuration
261 261
262 262 $ echo foo > foo
263 263 $ hg add foo
264 264 $ hg ci -m addfoo
265 265 $ hg rename foo bar
266 266 $ hg ci -m renamefoo
267 267 $ hg rename bar foo
268 268 $ hg ci -m renamebar
269 269
270 270 annotate after ABA with follow
271 271
272 272 $ hg annotate --follow foo
273 273 foo: foo
274 274
275 275 missing file
276 276
277 277 $ hg ann nosuchfile
278 278 abort: nosuchfile: no such file in rev e9e6b4fa872f
279 279 [255]
280 280
281 281 annotate file without '\n' on last line
282 282
283 283 $ printf "" > c
284 284 $ hg ci -A -m test -u nobody -d '1 0'
285 285 adding c
286 286 $ hg annotate c
287 287 $ printf "a\nb" > c
288 288 $ hg ci -m test
289 289 $ hg annotate c
290 290 [0-9]+: a (re)
291 291 [0-9]+: b (re)
292 292
293 293 Issue3841: check annotation of a file whose filelog includes a merge
294 294 between the revision and its ancestor
295 295
296 296 to reproduce the situation with recent Mercurial, this script uses (1)
297 297 "hg debugsetparents" to merge without the ancestor check done by "hg merge",
298 298 and (2) an extension that allows filelog merging between the revision
299 299 and its ancestor by overriding "repo._filecommit".
300 300
301 301 $ cat > ../legacyrepo.py <<EOF
302 302 > from mercurial import node, error
303 303 > def reposetup(ui, repo):
304 304 > class legacyrepo(repo.__class__):
305 305 > def _filecommit(self, fctx, manifest1, manifest2,
306 306 > linkrev, tr, changelist):
307 307 > fname = fctx.path()
308 308 > text = fctx.data()
309 309 > flog = self.file(fname)
310 310 > fparent1 = manifest1.get(fname, node.nullid)
311 311 > fparent2 = manifest2.get(fname, node.nullid)
312 312 > meta = {}
313 313 > copy = fctx.renamed()
314 314 > if copy and copy[0] != fname:
315 315 > raise error.Abort('copying is not supported')
316 316 > if fparent2 != node.nullid:
317 317 > changelist.append(fname)
318 318 > return flog.add(text, meta, tr, linkrev,
319 319 > fparent1, fparent2)
320 320 > raise error.Abort('only merging is supported')
321 321 > repo.__class__ = legacyrepo
322 322 > EOF
323 323
324 324 $ cat > baz <<EOF
325 325 > 1
326 326 > 2
327 327 > 3
328 328 > 4
329 329 > 5
330 330 > EOF
331 331 $ hg add baz
332 332 $ hg commit -m "baz:0"
333 333
334 334 $ cat > baz <<EOF
335 335 > 1 baz:1
336 336 > 2
337 337 > 3
338 338 > 4
339 339 > 5
340 340 > EOF
341 341 $ hg commit -m "baz:1"
342 342
343 343 $ cat > baz <<EOF
344 344 > 1 baz:1
345 345 > 2 baz:2
346 346 > 3
347 347 > 4
348 348 > 5
349 349 > EOF
350 350 $ hg debugsetparents 17 17
351 351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
352 352 $ hg debugindexdot .hg/store/data/baz.i
353 353 digraph G {
354 354 -1 -> 0
355 355 0 -> 1
356 356 1 -> 2
357 357 1 -> 2
358 358 }
359 359 $ hg annotate baz
360 360 17: 1 baz:1
361 361 18: 2 baz:2
362 362 16: 3
363 363 16: 4
364 364 16: 5
365 365
366 366 $ cat > baz <<EOF
367 367 > 1 baz:1
368 368 > 2 baz:2
369 369 > 3 baz:3
370 370 > 4
371 371 > 5
372 372 > EOF
373 373 $ hg commit -m "baz:3"
374 374
375 375 $ cat > baz <<EOF
376 376 > 1 baz:1
377 377 > 2 baz:2
378 378 > 3 baz:3
379 379 > 4 baz:4
380 380 > 5
381 381 > EOF
382 382 $ hg debugsetparents 19 18
383 383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
384 384 $ hg debugindexdot .hg/store/data/baz.i
385 385 digraph G {
386 386 -1 -> 0
387 387 0 -> 1
388 388 1 -> 2
389 389 1 -> 2
390 390 2 -> 3
391 391 3 -> 4
392 392 2 -> 4
393 393 }
394 394 $ hg annotate baz
395 395 17: 1 baz:1
396 396 18: 2 baz:2
397 397 19: 3 baz:3
398 398 20: 4 baz:4
399 399 16: 5
400 400
401 401 annotate clean file
402 402
403 403 $ hg annotate -ncr "wdir()" foo
404 404 11 472b18db256d : foo
405 405
406 406 annotate modified file
407 407
408 408 $ echo foofoo >> foo
409 409 $ hg annotate -r "wdir()" foo
410 410 11 : foo
411 411 20+: foofoo
412 412
413 413 $ hg annotate -cr "wdir()" foo
414 414 472b18db256d : foo
415 415 b6bedd5477e7+: foofoo
416 416
417 417 $ hg annotate -ncr "wdir()" foo
418 418 11 472b18db256d : foo
419 419 20 b6bedd5477e7+: foofoo
420 420
421 421 $ hg annotate --debug -ncr "wdir()" foo
422 422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
423 423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
424 424
425 425 $ hg annotate -udr "wdir()" foo
426 426 test Thu Jan 01 00:00:00 1970 +0000: foo
427 427 test [A-Za-z0-9:+ ]+: foofoo (re)
428 428
429 429 $ hg annotate -ncr "wdir()" -Tjson foo
430 430 [
431 431 {
432 432 "line": "foo\n",
433 433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
434 434 "rev": 11
435 435 },
436 436 {
437 437 "line": "foofoo\n",
438 438 "node": null,
439 439 "rev": null
440 440 }
441 441 ]
442 442
443 443 annotate added file
444 444
445 445 $ echo bar > bar
446 446 $ hg add bar
447 447 $ hg annotate -ncr "wdir()" bar
448 448 20 b6bedd5477e7+: bar
449 449
450 450 annotate renamed file
451 451
452 452 $ hg rename foo renamefoo2
453 453 $ hg annotate -ncr "wdir()" renamefoo2
454 454 11 472b18db256d : foo
455 455 20 b6bedd5477e7+: foofoo
456 456
457 457 annotate missing file
458 458
459 459 $ rm baz
460 460 #if windows
461 461 $ hg annotate -ncr "wdir()" baz
462 462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
463 463 [255]
464 464 #else
465 465 $ hg annotate -ncr "wdir()" baz
466 466 abort: No such file or directory: $TESTTMP/repo/baz
467 467 [255]
468 468 #endif
469 469
470 470 annotate removed file
471 471
472 472 $ hg rm baz
473 473 #if windows
474 474 $ hg annotate -ncr "wdir()" baz
475 475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
476 476 [255]
477 477 #else
478 478 $ hg annotate -ncr "wdir()" baz
479 479 abort: No such file or directory: $TESTTMP/repo/baz
480 480 [255]
481 481 #endif
482 482
483 483 $ hg revert --all --no-backup --quiet
484 484 $ hg id -n
485 485 20
486 486
487 487 Test followlines() revset; we usually check both followlines(pat, range) and
488 488 followlines(pat, range, descend=True) to make sure both give the same result
489 489 when they should.
490 490
491 491 $ echo a >> foo
492 492 $ hg ci -m 'foo: add a'
493 493 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5)'
494 494 16: baz:0
495 495 19: baz:3
496 496 20: baz:4
497 497 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=20)'
498 498 16: baz:0
499 499 19: baz:3
500 500 20: baz:4
501 501 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19)'
502 502 16: baz:0
503 503 19: baz:3
504 504 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=True)'
505 505 19: baz:3
506 506 20: baz:4
507 507 $ printf "0\n0\n" | cat - baz > baz1
508 508 $ mv baz1 baz
509 509 $ hg ci -m 'added two lines with 0'
510 510 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
511 511 16: baz:0
512 512 19: baz:3
513 513 20: baz:4
514 514 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, descend=true, startrev=19)'
515 515 19: baz:3
516 516 20: baz:4
517 517 $ echo 6 >> baz
518 518 $ hg ci -m 'added line 8'
519 519 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
520 520 16: baz:0
521 521 19: baz:3
522 522 20: baz:4
523 523 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=19, descend=1)'
524 524 19: baz:3
525 525 20: baz:4
526 526 $ sed 's/3/3+/' baz > baz.new
527 527 $ mv baz.new baz
528 528 $ hg ci -m 'baz:3->3+'
529 529 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, descend=0)'
530 530 16: baz:0
531 531 19: baz:3
532 532 20: baz:4
533 533 24: baz:3->3+
534 534 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3:5, startrev=17, descend=True)'
535 535 19: baz:3
536 536 20: baz:4
537 537 24: baz:3->3+
538 538 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1:2, descend=false)'
539 539 22: added two lines with 0
540 540
541 541 file patterns are okay
542 542 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1:2)'
543 543 22: added two lines with 0
544 544
545 545 renames are followed
546 546 $ hg mv baz qux
547 547 $ sed 's/4/4+/' qux > qux.new
548 548 $ mv qux.new qux
549 549 $ hg ci -m 'qux:4->4+'
550 550 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
551 551 16: baz:0
552 552 19: baz:3
553 553 20: baz:4
554 554 24: baz:3->3+
555 555 25: qux:4->4+
556 556
557 557 but are missed when following children
558 558 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=22, descend=True)'
559 559 24: baz:3->3+
560 560
561 561 merge
562 562 $ hg up 24 --quiet
563 563 $ echo 7 >> baz
564 564 $ hg ci -m 'one more line, out of line range'
565 565 created new head
566 566 $ sed 's/3+/3-/' baz > baz.new
567 567 $ mv baz.new baz
568 568 $ hg ci -m 'baz:3+->3-'
569 569 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7)'
570 570 16: baz:0
571 571 19: baz:3
572 572 20: baz:4
573 573 24: baz:3->3+
574 574 27: baz:3+->3-
575 575 $ hg merge 25
576 576 merging baz and qux to qux
577 577 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
578 578 (branch merge, don't forget to commit)
579 579 $ hg ci -m merge
580 580 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
581 581 16: baz:0
582 582 19: baz:3
583 583 20: baz:4
584 584 24: baz:3->3+
585 585 25: qux:4->4+
586 586 27: baz:3+->3-
587 587 28: merge
588 588 $ hg up 25 --quiet
589 589 $ hg merge 27
590 590 merging qux and baz to qux
591 591 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
592 592 (branch merge, don't forget to commit)
593 593 $ hg ci -m 'merge from other side'
594 594 created new head
595 595 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5:7)'
596 596 16: baz:0
597 597 19: baz:3
598 598 20: baz:4
599 599 24: baz:3->3+
600 600 25: qux:4->4+
601 601 27: baz:3+->3-
602 602 29: merge from other side
603 603 $ hg up 24 --quiet
604 604
605 605 we are missing the branch with rename when following children
606 606 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5:7, startrev=26, descend=True)'
607 607 27: baz:3+->3-
608 608
609 609 we follow all branches in descending direction
610 610 $ hg up 23 --quiet
611 611 $ sed 's/3/+3/' baz > baz.new
612 612 $ mv baz.new baz
613 613 $ hg ci -m 'baz:3->+3'
614 614 created new head
615 615 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 2:5, startrev=16, descend=True)' --graph
616 616 @ 30: baz:3->+3
617 617 :
618 618 : o 27: baz:3+->3-
619 619 : :
620 620 : o 24: baz:3->3+
621 621 :/
622 622 o 20: baz:4
623 623 |\
624 624 | o 19: baz:3
625 625 |/
626 626 o 18: baz:2
627 627 :
628 628 o 16: baz:0
629 629 |
630 630 ~
631 631
632 632 check error cases
633 633 $ hg up 24 --quiet
634 634 $ hg log -r 'followlines()'
635 635 hg: parse error: followlines takes at least 1 positional arguments
636 636 [255]
637 637 $ hg log -r 'followlines(baz)'
638 638 hg: parse error: followlines requires a line range
639 639 [255]
640 640 $ hg log -r 'followlines(baz, 1)'
641 641 hg: parse error: followlines expects a line range
642 642 [255]
643 643 $ hg log -r 'followlines(baz, 1:2, startrev=desc("b"))'
644 644 hg: parse error: followlines expects exactly one revision
645 645 [255]
646 646 $ hg log -r 'followlines("glob:*", 1:2)'
647 647 hg: parse error: followlines expects exactly one file
648 648 [255]
649 649 $ hg log -r 'followlines(baz, 1:)'
650 650 hg: parse error: line range bounds must be integers
651 651 [255]
652 652 $ hg log -r 'followlines(baz, :1)'
653 653 hg: parse error: line range bounds must be integers
654 654 [255]
655 655 $ hg log -r 'followlines(baz, x:4)'
656 656 hg: parse error: line range bounds must be integers
657 657 [255]
658 658 $ hg log -r 'followlines(baz, 5:4)'
659 659 hg: parse error: line range must be positive
660 660 [255]
661 661 $ hg log -r 'followlines(baz, 0:4)'
662 662 hg: parse error: fromline must be strictly positive
663 663 [255]
664 664 $ hg log -r 'followlines(baz, 2:40)'
665 665 abort: line range exceeds file size
666 666 [255]
667 667 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])'
668 668 hg: parse error at 43: syntax error in revset 'followlines(baz, 2:4, startrev=20, descend=[1])'
669 669 [255]
670 670 $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)'
671 hg: parse error: 'descend' argument must be a boolean
671 hg: parse error: descend argument must be a boolean
672 672 [255]
673 673
674 674 Test annotate with whitespace options
675 675
676 676 $ cd ..
677 677 $ hg init repo-ws
678 678 $ cd repo-ws
679 679 $ cat > a <<EOF
680 680 > aa
681 681 >
682 682 > b b
683 683 > EOF
684 684 $ hg ci -Am "adda"
685 685 adding a
686 686 $ sed 's/EOL$//g' > a <<EOF
687 687 > a a
688 688 >
689 689 > EOL
690 690 > b b
691 691 > EOF
692 692 $ hg ci -m "changea"
693 693
694 694 Annotate with no option
695 695
696 696 $ hg annotate a
697 697 1: a a
698 698 0:
699 699 1:
700 700 1: b b
701 701
702 702 Annotate with --ignore-space-change
703 703
704 704 $ hg annotate --ignore-space-change a
705 705 1: a a
706 706 1:
707 707 0:
708 708 0: b b
709 709
710 710 Annotate with --ignore-all-space
711 711
712 712 $ hg annotate --ignore-all-space a
713 713 0: a a
714 714 0:
715 715 1:
716 716 0: b b
717 717
718 718 Annotate with --ignore-blank-lines (similar to no options case)
719 719
720 720 $ hg annotate --ignore-blank-lines a
721 721 1: a a
722 722 0:
723 723 1:
724 724 1: b b
725 725
726 726 $ cd ..
727 727
728 728 Annotate with linkrev pointing to another branch
729 729 ------------------------------------------------
730 730
731 731 create history with a filerev whose linkrev points to another branch
732 732
733 733 $ hg init branchedlinkrev
734 734 $ cd branchedlinkrev
735 735 $ echo A > a
736 736 $ hg commit -Am 'contentA'
737 737 adding a
738 738 $ echo B >> a
739 739 $ hg commit -m 'contentB'
740 740 $ hg up --rev 'desc(contentA)'
741 741 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
742 742 $ echo unrelated > unrelated
743 743 $ hg commit -Am 'unrelated'
744 744 adding unrelated
745 745 created new head
746 746 $ hg graft -r 'desc(contentB)'
747 747 grafting 1:fd27c222e3e6 "contentB"
748 748 $ echo C >> a
749 749 $ hg commit -m 'contentC'
750 750 $ echo W >> a
751 751 $ hg log -G
752 752 @ changeset: 4:072f1e8df249
753 753 | tag: tip
754 754 | user: test
755 755 | date: Thu Jan 01 00:00:00 1970 +0000
756 756 | summary: contentC
757 757 |
758 758 o changeset: 3:ff38df03cc4b
759 759 | user: test
760 760 | date: Thu Jan 01 00:00:00 1970 +0000
761 761 | summary: contentB
762 762 |
763 763 o changeset: 2:62aaf3f6fc06
764 764 | parent: 0:f0932f74827e
765 765 | user: test
766 766 | date: Thu Jan 01 00:00:00 1970 +0000
767 767 | summary: unrelated
768 768 |
769 769 | o changeset: 1:fd27c222e3e6
770 770 |/ user: test
771 771 | date: Thu Jan 01 00:00:00 1970 +0000
772 772 | summary: contentB
773 773 |
774 774 o changeset: 0:f0932f74827e
775 775 user: test
776 776 date: Thu Jan 01 00:00:00 1970 +0000
777 777 summary: contentA
778 778
779 779
780 780 Annotate should list ancestor of starting revision only
781 781
782 782 $ hg annotate a
783 783 0: A
784 784 3: B
785 785 4: C
786 786
787 787 $ hg annotate a -r 'wdir()'
788 788 0 : A
789 789 3 : B
790 790 4 : C
791 791 4+: W
792 792
793 793 Even when the starting revision is the linkrev-shadowed one:
794 794
795 795 $ hg annotate a -r 3
796 796 0: A
797 797 3: B
798 798
799 799 $ cd ..
800 800
801 801 Issue5360: Deleted chunk in p1 of a merge changeset
802 802
803 803 $ hg init repo-5360
804 804 $ cd repo-5360
805 805 $ echo 1 > a
806 806 $ hg commit -A a -m 1
807 807 $ echo 2 >> a
808 808 $ hg commit -m 2
809 809 $ echo a > a
810 810 $ hg commit -m a
811 811 $ hg update '.^' -q
812 812 $ echo 3 >> a
813 813 $ hg commit -m 3 -q
814 814 $ hg merge 2 -q
815 815 $ cat > a << EOF
816 816 > b
817 817 > 1
818 818 > 2
819 819 > 3
820 820 > a
821 821 > EOF
822 822 $ hg resolve --mark -q
823 823 $ hg commit -m m
824 824 $ hg annotate a
825 825 4: b
826 826 0: 1
827 827 1: 2
828 828 3: 3
829 829 2: a
830 830
831 831 $ cd ..