##// END OF EJS Templates
revset: add a followlines(file, fromline, toline[, rev]) revset...
Denis Laxalde -
r30719:42c75b4f default
parent child Browse files
Show More
@@ -1,3846 +1,3892 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12 import string
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 destutil,
17 17 encoding,
18 18 error,
19 19 hbisect,
20 20 match as matchmod,
21 21 node,
22 22 obsolete as obsmod,
23 23 parser,
24 24 pathutil,
25 25 phases,
26 26 pycompat,
27 27 registrar,
28 28 repoview,
29 29 util,
30 30 )
31 31
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # Slice bound for parentrevs(): 1 keeps only the first parent.
    cut = 1 if followfirst else None
    cl = repo.changelog

    def gen():
        revs.sort(reverse=True)
        revsiter = iter(revs)
        heap = []
        seen = set()

        pending = next(revsiter, None)
        if pending is not None:
            # Max-heap via negation: highest rev first.
            heapq.heappush(heap, -pending)

        while heap:
            rev = -heapq.heappop(heap)
            # Once the current input rev is reached, feed the next one in
            # so the heap always covers all remaining inputs.
            if rev == pending:
                pending = next(revsiter, None)
                if pending is not None:
                    heapq.heappush(heap, -pending)
            if rev in seen:
                continue
            seen.add(rev)
            yield rev
            for p in cl.parentrevs(rev)[:cut]:
                if p != node.nullrev:
                    heapq.heappush(heap, -p)

    return generatorset(gen(), iterasc=False)
64 64
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # Slice bound for parentrevs(): 1 keeps only the first parent.
    cut = 1 if followfirst else None

    def gen():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for rev in cl:
                yield rev
            return
        seen = set(revs)
        for rev in cl.revs(first + 1):
            for p in cl.parentrevs(rev)[:cut]:
                if p != nullrev and p in seen:
                    seen.add(rev)
                    yield rev
                    break

    return generatorset(gen(), iterasc=True)
93 93
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted: no need to walk
                # past this rev
                continue
        parents = parentrevs(rev)
        # 'seen' doubles as a visited-marker and a rev -> parents cache
        # for the second pass below
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        # NOTE: returns a plain set here (and a plain list for empty
        # roots above); callers such as reachableroots() wrap the result
        # in a baseset themselves
        return reachable
    # second pass: extend 'reachable' to every visited rev that has a
    # reachable parent, i.e. every rev lying on a roots->heads path.
    # Iterating in ascending order guarantees parents are classified
    # before their children.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
131 131
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    # Grab the minimum before flattening: min() is a smartset method.
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # Fast path: the native implementation on the changelog, if any.
        found = repo.changelog.reachableroots(minroot, heads, roots,
                                              includepath)
    except AttributeError:
        found = _reachablerootspure(repo, minroot, roots, heads, includepath)
    result = baseset(found)
    result.sort()
    return result
148 148
# Grammar table consumed by the parser: maps each token type to a tuple
# describing how it binds and which parse-tree nodes it produces.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
174 174
# token names that are operators, not symbols (see tokenize())
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(
    string.ascii_letters +
    string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))

# default set of valid characters for non-initial letters of symbols
_symletters = _syminitletters | set(pycompat.sysstr('-/'))
184 184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Each token is a ``(type, value, position)`` tuple; the stream always
    ends with an ``('end', None, pos)`` token.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # r-prefixed string: keep backslashes literal
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getsymbol(x):
    """Return the name carried by a 'symbol' parse-tree node.

    Raises ParseError for any other node (or a missing one)."""
    if not x or x[0] != 'symbol':
        raise error.ParseError(_('not a symbol'))
    return x[1]
309 309
def getstring(x, err):
    """Return the text of a 'string' or 'symbol' node, else raise err."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
314 314
def getlist(x):
    """Flatten a parse-tree node into a list of argument nodes.

    None/empty yields [], a 'list' node yields its children, and any
    other node becomes a one-element list."""
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return list(x[1:])
321 321
def getargs(x, min, max, err):
    """Return the argument nodes of x, enforcing an arity of [min, max].

    A negative max disables the upper bound."""
    args = getlist(x)
    nargs = len(args)
    if nargs < min or (0 <= max < nargs):
        raise error.ParseError(err)
    return args
327 327
def getargsdict(x, funcname, keys):
    # Parse the arguments of x into a {name: node} dict; 'keys' is the
    # whitespace-separated list of recognized argument names.  funcname is
    # presumably only used for error reporting -- see parser.buildargsdict.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
331 331
def getset(repo, subset, x):
    # Evaluate parse tree 'x' against 'subset' and return a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset -- the normal case
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        # a registered predicate returned a bare list: warn and wrap it
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
345 345
def _getrevsource(repo, r):
    """Return the rev that r was grafted/transplanted/rebased from, or None.

    The origin is recorded in the changeset's extra metadata."""
    extra = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extra:
            continue
        try:
            return repo[extra[key]].rev()
        except error.RepoLookupError:
            # recorded source no longer resolvable; try the next key
            pass
    return None
355 355
356 356 # operator methods
357 357
def stringset(repo, subset, x):
    """Resolve a bare revision string and intersect it with subset."""
    rev = repo[x].rev()
    # nullrev is a member of a fullreposet even though it is never
    # enumerated by it.
    isnullmember = rev == node.nullrev and isinstance(subset, fullreposet)
    if rev in subset or isnullmember:
        return baseset([rev])
    return baseset()
364 364
365 365 def rangeset(repo, subset, x, y, order):
366 366 m = getset(repo, fullreposet(repo), x)
367 367 n = getset(repo, fullreposet(repo), y)
368 368
369 369 if not m or not n:
370 370 return baseset()
371 371 return _makerangeset(repo, subset, m.first(), n.last(), order)
372 372
def rangepre(repo, subset, y, order):
    # ':y' can't be rewritten to '0:y' since '0' may be hidden
    endset = getset(repo, fullreposet(repo), y)
    if not endset:
        return baseset()
    return _makerangeset(repo, subset, 0, endset.last(), order)
379 379
def _makerangeset(repo, subset, m, n, order):
    """Build the set for the range m..n (inclusive), honoring 'order'."""
    if m == n:
        result = baseset([m])
    elif n == node.wdirrev:
        # open-ended range up to the working directory
        result = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory
        result = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        result = spanset(repo, m, n + 1)
    else:
        # reversed bounds: a descending span
        result = spanset(repo, m, n - 1)

    if order == defineorder:
        return result & subset
    # carrying the sorting over when possible would be more efficient
    return subset & result
397 397
def dagrange(repo, subset, x, y, order):
    """'x::y' -- revs that lie on a path from a rev in x to a rev in y."""
    everything = fullreposet(repo)
    onpath = reachableroots(repo, getset(repo, everything, x),
                            getset(repo, everything, y), includepath=True)
    return subset & onpath
403 403
def andset(repo, subset, x, y, order):
    """'x and y' -- evaluate y inside the result of x."""
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
406 406
def differenceset(repo, subset, x, y, order):
    """'x - y' -- revs of x (within subset) not also in y."""
    lhs = getset(repo, subset, x)
    rhs = getset(repo, subset, y)
    return lhs - rhs
409 409
def _orsetlist(repo, subset, xs):
    """Union the sets in xs, splitting recursively to balance the tree."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = _orsetlist(repo, subset, xs[:mid])
    right = _orsetlist(repo, subset, xs[mid:])
    return left + right
418 418
def orset(repo, subset, x, order):
    """'a or b or ...' -- union of the member sets."""
    members = getlist(x)
    if order == followorder:
        # slow path to take the subset order
        return subset & _orsetlist(repo, fullreposet(repo), members)
    return _orsetlist(repo, subset, members)
426 426
def notset(repo, subset, x, order):
    """'not x' -- members of subset not matched by x."""
    excluded = getset(repo, subset, x)
    return subset - excluded
429 429
def listset(repo, subset, *xs):
    """Reject a bare 'x, y' list used outside a function call."""
    hint = _('see hg help "revsets.x or y"')
    raise error.ParseError(_("can't use a list in this context"), hint=hint)
433 433
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only meaningful inside a function's argument list
    # (see getargsdict); reaching this method means it appeared in a
    # plain expression.
    raise error.ParseError(_("can't use a key-value pair in this context"))
436 436
def func(repo, subset, a, b, order):
    """Dispatch a 'func' parse node to the registered predicate."""
    name = getsymbol(a)
    if name in symbols:
        fn = symbols[name]
        # order-aware predicates opt in via the _takeorder attribute
        if getattr(fn, '_takeorder', False):
            return fn(repo, subset, b, order)
        return fn(repo, subset, b)

    # unknown name: suggest only the documented (public) predicates
    documented = [s for s, f in symbols.items()
                  if getattr(f, '__doc__', None) is not None]
    raise error.UnknownIdentifier(name, documented)
449 449
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator used below to register each predicate; presumably fills in
# 'symbols' (and 'safesymbols' when safe=True) -- see registrar.revsetpredicate
predicate = registrar.revsetpredicate()
466 466
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (deliberately no docstring: func() only advertises documented
    # predicates, keeping this one out of suggestions and help)
    #
    # Fix: report argument-parsing errors under this predicate's own name.
    # The funcname previously passed to getargsdict was 'limit', a
    # copy-paste slip that made parse errors blame the wrong predicate.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
472 472
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (deliberately no docstring: func() only advertises documented
    # predicates, keeping this one out of suggestions and help)
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
480 480
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # 1 selects the 'added' list from repo.status() -- see checkstatus()
    return checkstatus(repo, subset, pat, 1)
492 492
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument set into a running gca
    for arg in args:
        for r in getset(repo, rl, arg):
            ctx = repo[r]
            anc = ctx if anc is None else anc.ancestor(ctx)

    if anc is None:
        return baseset()
    ancrev = anc.rev()
    if ancrev in subset:
        return baseset([ancrev])
    return baseset()
517 517
def _ancestors(repo, subset, x, followfirst=False):
    """Common implementation of ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
524 524
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper; the work happens in _ancestors()
    return _ancestors(repo, subset, x)
530 530
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (no docstring on purpose: undocumented predicates stay out of help)
    return _ancestors(repo, subset, x, followfirst=True)
536 536
def ancestorspec(repo, subset, x, n, order):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    ancs = set()
    for rev in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for _i in range(n):
            rev = cl.parentrevs(rev)[0]
        ancs.add(rev)
    return subset & ancs
553 553
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def byauthor(rev):
        return matcher(encoding.lower(repo[rev].user()))

    return subset.filter(byauthor, condrepr=('<user %r>', needle))
563 563
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    statusname = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, statusname))
580 580
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # alias for bisect(); kept so old queries keep working
    return bisect(repo, subset, x)
586 586
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single lookup in the bookmark store
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:...'): scan every bookmark name
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
624 624
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # getbi(rev) -> branch info tuple; [0] is the branch name
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # argument is a revset: collect the branches of those revs, then keep
    # every rev that is in the set itself or on one of those branches
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
664 664
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
675 675
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundle repository's changelog carries 'bundlerevs'
    bundlerevs = getattr(repo.changelog, 'bundlerevs', None)
    if bundlerevs is None:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
687 687
def checkstatus(repo, subset, pat, field):
    """Filter subset to csets where pat matches a file in the given
    status field.

    ``field`` indexes the repo.status() result tuple (1 is used by
    adds() for added files -- see that predicate).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure below can rebuild the cached matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the changectx, so they must be
            # re-built for every revision; others are built once
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal file, cheap membership tests
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
719 719
def _children(repo, subset, parentset):
    """Return the members of subset with at least one parent in parentset."""
    if not parentset:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    nullrev = node.nullrev
    found = set()
    for rev in subset:
        # a child is always numbered higher than all of its parents
        if rev <= minrev:
            continue
        p1, p2 = parentrevs(rev)
        if p1 in parentset or (p2 != nullrev and p2 in parentset):
            found.add(rev)
    return baseset(found)
736 736
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
744 744
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(rev):
        return repo[rev].closesbranch()

    return subset.filter(isclosed, condrepr='<branch closed>')
753 753
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        ctx = repo[rev]
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test suffices
            cpath = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return cpath in ctx
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
780 780
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def wasconverted(r):
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(wasconverted, condrepr=('<converted %r>', rev))
803 803
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)

    def indate(rev):
        return dm(repo[rev].date()[0])

    return subset.filter(indate, condrepr=('<date %r>', ds))
813 813
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def indescription(rev):
        return needle in encoding.lower(repo[rev].description())

    return subset.filter(indescription, condrepr=('<desc %r>', needle))
826 826
def _descendants(repo, subset, x, followfirst=False):
    # Common implementation of descendants()/_firstdescendants():
    # the roots themselves plus everything descending from them.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again so its own order wins
        result = subset & result
    return result
845 845
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # thin wrapper; the work happens in _descendants()
    return _descendants(repo, subset, x)
851 851
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (no docstring on purpose: undocumented predicates stay out of help)
    return _descendants(repo, subset, x, followfirst=True)
857 857
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # accumulate every rev on this source chain; if the walk hits a
            # match below, the whole chain is a destination
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
902 902
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
912 912
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # intersect with the precomputed 'extinct' set from the obsstore
    return subset & obsmod.getrevs(repo, 'extinct')
921 921
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None
    matcher = None
    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        # stringmatcher handles the 're:'/'literal:' prefixes
        kind, value, matcher = util.stringmatcher(value)

    def _hasextra(r):
        meta = repo[r].extra()
        if label not in meta:
            return False
        # with no value argument, label presence alone is a match
        return value is None or matcher(meta[label])

    return subset.filter(_hasextra,
                         condrepr=('<extra[%r] %r>', label, value))
952 952
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # changelog revs collected from the filelog(s)
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it exactly, no directory walk needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against the working directory's file list
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: filenode -> changelog rev found during the forward scan
        # below, so later filelog entries reusing a node are resolved cheaply
        known = {}
        # scanpos is the rev the last scan stopped at; None while a scan is
        # in progress (disables re-entry for entries hit mid-scan)
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                # linkrev is visible in the (possibly filtered) changelog
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
1018 1018
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # identical argument handling and semantics; see limit()
    return limit(repo, subset, x)
1024 1024
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of follow()/_followfirst().

    With no pattern argument, returns the ancestors of the working
    directory's first parent. With a pattern (and optional starting
    revset), follows the history of each matched file instead.
    'followfirst' limits traversal to first parents.
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            # the optional second argument must resolve to exactly one rev
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # NOTE: the genexp's 'c' shadows the outer changectx 'c' within
            # this expression only; the outer binding is unchanged after it
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1054 1054
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # both-parents traversal; see _followfirst for the first-parent variant
    return _follow(repo, subset, x, 'follow')
1063 1063
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070 1070
@predicate('followlines(file, fromline, toline[, rev=.])', safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'rev' and should hence be
    consistent with file size. If rev is not specified, working directory's
    parent is used.
    """
    from . import context # avoid circular import issues

    args = getargs(x, 3, 4, _("followlines takes at least three arguments"))

    rev = '.'
    if len(args) == 4:
        # fourth argument is a keyword argument: rev=<revset>
        revarg = getargsdict(args[3], 'followlines', 'rev')
        if 'rev' in revarg:
            revs = getset(repo, fullreposet(repo), revarg['rev'])
            if len(revs) != 1:
                raise error.ParseError(
                    _("followlines expects exactly one revision"))
            rev = revs.last()

    pat = getstring(args[0], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        # plain path: canonicalize without walking the manifest
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        # a pattern is accepted only if it selects a single file at 'rev'
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    try:
        # line bounds must be literal integer symbols, not arbitrary revsets
        fromline, toline = [int(getsymbol(a)) for a in args[1:3]]
    except ValueError:
        raise error.ParseError(_("line range bounds must be integers"))
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # convert user-facing 1-based fromline to the 0-based index
    # blockancestors expects (toline is passed through unchanged)
    fromline -= 1

    fctx = repo[rev].filectx(fname)
    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
    # blockancestors yields newest-first, hence iterasc=False
    return subset & generatorset(revs, iterasc=False)
1116
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
1078 1124
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(rev):
        # search file names, user and description of the changeset
        ctx = repo[rev]
        candidates = ctx.files() + [ctx.user(), ctx.description()]
        return any(pattern.search(text) for text in candidates)

    return subset.filter(matches, condrepr=('<grep %r>', pattern.pattern))
1099 1145
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # two-character prefix selects the argument kind; rest is the value
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the changelog has no entry for the working directory; fall
            # back to the (more expensive) context-based file list
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1163 1209
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1176 1222
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    # branchmap values are the head nodes of each named branch
    for branchheads in repo.branchmap().itervalues():
        for h in branchheads:
            headrevs.add(cl.rev(h))
    return subset & baseset(headrevs)
1188 1234
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # a head of the set is any member that is not a parent of another member
    return members - parents(repo, subset, x)
1196 1242
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1205 1251
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(rev):
        ctx = repo[rev]
        # lowercase each candidate so the comparison is case-insensitive
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1220 1266
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0  # defaults: first element, no offset
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # consume (and discard) the first 'ofs' elements of the input set
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # then collect up to 'lim' elements, keeping only those in 'subset';
    # iteration order of 'os' (not of 'subset') determines the result
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1257 1303
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # reverse so that "last n" becomes "first n" of the reversed iteration
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1283 1329
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        return baseset(datarepr=('<max %r, %r>', subset, os))
    if m in subset:
        return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1298 1344
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge, condrepr='<merge>')
1308 1354
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # nchildren[r - baserev] counts how many revisions name r as a parent
    nchildren = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                nchildren[p - baserev] += 1
    return subset.filter(lambda r: nchildren[r - baserev] > 1,
                         condrepr='<branchpoint>')
1328 1374
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        return baseset(datarepr=('<min %r, %r>', subset, os))
    if m in subset:
        return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1343 1389
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 — presumably the 'modified' list; see checkstatus
    return checkstatus(repo, subset, pattern, 0)
1355 1401
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # regex: collect every namespace whose name matches
        # NOTE: 'ns' is rebound here from the argument string to each
        # namespace object in turn
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    # a name bound to the null revision does not select anything
    names -= set([node.nullrev])
    return subset & names
1393 1439
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-char hex node: resolve directly, no prefix search
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node or non-hex string
            rn = None
    else:
        rn = None
        # partial prefix: returns a node only when the match is unambiguous
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1417 1463
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the obsstore's precomputed obsolete set
    return subset & obsmod.getrevs(repo, 'obsolete')
1425 1471
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither a
        # descendant of, nor contained in, the included set
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
            if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ::include - ::exclude, computed by the changelog's missing-revs search
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1451 1497
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        """Walk the source chain of 'rev' back to its earliest origin.

        Returns None when 'rev' was not created by a graft/transplant/rebase.
        """
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    # drop the None produced by revs without a recorded source
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1483 1529
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default path when empty
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery chatter does not leak into the caller
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1510 1556
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context; a negative rev
        # means its first parent is the null revision
        rev = repo[x].p1().rev()
        if rev < 0:
            return baseset()
        return subset & baseset([rev])

    parentrevs = repo.changelog.parentrevs
    firsts = set(parentrevs(r)[0]
                 for r in getset(repo, fullreposet(repo), x))
    firsts.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & firsts
1529 1575
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # working directory has a single parent: no second parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    # non-merges report nullrev as second parent; drop it
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1552 1598
def parentpost(repo, subset, x, order):
    # 'order' is accepted for interface compatibility but unused:
    # first-parent resolution does not depend on ordering
    return p1(repo, subset, x)
1555 1601
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        revs = set(p.rev() for p in repo[x].parents())
    else:
        revs = set()
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory;
                # ask its context for parents instead
                revs.update(p.rev() for p in repo[r].parents())
            else:
                revs.update(parentrevs(r))
    revs.discard(node.nullrev)
    return subset & revs
1575 1621
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache maintains per-phase rev sets
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1589 1635
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1597 1643
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1605 1651
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            # only merges have a real second parent; others are skipped
            if parents[1] != node.nullrev:
                ps.add(parents[1])
    return subset & ps
1630 1676
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # deliberately swallow the lookup failure: unknown revisions
        # simply yield an empty result instead of aborting the query
        return baseset()
1644 1690
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set (index 0 is public)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: test each candidate's phase individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1663 1709
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    getphase = repo._phasecache.phase
    target = phases.public

    def ispublic(r):
        return getphase(repo, r) == target

    return subset.filter(ispublic, condrepr=('<phase %r>', target),
                         cache=False)
1674 1720
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means: look up the current local branch name remotely
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map its node locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1709 1755
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 holds the removed files
    return checkstatus(repo, subset, pattern, 2)
1721 1767
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # an unknown revision (other than null) yields an empty set, not an error
    if num not in repo.changelog and num != node.nullrev:
        return baseset()
    return subset & baseset([num])
1737 1783
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop at the first mismatching field: the remaining
                    # fields are at least as costly to compute (the fields
                    # were sorted by increasing cost above), so evaluating
                    # them cannot change the outcome
                    break
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1849 1895
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    revs = getset(repo, subset, x)
    # only reverse when this expression is the one defining the ordering
    if order == defineorder:
        revs.reverse()
    return revs
1858 1904
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(rev):
        # a root has no (non-null) parent inside the candidate set
        return not any(0 <= p and p in candidates
                       for p in parentrevs(rev))
    return subset & candidates.filter(isroot, condrepr='<roots>')
1871 1917
# sort() key functions: map a sort key name to a function taking a change
# context and returning the value that key sorts on
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
    }
1880 1926
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    # default to sorting by revision number
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k # keep the original spelling for error reporting
        # a leading '-' requests descending order for this key
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))

    return args['set'], keyflags, opts
1917 1963
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    # nothing to sort by, or another expression dictates the ordering
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: sorting by revision number only
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # general case: sort change contexts by each key, least significant
    # last; sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1960 2006
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    'revs' is a list of revision numbers (it is sorted in place);
    'parentsfunc' maps a revision to its parent revisions; 'firstbranch'
    optionally pre-seeds the set of unblocked revisions so that a chosen
    branch is emitted first.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2158 2204
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # a changeset touches subrepos iff it touches .hgsubstate, so restrict
    # the status check to that single file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # generate the subrepo names matching the requested pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # .hgsubstate modified: check whether a matching subrepo's
            # recorded state actually differs from the first parent's
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2202 2248
def _substringmatcher(pattern):
    """Like util.stringmatcher(), but a 'literal' kind matches substrings.

    Returns the same (kind, pattern, matcher) triple, with the matcher for
    a literal pattern replaced by a substring containment test.
    """
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
2208 2254
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or other) pattern: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: all tagged revisions, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2237 2283
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias for tag()
    return tag(repo, subset, x)
2241 2287
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # restrict the subset to the precomputed set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2250 2296
2251 2297
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is an alias for 'author'
    return author(repo, subset, x)
2261 2307
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the virtual working-directory revision only shows up when it is part
    # of the subset or when the whole repository is being queried
    included = node.wdirrev in subset or isinstance(subset, fullreposet)
    if not included:
        return baseset()
    return baseset([node.wdirrev])
2270 2316
def _orderedlist(repo, subset, x):
    """Evaluate a NUL-separated list of revision symbols, keeping list order."""
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    seen = set()
    for token in spec.split('\0'):
        try:
            # fast path for integer revision
            r = int(token)
            if str(r) != token or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, token)

        for r in revs:
            if r in seen:
                continue
            inslice = (r in subset
                       or r == node.nullrev and isinstance(subset, fullreposet))
            if inslice:
                result.append(r)
                seen.add(r)
    return baseset(result)
2298 2344
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
2307 2353
def _orderedintlist(repo, subset, x):
    """Evaluate a NUL-separated list of integer revisions, keeping list order."""
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(r) for r in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2315 2361
# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2324 2370
def _orderedhexlist(repo, subset, x):
    """Evaluate a NUL-separated list of hex nodes, keeping list order."""
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    cl = repo.changelog
    wanted = [cl.rev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2333 2379
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2342 2388
# dispatch table: parse tree operator/node name -> evaluation function
methods = {
    "range": rangeset,
    "rangepre": rangepre,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
2360 2406
2361 2407 # Constants for ordering requirement, used in _analyze():
2362 2408 #
2363 2409 # If 'define', any nested functions and operations can change the ordering of
2364 2410 # the entries in the set. If 'follow', any nested functions and operations
2365 2411 # should take the ordering specified by the first operand to the '&' operator.
2366 2412 #
2367 2413 # For instance,
2368 2414 #
2369 2415 # X & (Y | Z)
2370 2416 # ^ ^^^^^^^
2371 2417 # | follow
2372 2418 # define
2373 2419 #
2374 2420 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
2375 2421 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
2376 2422 #
2377 2423 # 'any' means the order doesn't matter. For instance,
2378 2424 #
2379 2425 # X & !Y
2380 2426 # ^
2381 2427 # any
2382 2428 #
2383 2429 # 'y()' can either enforce its ordering requirement or take the ordering
2384 2430 # specified by 'x()' because 'not()' doesn't care the order.
2385 2431 #
2386 2432 # Transition of ordering requirement:
2387 2433 #
2388 2434 # 1. starts with 'define'
2389 2435 # 2. shifts to 'follow' by 'x & y'
2390 2436 # 3. changes back to 'define' on function call 'f(x)' or function-like
2391 2437 # operation 'x (f) y' because 'f' may have its own ordering requirement
2392 2438 # for 'x' and 'y' (e.g. 'first(x)')
2393 2439 #
# ordering requirement constants (see the discussion above)
anyorder = 'any' # don't care the order
defineorder = 'define' # should define the order
followorder = 'follow' # must follow the current order

# transition table for 'x & y', from the current expression 'x' to 'y'
_tofolloworder = {
    anyorder: anyorder,
    defineorder: followorder,
    followorder: followorder,
}
2404 2450
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # recognize 'ancestors(X) and not ancestors(Y)' so callers can rewrite
    # it as the cheaper 'only(X, Y)'; return None when the shape differs
    if revs is None:
        return None
    if revs[0] != 'func' or getsymbol(revs[1]) != 'ancestors':
        return None
    if bases is None:
        return None
    if bases[0] != 'not':
        return None
    negated = bases[1]
    if negated[0] != 'func' or getsymbol(negated[1]) != 'ancestors':
        return None
    return ('list', revs[2], negated[2])
2419 2465
2420 2466 def _fixops(x):
2421 2467 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2422 2468 handled well by our simple top-down parser"""
2423 2469 if not isinstance(x, tuple):
2424 2470 return x
2425 2471
2426 2472 op = x[0]
2427 2473 if op == 'parent':
2428 2474 # x^:y means (x^) : y, not x ^ (:y)
2429 2475 # x^: means (x^) :, not x ^ (:)
2430 2476 post = ('parentpost', x[1])
2431 2477 if x[2][0] == 'dagrangepre':
2432 2478 return _fixops(('dagrange', post, x[2][1]))
2433 2479 elif x[2][0] == 'rangepre':
2434 2480 return _fixops(('range', post, x[2][1]))
2435 2481 elif x[2][0] == 'rangeall':
2436 2482 return _fixops(('rangepost', post))
2437 2483 elif op == 'or':
2438 2484 # make number of arguments deterministic:
2439 2485 # x + y + z -> (or x y z) -> (or (list x y z))
2440 2486 return (op, _fixops(('list',) + x[1:]))
2441 2487
2442 2488 return (op,) + tuple(_fixops(y) for y in x[1:])
2443 2489
def _analyze(x, order):
    """Transform a raw parsed tree into an evaluatable tree (see analyze())

    'order' is the ordering requirement of the current expression 'x'
    (one of anyorder/defineorder/followorder defined above).
    """
    if x is None:
        return x

    op = x[0]
    # pseudo operations are rewritten in terms of real operations or
    # function calls, then re-analyzed
    if op == 'minus':
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'rangeall':
        return _analyze(('rangepre', ('string', 'tip')), order)
    elif op == 'rangepost':
        return _analyze(('range', x[1], ('string', 'tip')), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        # the right-hand side of '&' must follow the left-hand side's order
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        # 'not' does not care about the ordering of its argument
        return (op, _analyze(x[1], anyorder), order)
    elif op in ('rangepre', 'parentpost'):
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        f = getsymbol(x[1])
        d = defineorder
        if f == 'present':
            # 'present(set)' is known to return the argument set with no
            # modification, so forward the current order to its argument
            d = order
        return (op, x[1], _analyze(x[2], d), order)
    raise ValueError('invalid operator %r' % op)
2498 2544
def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    # thin public wrapper so callers need not know about the recursive helper
    return _analyze(x, order)
2510 2556
def _optimize(x, small):
    """Recursively optimize an analyzed tree, returning (weight, newtree).

    The weight is a rough evaluation-cost estimate used to put the cheaper
    operand of an 'and' first. 'small' hints that the result is expected to
    be narrowed by an enclosing expression (it discounts leaf weights).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op in ('string', 'symbol'):
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        order = x[3]
        # evaluating the lighter operand first narrows the heavier one
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm, order)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1], order)

        if wa > wb:
            return w, (op, tb, ta, order)
        return w, (op, ta, tb, order)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        order = x[2]
        ws, ts, ss = [], [], []
        def flushss():
            # collapse the pending run of trivial operands into one _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s), order)
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in getlist(x[1]):
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op, ('list',) + tuple(ts), order)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1][:3] == ('func', ('symbol', 'public'), None):
            order = x[1][3]
            newsym = ('func', ('symbol', '_notpublic'), None, order)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            order = x[2]
            return o[0], (op, o[1], order)
    elif op in ('rangepre', 'parentpost'):
        o = _optimize(x[1], small)
        order = x[2]
        return o[0], (op, o[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        order = x[3]
        return wa + wb, (op, ta, tb, order)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'keyvalue':
        w, t = _optimize(x[2], small)
        return w, (op, x[1], t)
    elif op == 'func':
        # hard-coded weight table for known predicates
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
                 'keyword', 'outgoing', 'user', 'destination'):
            w = 10 # slow
        elif f in ('modifies', 'adds', 'removes'):
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
            w = 0
        elif f == "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        order = x[3]
        return w + wa, (op, x[1], ta, order)
    raise ValueError('invalid operator %r' % op)
2618 2664
def optimize(tree):
    """Optimize evaluatable tree

    All pseudo operations should be transformed beforehand.
    """
    # the weight is only used internally to order 'and' operands; discard it
    _weight, newtree = _optimize(tree, small=True)
    return newtree
2626 2672
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions: the regular symbol letters plus '$'
# (so that positional alias arguments such as '$1' tokenize as symbols)
_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2630 2676
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    tokens = tokenize(spec, lookup=lookup, syminitletters=syminitletters)
    tree, pos = parser.parser(elements).parse(tokens)
    # the parser must consume the whole spec; leftovers mean a bad token
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2651 2697
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if ``tree`` is a function call on a plain
        symbol; fall through (returning None) for any other node."""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2670 2716
def expandaliases(ui, tree):
    """Expand [revsetalias] definitions in ``tree`` and warn on bad aliases.

    Aliases that failed to parse are reported once (flagged via ``warned``)
    even when they are not referenced by the expression.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    expanded = _aliasrules.expand(aliases, tree)
    # warn about problematic (but not referred) aliases
    for name, alias in sorted(aliases.iteritems()):
        if alias.error and not alias.warned:
            ui.warn(_('warning: %s\n') % (alias.error))
            alias.warned = True
    return expanded
2680 2726
def foldconcat(tree):
    """Fold elements to be concatenated by `##`

    Collapses every '_concat' chain of 'string'/'symbol' leaves into a
    single 'string' node; other nodes are rebuilt recursively.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # iterative depth-first walk of the '_concat' chain, left to right
    stack = [tree]
    parts = []
    while stack:
        elem = stack.pop()
        kind = elem[0]
        if kind == '_concat':
            stack.extend(reversed(elem[1:]))
        elif kind in ('string', 'symbol'):
            parts.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2701 2747
def parse(spec, lookup=None):
    """Parse a revset specification into a raw parse tree.

    ``lookup`` is an optional callable forwarded to the tokenizer;
    callers typically pass ``repo.__contains__`` (see matchany()).
    """
    return _parsewith(spec, lookup=lookup)
2704 2750
def posttreebuilthook(tree, repo):
    """Hook point invoked after the final optimized tree has been built.

    The default implementation is a no-op; extensions wrap or replace this
    function to inspect or rewrite the tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2708 2754
def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # single-spec convenience wrapper around matchany()
    return matchany(ui, [spec], repo=repo, order=order)
2716 2762
def matchany(ui, specs, repo=None, order=defineorder):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    if not specs:
        # no spec at all: matcher yielding the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # several specs are combined as one big 'or' expression
        tree = ('or', ('list',) + tuple(trees))

    if ui:
        tree = expandaliases(ui, tree)
    tree = foldconcat(tree)
    tree = analyze(tree, order)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2745 2791
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() expects a smartset; wrap anything that is not one
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2757 2803
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def _quote(val):
        return repr(str(val))

    def _single(code, val):
        # render one argument according to its format character
        if code == 'd':
            return str(int(val))
        if code == 's':
            return _quote(val)
        if code == 'r':
            parse(val) # make sure syntax errors are confined
            return '(%s)' % val
        if code == 'n':
            return _quote(node.hex(val))
        if code == 'b':
            return _quote(val.branch())

    def _listexp(vals, code):
        # render a list of arguments, using the _list/_intlist/_hexlist
        # helpers where possible and falling back to a balanced 'or' tree
        n = len(vals)
        if n == 0:
            return "_list('')"
        if n == 1:
            return _single(code, vals[0])
        if code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in vals)
        if code == 's':
            return "_list('%s')" % "\0".join(vals)
        if code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in vals)
        if code == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in vals)
        mid = n // 2
        return '(%s or %s)' % (_listexp(vals[:mid], code),
                               _listexp(vals[mid:], code))

    pieces = []
    i = 0
    argidx = 0
    while i < len(expr):
        ch = expr[i]
        if ch == '%':
            i += 1
            code = expr[i]
            if code == '%':
                pieces.append(code)
            elif code in 'dsnbr':
                pieces.append(_single(code, args[argidx]))
                argidx += 1
            elif code == 'l':
                # a list of some type
                i += 1
                code = expr[i]
                pieces.append(_listexp(list(args[argidx]), code))
                argidx += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % code)
        else:
            pieces.append(ch)
        i += 1

    return ''.join(pieces)
2852 2898
def prettyformat(tree):
    """Render a parse tree for human-readable (debug) output, treating
    'string' and 'symbol' nodes as leaves (see parser.prettyformat)."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2855 2901
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuple leaves are 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2861 2907
def funcsused(tree):
    """Return the set of function names referenced anywhere in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    if tree[0] == 'func':
        # 'func' nodes carry their name as ('symbol', name) in slot 1
        names.add(tree[1][1])
    for subtree in tree[1:]:
        names |= funcsused(subtree)
    return names
2872 2918
2873 2919 def _formatsetrepr(r):
2874 2920 """Format an optional printable representation of a set
2875 2921
2876 2922 ======== =================================
2877 2923 type(r) example
2878 2924 ======== =================================
2879 2925 tuple ('<not %r>', other)
2880 2926 str '<branch closed>'
2881 2927 callable lambda: '<branch %r>' % sorted(b)
2882 2928 object other
2883 2929 ======== =================================
2884 2930 """
2885 2931 if r is None:
2886 2932 return ''
2887 2933 elif isinstance(r, tuple):
2888 2934 return r[0] % r[1:]
2889 2935 elif isinstance(r, str):
2890 2936 return r
2891 2937 elif callable(r):
2892 2938 return r()
2893 2939 else:
2894 2940 return repr(r)
2895 2941
class abstractsmartset(object):
    """Base class of the smartset API.

    A smartset is an ordered set of revision numbers. Subclasses must
    provide membership, iteration, ordering predicates and length; this
    base class builds generic set arithmetic (intersection, union,
    difference, filtering) and cached min()/max() on top of them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is None:
            v = min(self)
        else:
            # first element of a fast ascending iteration is the minimum
            for v in self.fastasc():
                break
            else:
                raise ValueError('arg is an empty sequence')
        # replace the method with a constant-time accessor (memoization)
        self.min = lambda: v
        return v

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is None:
            v = max(self)
        else:
            # first element of a fast descending iteration is the maximum
            for v in self.fastdesc():
                break
            else:
                raise ValueError('arg is an empty sequence')
        # memoize on both paths, mirroring min(); previously the slow path
        # returned early and recomputed max(self) on every call
        self.max = lambda: v
        return v

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo is a no-op
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins have no func_code attribute and cannot be wrapped by
        # cachefunc, but they are cheap enough not to need caching
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
3014 3060
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        data: the revisions, as any iterable (list, set, generator, ...)
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        istopo: whether ``data`` is already in topographical order.
        """
        # _ascending is tri-state: None = keep insertion order,
        # True/False = iterate over the sorted copy (_asclist)
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # keep the set around so the _set propertycache is prefilled
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy of _list, built once on first ordered access
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound method of the underlying set as an instance
        # attribute so repeated membership tests skip attribute lookups
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        # sorting is lazy: only the direction flag changes here, the sorted
        # list is materialized by _asclist when iteration happens
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # direction marker: '' unordered, '+' ascending, '-' descending
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
3138 3184
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of ``it`` accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # available only when the underlying subset supports it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # available only when the underlying subset supports it
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # prefer a fast iterator matching the declared direction, then any
        # fast iterator, and only fall back to plain (slow) iteration
        fast = None
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # emptiness is decided by whether anything survives the filter
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list comprehensions are free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # when a fast iterator exists in the opposite direction, the last
        # element is simply the first one it yields
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # otherwise exhaust the set and keep the last accepted value
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3251 3297
3252 3298 def _iterordered(ascending, iter1, iter2):
3253 3299 """produce an ordered iteration from two iterators with the same order
3254 3300
3255 3301 The ascending is used to indicated the iteration direction.
3256 3302 """
3257 3303 choice = max
3258 3304 if ascending:
3259 3305 choice = min
3260 3306
3261 3307 val1 = None
3262 3308 val2 = None
3263 3309 try:
3264 3310 # Consume both iterators in an ordered way until one is empty
3265 3311 while True:
3266 3312 if val1 is None:
3267 3313 val1 = next(iter1)
3268 3314 if val2 is None:
3269 3315 val2 = next(iter2)
3270 3316 n = choice(val1, val2)
3271 3317 yield n
3272 3318 if val1 == n:
3273 3319 val1 = None
3274 3320 if val2 == n:
3275 3321 val2 = None
3276 3322 except StopIteration:
3277 3323 # Flush any remaining values and consume the other one
3278 3324 it = iter2
3279 3325 if val1 is not None:
3280 3326 yield val1
3281 3327 it = iter1
3282 3328 elif val2 is not None:
3283 3329 # might have been equality and both are empty
3284 3330 yield val2
3285 3331 for val in it:
3286 3332 yield val
3287 3333
3288 3334 class addset(abstractsmartset):
3289 3335 """Represent the addition of two sets
3290 3336
3291 3337 Wrapper structure for lazily adding two structures without losing much
3292 3338 performance on the __contains__ method
3293 3339
3294 3340 If the ascending attribute is set, that means the two structures are
3295 3341 ordered in either an ascending or descending way. Therefore, we can add
3296 3342 them maintaining the order by iterating over both at the same time
3297 3343
3298 3344 >>> xs = baseset([0, 3, 2])
3299 3345 >>> ys = baseset([5, 2, 4])
3300 3346
3301 3347 >>> rs = addset(xs, ys)
3302 3348 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3303 3349 (True, True, False, True, 0, 4)
3304 3350 >>> rs = addset(xs, baseset([]))
3305 3351 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3306 3352 (True, True, False, 0, 2)
3307 3353 >>> rs = addset(baseset([]), baseset([]))
3308 3354 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3309 3355 (False, False, None, None)
3310 3356
3311 3357 iterate unsorted:
3312 3358 >>> rs = addset(xs, ys)
3313 3359 >>> # (use generator because pypy could call len())
3314 3360 >>> list(x for x in rs) # without _genlist
3315 3361 [0, 3, 2, 5, 4]
3316 3362 >>> assert not rs._genlist
3317 3363 >>> len(rs)
3318 3364 5
3319 3365 >>> [x for x in rs] # with _genlist
3320 3366 [0, 3, 2, 5, 4]
3321 3367 >>> assert rs._genlist
3322 3368
3323 3369 iterate ascending:
3324 3370 >>> rs = addset(xs, ys, ascending=True)
3325 3371 >>> # (use generator because pypy could call len())
3326 3372 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3327 3373 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3328 3374 >>> assert not rs._asclist
3329 3375 >>> len(rs)
3330 3376 5
3331 3377 >>> [x for x in rs], [x for x in rs.fastasc()]
3332 3378 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3333 3379 >>> assert rs._asclist
3334 3380
3335 3381 iterate descending:
3336 3382 >>> rs = addset(xs, ys, ascending=False)
3337 3383 >>> # (use generator because pypy could call len())
3338 3384 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3339 3385 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3340 3386 >>> assert not rs._asclist
3341 3387 >>> len(rs)
3342 3388 5
3343 3389 >>> [x for x in rs], [x for x in rs.fastdesc()]
3344 3390 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3345 3391 >>> assert rs._asclist
3346 3392
3347 3393 iterate ascending without fastasc:
3348 3394 >>> rs = addset(xs, generatorset(ys), ascending=True)
3349 3395 >>> assert rs.fastasc is None
3350 3396 >>> [x for x in rs]
3351 3397 [0, 2, 3, 4, 5]
3352 3398
3353 3399 iterate descending without fastdesc:
3354 3400 >>> rs = addset(generatorset(xs), ys, ascending=False)
3355 3401 >>> assert rs.fastdesc is None
3356 3402 >>> [x for x in rs]
3357 3403 [5, 4, 3, 2, 0]
3358 3404 """
    def __init__(self, revs1, revs2, ascending=None):
        # the two underlying smartsets whose union this set represents
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None = unordered; True/False = ascending/descending iteration
        self._ascending = ascending
        # lazily-populated caches: union as a baseset, and its sorted copy
        self._genlist = None
        self._asclist = None
3366 3412
    def __len__(self):
        # forces materialization of the union via the cached _list baseset
        return len(self._list)
3369 3415
    def __nonzero__(self):
        # non-empty iff either component is non-empty; avoids materializing
        return bool(self._r1) or bool(self._r2)
3372 3418
    @util.propertycache
    def _list(self):
        # baseset of all generated values, built on first access and cached
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist
3378 3424
    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then the elements of r2 not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        # merge the two (now ordered) iterators, deduplicating shared values
        return _iterordered(self._ascending, iter1, iter2)
3425 3471
3426 3472 def _trysetasclist(self):
3427 3473 """populate the _asclist attribute if possible and necessary"""
3428 3474 if self._genlist is not None and self._asclist is None:
3429 3475 self._asclist = sorted(self._genlist)
3430 3476
3431 3477 @property
3432 3478 def fastasc(self):
3433 3479 self._trysetasclist()
3434 3480 if self._asclist is not None:
3435 3481 return self._asclist.__iter__
3436 3482 iter1 = self._r1.fastasc
3437 3483 iter2 = self._r2.fastasc
3438 3484 if None in (iter1, iter2):
3439 3485 return None
3440 3486 return lambda: _iterordered(True, iter1(), iter2())
3441 3487
3442 3488 @property
3443 3489 def fastdesc(self):
3444 3490 self._trysetasclist()
3445 3491 if self._asclist is not None:
3446 3492 return self._asclist.__reversed__
3447 3493 iter1 = self._r1.fastdesc
3448 3494 iter2 = self._r2.fastdesc
3449 3495 if None in (iter1, iter2):
3450 3496 return None
3451 3497 return lambda: _iterordered(False, iter1(), iter2())
3452 3498
3453 3499 def __contains__(self, x):
3454 3500 return x in self._r1 or x in self._r2
3455 3501
    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        # lazy: only the direction flag changes here; iteration honours it
        self._ascending = not reverse
3463 3509
3464 3510 def isascending(self):
3465 3511 return self._ascending is not None and self._ascending
3466 3512
3467 3513 def isdescending(self):
3468 3514 return self._ascending is not None and not self._ascending
3469 3515
    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False
3475 3521
    def reverse(self):
        # With no defined order, reversing means reversing the concrete
        # materialized list in place; otherwise just flip the direction flag.
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending
3481 3527
3482 3528 def first(self):
3483 3529 for x in self:
3484 3530 return x
3485 3531 return None
3486 3532
    def last(self):
        # Reuse first() on the reversed order, then restore the original
        # direction (reverse() mutates this object in place).
        self.reverse()
        val = self.first()
        self.reverse()
        return val
3492 3538
3493 3539 def __repr__(self):
3494 3540 d = {None: '', False: '-', True: '+'}[self._ascending]
3495 3541 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3496 3542
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generator order is unknown, otherwise True
            (ascending) or False (descending); enables optimised iteration
            and membership implementations.
        """
        self._gen = gen
        self._asclist = None       # sorted snapshot, built once exhausted
        self._cache = {}           # value -> membership result seen so far
        self._genlist = []         # values produced so far, in gen order
        self._finished = False     # True once self._gen is exhausted
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): instance-level __contains__ only affects
                # explicit __contains__() calls; the `in` operator looks the
                # method up on the type for new-style classes -- confirm
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        # _consumegen() records True for every produced item, so only the
        # negative result needs storing here.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen() has now installed fastasc and
        # fastdesc, so the recursive call takes the fast path
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next  # Python 2 generator method
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # Pull values from the wrapped generator, recording membership and
        # order as we go; on exhaustion, install fast sorted iterators.
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # length requires full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installs
            # fastasc/fastdesc so the recursion terminates.
            for x in self._consumegen():
                pass
            # BUG FIX: this used to call self.first(), returning the first
            # element in iteration order instead of the last one.
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3676 3722
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
            (default to 0)
        end: first revision excluded (last+1)
            (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalise so that _start <= _end regardless of direction
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topological order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        # lazily drop revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        # xrange keeps the range lazy (Python 2)
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # NOTE(review): __init__ normalises start <= end, so the
                # first clause looks unreachable -- confirm before cleanup
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3791 3837
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # preserve this set's iteration direction on the result
        other.sort(reverse=self.isdescending())
        return other
3819 3865
def prettyformatset(revs):
    """Indent the repr() of a smartset according to its nesting depth.

    Each '<'-delimited segment of the repr is placed on its own line,
    indented by one space per unclosed '<' preceding it.
    """
    rs = repr(revs)
    pieces = []
    pos = 0
    total = len(rs)
    while pos < total:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        pieces.append((depth, rs[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + text for depth, text in pieces)
3833 3879
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj

    ui and extname are accepted for the loader calling convention but are
    not used here.  Every predicate in registrarobj._table is registered in
    the module-level 'symbols' table; predicates flagged '_safe' are also
    recorded in 'safesymbols'.
    """
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
3841 3887
# load built-in predicates explicitly to set up safesymbols
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
@@ -1,640 +1,761 b''
1 1 $ HGMERGE=true; export HGMERGE
2 2
3 3 init
4 4
5 5 $ hg init repo
6 6 $ cd repo
7 7
8 8 commit
9 9
10 10 $ echo 'a' > a
11 11 $ hg ci -A -m test -u nobody -d '1 0'
12 12 adding a
13 13
14 14 annotate -c
15 15
16 16 $ hg annotate -c a
17 17 8435f90966e4: a
18 18
19 19 annotate -cl
20 20
21 21 $ hg annotate -cl a
22 22 8435f90966e4:1: a
23 23
24 24 annotate -d
25 25
26 26 $ hg annotate -d a
27 27 Thu Jan 01 00:00:01 1970 +0000: a
28 28
29 29 annotate -n
30 30
31 31 $ hg annotate -n a
32 32 0: a
33 33
34 34 annotate -nl
35 35
36 36 $ hg annotate -nl a
37 37 0:1: a
38 38
39 39 annotate -u
40 40
41 41 $ hg annotate -u a
42 42 nobody: a
43 43
44 44 annotate -cdnu
45 45
46 46 $ hg annotate -cdnu a
47 47 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000: a
48 48
49 49 annotate -cdnul
50 50
51 51 $ hg annotate -cdnul a
52 52 nobody 0 8435f90966e4 Thu Jan 01 00:00:01 1970 +0000:1: a
53 53
54 54 annotate (JSON)
55 55
56 56 $ hg annotate -Tjson a
57 57 [
58 58 {
59 59 "line": "a\n",
60 60 "rev": 0
61 61 }
62 62 ]
63 63
64 64 $ hg annotate -Tjson -cdfnul a
65 65 [
66 66 {
67 67 "date": [1.0, 0],
68 68 "file": "a",
69 69 "line": "a\n",
70 70 "line_number": 1,
71 71 "node": "8435f90966e442695d2ded29fdade2bac5ad8065",
72 72 "rev": 0,
73 73 "user": "nobody"
74 74 }
75 75 ]
76 76
77 77 $ cat <<EOF >>a
78 78 > a
79 79 > a
80 80 > EOF
81 81 $ hg ci -ma1 -d '1 0'
82 82 $ hg cp a b
83 83 $ hg ci -mb -d '1 0'
84 84 $ cat <<EOF >> b
85 85 > b4
86 86 > b5
87 87 > b6
88 88 > EOF
89 89 $ hg ci -mb2 -d '2 0'
90 90
91 91 annotate -n b
92 92
93 93 $ hg annotate -n b
94 94 0: a
95 95 1: a
96 96 1: a
97 97 3: b4
98 98 3: b5
99 99 3: b6
100 100
101 101 annotate --no-follow b
102 102
103 103 $ hg annotate --no-follow b
104 104 2: a
105 105 2: a
106 106 2: a
107 107 3: b4
108 108 3: b5
109 109 3: b6
110 110
111 111 annotate -nl b
112 112
113 113 $ hg annotate -nl b
114 114 0:1: a
115 115 1:2: a
116 116 1:3: a
117 117 3:4: b4
118 118 3:5: b5
119 119 3:6: b6
120 120
121 121 annotate -nf b
122 122
123 123 $ hg annotate -nf b
124 124 0 a: a
125 125 1 a: a
126 126 1 a: a
127 127 3 b: b4
128 128 3 b: b5
129 129 3 b: b6
130 130
131 131 annotate -nlf b
132 132
133 133 $ hg annotate -nlf b
134 134 0 a:1: a
135 135 1 a:2: a
136 136 1 a:3: a
137 137 3 b:4: b4
138 138 3 b:5: b5
139 139 3 b:6: b6
140 140
141 141 $ hg up -C 2
142 142 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
143 143 $ cat <<EOF >> b
144 144 > b4
145 145 > c
146 146 > b5
147 147 > EOF
148 148 $ hg ci -mb2.1 -d '2 0'
149 149 created new head
150 150 $ hg merge
151 151 merging b
152 152 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
153 153 (branch merge, don't forget to commit)
154 154 $ hg ci -mmergeb -d '3 0'
155 155
156 156 annotate after merge
157 157
158 158 $ hg annotate -nf b
159 159 0 a: a
160 160 1 a: a
161 161 1 a: a
162 162 3 b: b4
163 163 4 b: c
164 164 3 b: b5
165 165
166 166 annotate after merge with -l
167 167
168 168 $ hg annotate -nlf b
169 169 0 a:1: a
170 170 1 a:2: a
171 171 1 a:3: a
172 172 3 b:4: b4
173 173 4 b:5: c
174 174 3 b:5: b5
175 175
176 176 $ hg up -C 1
177 177 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
178 178 $ hg cp a b
179 179 $ cat <<EOF > b
180 180 > a
181 181 > z
182 182 > a
183 183 > EOF
184 184 $ hg ci -mc -d '3 0'
185 185 created new head
186 186 $ hg merge
187 187 merging b
188 188 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
189 189 (branch merge, don't forget to commit)
190 190 $ cat <<EOF >> b
191 191 > b4
192 192 > c
193 193 > b5
194 194 > EOF
195 195 $ echo d >> b
196 196 $ hg ci -mmerge2 -d '4 0'
197 197
198 198 annotate after rename merge
199 199
200 200 $ hg annotate -nf b
201 201 0 a: a
202 202 6 b: z
203 203 1 a: a
204 204 3 b: b4
205 205 4 b: c
206 206 3 b: b5
207 207 7 b: d
208 208
209 209 annotate after rename merge with -l
210 210
211 211 $ hg annotate -nlf b
212 212 0 a:1: a
213 213 6 b:2: z
214 214 1 a:3: a
215 215 3 b:4: b4
216 216 4 b:5: c
217 217 3 b:5: b5
218 218 7 b:7: d
219 219
220 220 Issue2807: alignment of line numbers with -l
221 221
222 222 $ echo more >> b
223 223 $ hg ci -mmore -d '5 0'
224 224 $ echo more >> b
225 225 $ hg ci -mmore -d '6 0'
226 226 $ echo more >> b
227 227 $ hg ci -mmore -d '7 0'
228 228 $ hg annotate -nlf b
229 229 0 a: 1: a
230 230 6 b: 2: z
231 231 1 a: 3: a
232 232 3 b: 4: b4
233 233 4 b: 5: c
234 234 3 b: 5: b5
235 235 7 b: 7: d
236 236 8 b: 8: more
237 237 9 b: 9: more
238 238 10 b:10: more
239 239
240 240 linkrev vs rev
241 241
242 242 $ hg annotate -r tip -n a
243 243 0: a
244 244 1: a
245 245 1: a
246 246
247 247 linkrev vs rev with -l
248 248
249 249 $ hg annotate -r tip -nl a
250 250 0:1: a
251 251 1:2: a
252 252 1:3: a
253 253
254 254 Issue589: "undelete" sequence leads to crash
255 255
256 256 annotate was crashing when trying to --follow something
257 257
258 258 like A -> B -> A
259 259
260 260 generate ABA rename configuration
261 261
262 262 $ echo foo > foo
263 263 $ hg add foo
264 264 $ hg ci -m addfoo
265 265 $ hg rename foo bar
266 266 $ hg ci -m renamefoo
267 267 $ hg rename bar foo
268 268 $ hg ci -m renamebar
269 269
270 270 annotate after ABA with follow
271 271
272 272 $ hg annotate --follow foo
273 273 foo: foo
274 274
275 275 missing file
276 276
277 277 $ hg ann nosuchfile
278 278 abort: nosuchfile: no such file in rev e9e6b4fa872f
279 279 [255]
280 280
281 281 annotate file without '\n' on last line
282 282
283 283 $ printf "" > c
284 284 $ hg ci -A -m test -u nobody -d '1 0'
285 285 adding c
286 286 $ hg annotate c
287 287 $ printf "a\nb" > c
288 288 $ hg ci -m test
289 289 $ hg annotate c
290 290 [0-9]+: a (re)
291 291 [0-9]+: b (re)
292 292
293 293 Issue3841: check annotation of the file of which filelog includes
294 294 merging between the revision and its ancestor
295 295
296 296 to reproduce the situation with recent Mercurial, this script uses (1)
297 297 "hg debugsetparents" to merge without ancestor check by "hg merge",
298 298 and (2) the extension to allow filelog merging between the revision
299 299 and its ancestor by overriding "repo._filecommit".
300 300
301 301 $ cat > ../legacyrepo.py <<EOF
302 302 > from mercurial import node, error
303 303 > def reposetup(ui, repo):
304 304 > class legacyrepo(repo.__class__):
305 305 > def _filecommit(self, fctx, manifest1, manifest2,
306 306 > linkrev, tr, changelist):
307 307 > fname = fctx.path()
308 308 > text = fctx.data()
309 309 > flog = self.file(fname)
310 310 > fparent1 = manifest1.get(fname, node.nullid)
311 311 > fparent2 = manifest2.get(fname, node.nullid)
312 312 > meta = {}
313 313 > copy = fctx.renamed()
314 314 > if copy and copy[0] != fname:
315 315 > raise error.Abort('copying is not supported')
316 316 > if fparent2 != node.nullid:
317 317 > changelist.append(fname)
318 318 > return flog.add(text, meta, tr, linkrev,
319 319 > fparent1, fparent2)
320 320 > raise error.Abort('only merging is supported')
321 321 > repo.__class__ = legacyrepo
322 322 > EOF
323 323
324 324 $ cat > baz <<EOF
325 325 > 1
326 326 > 2
327 327 > 3
328 328 > 4
329 329 > 5
330 330 > EOF
331 331 $ hg add baz
332 332 $ hg commit -m "baz:0"
333 333
334 334 $ cat > baz <<EOF
335 335 > 1 baz:1
336 336 > 2
337 337 > 3
338 338 > 4
339 339 > 5
340 340 > EOF
341 341 $ hg commit -m "baz:1"
342 342
343 343 $ cat > baz <<EOF
344 344 > 1 baz:1
345 345 > 2 baz:2
346 346 > 3
347 347 > 4
348 348 > 5
349 349 > EOF
350 350 $ hg debugsetparents 17 17
351 351 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:2"
352 352 $ hg debugindexdot .hg/store/data/baz.i
353 353 digraph G {
354 354 -1 -> 0
355 355 0 -> 1
356 356 1 -> 2
357 357 1 -> 2
358 358 }
359 359 $ hg annotate baz
360 360 17: 1 baz:1
361 361 18: 2 baz:2
362 362 16: 3
363 363 16: 4
364 364 16: 5
365 365
366 366 $ cat > baz <<EOF
367 367 > 1 baz:1
368 368 > 2 baz:2
369 369 > 3 baz:3
370 370 > 4
371 371 > 5
372 372 > EOF
373 373 $ hg commit -m "baz:3"
374 374
375 375 $ cat > baz <<EOF
376 376 > 1 baz:1
377 377 > 2 baz:2
378 378 > 3 baz:3
379 379 > 4 baz:4
380 380 > 5
381 381 > EOF
382 382 $ hg debugsetparents 19 18
383 383 $ hg --config extensions.legacyrepo=../legacyrepo.py commit -m "baz:4"
384 384 $ hg debugindexdot .hg/store/data/baz.i
385 385 digraph G {
386 386 -1 -> 0
387 387 0 -> 1
388 388 1 -> 2
389 389 1 -> 2
390 390 2 -> 3
391 391 3 -> 4
392 392 2 -> 4
393 393 }
394 394 $ hg annotate baz
395 395 17: 1 baz:1
396 396 18: 2 baz:2
397 397 19: 3 baz:3
398 398 20: 4 baz:4
399 399 16: 5
400 400
401 401 annotate clean file
402 402
403 403 $ hg annotate -ncr "wdir()" foo
404 404 11 472b18db256d : foo
405 405
406 406 annotate modified file
407 407
408 408 $ echo foofoo >> foo
409 409 $ hg annotate -r "wdir()" foo
410 410 11 : foo
411 411 20+: foofoo
412 412
413 413 $ hg annotate -cr "wdir()" foo
414 414 472b18db256d : foo
415 415 b6bedd5477e7+: foofoo
416 416
417 417 $ hg annotate -ncr "wdir()" foo
418 418 11 472b18db256d : foo
419 419 20 b6bedd5477e7+: foofoo
420 420
421 421 $ hg annotate --debug -ncr "wdir()" foo
422 422 11 472b18db256d1e8282064eab4bfdaf48cbfe83cd : foo
423 423 20 b6bedd5477e797f25e568a6402d4697f3f895a72+: foofoo
424 424
425 425 $ hg annotate -udr "wdir()" foo
426 426 test Thu Jan 01 00:00:00 1970 +0000: foo
427 427 test [A-Za-z0-9:+ ]+: foofoo (re)
428 428
429 429 $ hg annotate -ncr "wdir()" -Tjson foo
430 430 [
431 431 {
432 432 "line": "foo\n",
433 433 "node": "472b18db256d1e8282064eab4bfdaf48cbfe83cd",
434 434 "rev": 11
435 435 },
436 436 {
437 437 "line": "foofoo\n",
438 438 "node": null,
439 439 "rev": null
440 440 }
441 441 ]
442 442
443 443 annotate added file
444 444
445 445 $ echo bar > bar
446 446 $ hg add bar
447 447 $ hg annotate -ncr "wdir()" bar
448 448 20 b6bedd5477e7+: bar
449 449
450 450 annotate renamed file
451 451
452 452 $ hg rename foo renamefoo2
453 453 $ hg annotate -ncr "wdir()" renamefoo2
454 454 11 472b18db256d : foo
455 455 20 b6bedd5477e7+: foofoo
456 456
457 457 annotate missing file
458 458
459 459 $ rm baz
460 460 #if windows
461 461 $ hg annotate -ncr "wdir()" baz
462 462 abort: $TESTTMP\repo\baz: The system cannot find the file specified
463 463 [255]
464 464 #else
465 465 $ hg annotate -ncr "wdir()" baz
466 466 abort: No such file or directory: $TESTTMP/repo/baz
467 467 [255]
468 468 #endif
469 469
470 470 annotate removed file
471 471
472 472 $ hg rm baz
473 473 #if windows
474 474 $ hg annotate -ncr "wdir()" baz
475 475 abort: $TESTTMP\repo\baz: The system cannot find the file specified
476 476 [255]
477 477 #else
478 478 $ hg annotate -ncr "wdir()" baz
479 479 abort: No such file or directory: $TESTTMP/repo/baz
480 480 [255]
481 481 #endif
482 482
483 $ hg revert --all --no-backup --quiet
484 $ hg id -n
485 20
486
487 Test followlines() revset
488
489 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5)'
490 16: baz:0
491 19: baz:3
492 20: baz:4
493 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=20)'
494 16: baz:0
495 19: baz:3
496 20: baz:4
497 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 3, 5, rev=.^)'
498 16: baz:0
499 19: baz:3
500 $ printf "0\n0\n" | cat - baz > baz1
501 $ mv baz1 baz
502 $ hg ci -m 'added two lines with 0'
503 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
504 16: baz:0
505 19: baz:3
506 20: baz:4
507 $ echo 6 >> baz
508 $ hg ci -m 'added line 8'
509 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
510 16: baz:0
511 19: baz:3
512 20: baz:4
513 $ sed 's/3/3+/' baz > baz.new
514 $ mv baz.new baz
515 $ hg ci -m 'baz:3->3+'
516 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
517 16: baz:0
518 19: baz:3
519 20: baz:4
520 23: baz:3->3+
521 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 1, 2)'
522 21: added two lines with 0
523
524 file patterns are okay
525 $ hg log -T '{rev}: {desc}\n' -r 'followlines("path:baz", 1, 2)'
526 21: added two lines with 0
527
528 renames are followed
529 $ hg mv baz qux
530 $ sed 's/4/4+/' qux > qux.new
531 $ mv qux.new qux
532 $ hg ci -m 'qux:4->4+'
533 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
534 16: baz:0
535 19: baz:3
536 20: baz:4
537 23: baz:3->3+
538 24: qux:4->4+
539 $ hg up 23 --quiet
540
541 merge
542 $ echo 7 >> baz
543 $ hg ci -m 'one more line, out of line range'
544 created new head
545 $ sed 's/3+/3-/' baz > baz.new
546 $ mv baz.new baz
547 $ hg ci -m 'baz:3+->3-'
548 $ hg log -T '{rev}: {desc}\n' -r 'followlines(baz, 5, 7)'
549 16: baz:0
550 19: baz:3
551 20: baz:4
552 23: baz:3->3+
553 26: baz:3+->3-
554 $ hg merge 24
555 merging baz and qux to qux
556 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
557 (branch merge, don't forget to commit)
558 $ hg ci -m merge
559 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
560 16: baz:0
561 19: baz:3
562 20: baz:4
563 23: baz:3->3+
564 24: qux:4->4+
565 26: baz:3+->3-
566 27: merge
567 $ hg up 24 --quiet
568 $ hg merge 26
569 merging qux and baz to qux
570 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
571 (branch merge, don't forget to commit)
572 $ hg ci -m 'merge from other side'
573 created new head
574 $ hg log -T '{rev}: {desc}\n' -r 'followlines(qux, 5, 7)'
575 16: baz:0
576 19: baz:3
577 20: baz:4
578 23: baz:3->3+
579 24: qux:4->4+
580 26: baz:3+->3-
581 28: merge from other side
582 $ hg up 23 --quiet
583
584 check error cases
585 $ hg log -r 'followlines(baz, 1, 2, rev=desc("b"))'
586 hg: parse error: followlines expects exactly one revision
587 [255]
588 $ hg log -r 'followlines("glob:*", 1, 2)'
589 hg: parse error: followlines expects exactly one file
590 [255]
591 $ hg log -r 'followlines(baz, x, 4)'
592 hg: parse error: line range bounds must be integers
593 [255]
594 $ hg log -r 'followlines(baz, 5, 4)'
595 hg: parse error: line range must be positive
596 [255]
597 $ hg log -r 'followlines(baz, 0, 4)'
598 hg: parse error: fromline must be strictly positive
599 [255]
600 $ hg log -r 'followlines(baz, 2, 40)'
601 abort: line range exceeds file size
602 [255]
603
483 604 Test annotate with whitespace options
484 605
485 606 $ cd ..
486 607 $ hg init repo-ws
487 608 $ cd repo-ws
488 609 $ cat > a <<EOF
489 610 > aa
490 611 >
491 612 > b b
492 613 > EOF
493 614 $ hg ci -Am "adda"
494 615 adding a
495 616 $ sed 's/EOL$//g' > a <<EOF
496 617 > a a
497 618 >
498 619 > EOL
499 620 > b b
500 621 > EOF
501 622 $ hg ci -m "changea"
502 623
503 624 Annotate with no option
504 625
505 626 $ hg annotate a
506 627 1: a a
507 628 0:
508 629 1:
509 630 1: b b
510 631
511 632 Annotate with --ignore-space-change
512 633
513 634 $ hg annotate --ignore-space-change a
514 635 1: a a
515 636 1:
516 637 0:
517 638 0: b b
518 639
519 640 Annotate with --ignore-all-space
520 641
521 642 $ hg annotate --ignore-all-space a
522 643 0: a a
523 644 0:
524 645 1:
525 646 0: b b
526 647
527 648 Annotate with --ignore-blank-lines (similar to no options case)
528 649
529 650 $ hg annotate --ignore-blank-lines a
530 651 1: a a
531 652 0:
532 653 1:
533 654 1: b b
534 655
535 656 $ cd ..
536 657
537 658 Annotate with linkrev pointing to another branch
538 659 ------------------------------------------------
539 660
540 661 create history with a filerev whose linkrev points to another branch
541 662
542 663 $ hg init branchedlinkrev
543 664 $ cd branchedlinkrev
544 665 $ echo A > a
545 666 $ hg commit -Am 'contentA'
546 667 adding a
547 668 $ echo B >> a
548 669 $ hg commit -m 'contentB'
549 670 $ hg up --rev 'desc(contentA)'
550 671 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
551 672 $ echo unrelated > unrelated
552 673 $ hg commit -Am 'unrelated'
553 674 adding unrelated
554 675 created new head
555 676 $ hg graft -r 'desc(contentB)'
556 677 grafting 1:fd27c222e3e6 "contentB"
557 678 $ echo C >> a
558 679 $ hg commit -m 'contentC'
559 680 $ echo W >> a
560 681 $ hg log -G
561 682 @ changeset: 4:072f1e8df249
562 683 | tag: tip
563 684 | user: test
564 685 | date: Thu Jan 01 00:00:00 1970 +0000
565 686 | summary: contentC
566 687 |
567 688 o changeset: 3:ff38df03cc4b
568 689 | user: test
569 690 | date: Thu Jan 01 00:00:00 1970 +0000
570 691 | summary: contentB
571 692 |
572 693 o changeset: 2:62aaf3f6fc06
573 694 | parent: 0:f0932f74827e
574 695 | user: test
575 696 | date: Thu Jan 01 00:00:00 1970 +0000
576 697 | summary: unrelated
577 698 |
578 699 | o changeset: 1:fd27c222e3e6
579 700 |/ user: test
580 701 | date: Thu Jan 01 00:00:00 1970 +0000
581 702 | summary: contentB
582 703 |
583 704 o changeset: 0:f0932f74827e
584 705 user: test
585 706 date: Thu Jan 01 00:00:00 1970 +0000
586 707 summary: contentA
587 708
588 709
589 710 Annotate should list ancestor of starting revision only
590 711
591 712 $ hg annotate a
592 713 0: A
593 714 3: B
594 715 4: C
595 716
596 717 $ hg annotate a -r 'wdir()'
597 718 0 : A
598 719 3 : B
599 720 4 : C
600 721 4+: W
601 722
602 723 Even when the starting revision is the linkrev-shadowed one:
603 724
604 725 $ hg annotate a -r 3
605 726 0: A
606 727 3: B
607 728
608 729 $ cd ..
609 730
610 731 Issue5360: Deleted chunk in p1 of a merge changeset
611 732
612 733 $ hg init repo-5360
613 734 $ cd repo-5360
614 735 $ echo 1 > a
615 736 $ hg commit -A a -m 1
616 737 $ echo 2 >> a
617 738 $ hg commit -m 2
618 739 $ echo a > a
619 740 $ hg commit -m a
620 741 $ hg update '.^' -q
621 742 $ echo 3 >> a
622 743 $ hg commit -m 3 -q
623 744 $ hg merge 2 -q
624 745 $ cat > a << EOF
625 746 > b
626 747 > 1
627 748 > 2
628 749 > 3
629 750 > a
630 751 > EOF
631 752 $ hg resolve --mark -q
632 753 $ hg commit -m m
633 754 $ hg annotate a
634 755 4: b
635 756 0: 1
636 757 1: 2
637 758 3: 3
638 759 2: a
639 760
640 761 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now