##// END OF EJS Templates
revset: get rid of redundant error checking from match()...
Yuya Nishihara -
r29425:4f5531f8 default
parent child Browse files
Show More
@@ -1,3663 +1,3661 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazily-evaluated, descending generatorset of the ancestors
    of ``revs`` (inclusive). When ``followfirst`` is true only first
    parents are followed.
    """
    # parentrevs(rev)[:1] restricts the walk to first parents;
    # [:None] keeps both parents
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Merge the input revs (consumed in descending order) with the
        # parents discovered so far. A max-heap is emulated by pushing
        # negated revision numbers onto Python's min-heap.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # feed the next input rev once the current one is reached
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
62 62
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a lazily-evaluated, ascending generatorset of descendants of
    ``revs`` (exclusive of ``revs`` themselves).
    """
    # parentrevs(i)[:1] checks only the first parent when followfirst
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending sweep: a rev is a descendant iff one of
            # its (possibly cut) parents was already marked as seen.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
91 91
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-python fallback used when the changelog has no native
    reachableroots implementation (see reachableroots below).
    """
    # NOTE(review): this early exit returns a plain list while the
    # "nothing reachable" case below returns baseset(); callers appear
    # to tolerate both -- confirm before normalizing.
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only head-reachability requested; no need to expand
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot bounds the walk: nothing below it can be a root
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # Second pass over visited revs in ascending order: pull in every
    # rev lying on a path between a reachable root and a visited head.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Returns a sorted baseset.
    """
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # changelog lacks a native reachableroots; use the pure-python one
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
146 146
# Grammar table consumed by the generic parser: maps each token type to
# its binding strength (higher binds tighter) and the parse-tree node
# names to emit when the token appears in primary/prefix/infix/suffix
# position.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# reserved words that tokenize() never treats as symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Each token is a ``(type, value, position)`` triple; the stream is
    always terminated by an ``('end', None, pos)`` token.

    ``lookup``, if given, is a callable used to test whether a name is a
    known symbol (e.g. an existing tag), which lets odd names like
    ``foo-bar`` or old-style ``a:b`` ranges lex as plain symbols.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # single forward scan over the program; each branch consumes one token
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-lex it as a '-' chain
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Any other node shape (including ``None``) raises ParseError carrying
    the caller-supplied message *err*.
    """
    if not x:
        raise error.ParseError(err)
    kind = x[0]
    if kind == 'string' or kind == 'symbol':
        return x[1]
    raise error.ParseError(err)
309 309
def getlist(x):
    """Flatten parse node *x* into a list of argument nodes.

    ``None``/empty yields ``[]``, a 'list' node yields its children, and
    any other node is wrapped as a single-element list.
    """
    if not x:
        return []
    return list(x[1:]) if x[0] == 'list' else [x]

def getargs(x, min, max, err):
    """Return the argument nodes of *x*, enforcing an arity of min..max.

    A negative *max* means "no upper bound". Raises ParseError with the
    message *err* when the arity constraint is violated.
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min or (0 <= max < nargs):
        raise error.ParseError(err)
    return args
322 322
def getargsdict(x, funcname, keys):
    # Map the positional and 'key=value' arguments of a revset function
    # call onto a dict; *keys* is a space-separated list of accepted
    # argument names, *funcname* is used in error messages.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
326 326
def getset(repo, subset, x):
    # Evaluate parse tree *x* against *subset* by dispatching on the node
    # type through the methods table; always returns a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # duck-typed check: anything with isascending is a smartset
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        # third-party predicate returned a plain list; warn and wrap it
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
340 340
def _getrevsource(repo, r):
    # Return the revision that *r* originates from, following the extra
    # metadata recorded by graft/transplant/rebase; None when r has no
    # recorded source or the source is unknown locally.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                # stale pointer: the source changeset no longer exists here
                pass
    return None
350 350
# operator methods

def stringset(repo, subset, x):
    # Resolve a bare revision string; nullrev is only surfaced when the
    # subset is a fullreposet (it is not a member of ordinary subsets).
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()

def rangeset(repo, subset, x, y):
    # 'x:y' -- evaluate both endpoints against the full repo first
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # range ending at the working-directory pseudo revision
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range, e.g. '5:2'
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset

def dagrange(repo, subset, x, y):
    # 'x::y' -- revisions that are both descendants of x and ancestors of y
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs

def andset(repo, subset, x, y):
    # intersection: evaluate y within the result of x
    return getset(repo, getset(repo, subset, x), y)

def differenceset(repo, subset, x, y):
    return getset(repo, subset, x) - getset(repo, subset, y)

def orset(repo, subset, *xs):
    # balanced divide-and-conquer union keeps recursion depth O(log n)
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b

def notset(repo, subset, x):
    return subset - getset(repo, subset, x)

def listset(repo, subset, *xs):
    # bare 'x, y' lists are only meaningful inside function calls
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))

def keyvaluepair(repo, subset, k, v):
    raise error.ParseError(_("can't use a key-value pair in this context"))

def func(repo, subset, a, b):
    # dispatch a parsed function call to the registered predicate
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # suggest only predicates that carry a docstring (i.e. public ones)
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
424 424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator that registers each predicate below into symbols/safesymbols
predicate = registrar.revsetpredicate()
441 441
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination; deliberately has no
    # docstring so it is not advertised in the revset help
    #
    # Fix: the funcname passed to getargsdict was 'limit' (copy-pasted
    # from another predicate), so argument-parsing errors named the
    # wrong function. Report '_destupdate' instead.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination; deliberately has no
    # docstring so it is not advertised in the revset help
    sourceset = None
    if x is not None:
        # optional argument: restrict the merge-source candidates
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
455 455
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field index 1 selects added files -- see checkstatus()
    return checkstatus(repo, subset, pat, 1)
467 467
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold pairwise: gca(a, b, c) == gca(gca(a, b), c)
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
492 492
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation behind ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
511 511
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload must be an integer
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
528 528
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # case-insensitive: both the pattern and the user field are lowered
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))
538 538
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect resolves the status keyword into the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    return bisect(repo, subset, x)
561 561
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # a literal bookmark name must resolve, else it is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. re:) -- collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
599 599
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # argument is a revset: collect the branches of those revisions and
    # return every changeset on any of them (plus the revisions themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
639 639
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence machinery maintains the 'bumped' rev cache
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only present when operating on a bundlerepo
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
662 662
def checkstatus(repo, subset, pat, field):
    # Filter *subset* to changesets whose status against their first
    # parent mentions a file matching *pat* in status field *field*.
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure below can mutate the cached matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the changectx, rebuild per rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: the pattern is a single literal file name
            fname = m.files()[0]
        if fname is not None:
            # cheap pre-filter: skip revs that do not touch the file at all
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
def _children(repo, subset, parentset):
    # revisions in *subset* with at least one parent in *parentset*
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in subset:
        if r <= minrev:
            # a child always has a higher rev than any of its parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)

@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
716 716
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')

@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test suffices
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # pattern: scan the whole manifest (slow path)
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
752 752
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' extra is left behind by the convert extension
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
775 775
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate compiles the interval once; dm tests a unix timestamp
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))

@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # both sides are lowered for the case-insensitive substring match
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))
798 798
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation behind descendants() and _firstdescendants()
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to restore its iteration order
        result = subset & result
    return result

@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
829 829
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
874 874
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence machinery maintains the 'divergent' rev cache
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
893 893
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # value=None means: match on presence of the label alone
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
924 924
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # changelog revisions connected to the matched filelogs
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: consult exactly one filelog
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against files known to the working directory
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: file node -> changelog rev discovered by the fallback scan
        # below, so duplicate file revisions are resolved without rescanning
        known = {}
        # scanpos: how far the fallback changelog scan has advanced; set to
        # None while a scan is in progress (or when one gave up at the tip)
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
990 990
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # pure alias: identical argument handling and semantics as limit()
    return limit(repo, subset, x)
996 996
def _follow(repo, subset, x, name, followfirst=False):
    """Common backend for the 'follow' and '_followfirst' predicates.

    With a pattern argument, collect the ancestors of each matching file in
    the working directory's first parent (plus the revision that introduced
    each file). Without one, collect the ancestors of that parent itself.
    followfirst=True restricts the walk to first parents.
    """
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        # files present in '.' that the pattern selects
        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1017 1017
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # delegate to _follow(), which handles both the bare and pattern forms
    return _follow(repo, subset, x, 'follow')
1026 1026
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # NOTE: leading underscore marks this as an internal predicate; it has
    # no docstring, so it does not show up in user-facing help.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1033 1033
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # spanset(repo) covers every real revision; intersecting with it also
    # filters "null" out of subset
    return subset & spanset(repo) # drop "null" if any
1041 1041
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search the changed files, the user and the description
        ctx = repo[x]
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(gr.search(e) for e in fields)

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1062 1062
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # each argument is a two-character prefix followed by its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the changelog has no entry for the working directory; ask
            # the working context for its file list instead
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1126 1126
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain ('p:') pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1139 1139
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the revision of every head node of every named branch
    for branchheads in repo.branchmap().itervalues():
        for h in branchheads:
            hs.add(cl.rev(h))
    return subset & baseset(hs)
1151 1151
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    s = getset(repo, subset, x)
    return s - parents(repo, subset, x)
1159 1159
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1168 1168
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # compare case-insensitively against files, user and description
        for t in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1183 1183
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    lim, ofs = 1, 0
    try:
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    it = iter(os)
    # skip the first `ofs` revisions of the argument set
    for _skipped in xrange(ofs):
        if next(it, None) is None:
            break
    # then take up to `lim` revisions that are also in subset
    result = []
    for _taken in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1220 1220
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    try:
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    it = iter(os)
    # walk the reversed set and keep up to `lim` members also in subset
    result = []
    for _taken in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1246 1246
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1261 1261
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge, condrepr='<merge>')
1271 1271
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts the children of rev (baserev + i); revisions
    # below baserev cannot be in subset, so they need no slot
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1291 1291
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1306 1306
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # field index 0 of checkstatus() — presumably "modified"; compare
    # removes(), which passes 2 (confirm against checkstatus)
    return checkstatus(repo, subset, pat, 0)
1318 1318
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    # stringmatcher interprets the optional 're:'/'literal:' prefix
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern form: collect every namespace whose name matches
        # (note: 'ns' is rebound here from the argument string to the
        # namespace object)
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # resolve every non-deprecated name in the selected namespaces to revs
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1356 1356
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex node: resolve directly through the changelog
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node or non-hex input: no match
            rn = None
    else:
        # shorter prefix: _partialmatch returns None when nothing matches
        # (and presumably raises when the prefix is ambiguous — confirm in
        # the revlog implementation)
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1380 1380
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the obsstore-provided set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1388 1388
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither in
        # the include set nor a descendant of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # revisions reachable from include but not from exclude
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1414 1414
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the source chain back to the very first source, or None if
        # rev has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1446 1446
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured 'default-push'/'default' path when no
    # explicit path is given
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # capture (and discard) any ui output produced during discovery
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1473 1473
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        prev = repo[None].p1().rev()
        if prev < 0:
            return baseset()
        return subset & baseset([prev])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1492 1492
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wparents = repo[None].parents()
        if len(wparents) < 2:
            return baseset()
        prev = wparents[1].rev()
        if prev < 0:
            return baseset()
        return subset & baseset([prev])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1515 1515
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[None].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1535 1535
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache exposes precomputed per-phase rev sets
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: compute the phase of each candidate rev individually
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1549 1549
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1557 1557
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1565 1565
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n is a parser token; n[1] holds its string value
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            # NOTE(review): parentrevs() looks like it always returns a
            # pair, which would make this length guard always true and let
            # nullrev through to the intersection below — confirm
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1590 1590
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # an unknown revision was named: return an empty set instead of
        # propagating the lookup error
        return baseset()
1604 1604
# for internal use; deliberately has no docstring so it stays out of
# user-facing help
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every phase set except the first (index 0
        # presumably holds the public revs — confirm against phases.py)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: test each candidate's phase individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1623 1623
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public

    def condition(r):
        return phase(repo, r) == target

    return subset.filter(condition, condrepr=('<phase %r>', target),
                         cache=False)
1634 1634
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' is a shorthand for the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1669 1669
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # field index 2 of checkstatus() — presumably "removed"; compare
    # modifies(), which passes 0 (confirm against checkstatus)
    return checkstatus(repo, subset, pat, 2)
1681 1681
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # unknown (and non-null) revision numbers yield an empty set
    if revnum not in repo.changelog and revnum != node.nullrev:
        return baseset()
    return subset & baseset([revnum])
1697 1697
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1809 1809
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    l = getset(repo, subset, x)
    l.reverse()
    return l
1817 1817
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def filter(r):
        for p in parents(r):
            if 0 <= p and p in s:
                return False
        return True
    return subset & s.filter(filter, condrepr='<roots>')
1830 1830
# Map of sort key name -> key function taking a changectx, used by sort().
# 'author' is an alias for 'user'; 'date' compares only the timestamp part.
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1839 1839
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            opts['topo.firstbranch'] = args['topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))

    return args['set'], keyflags, opts
1877 1877
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1920 1920
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2118 2118
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only the .hgsubstate file records subrepo state changes
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2162 2162
def _substringmatcher(pattern):
    """Like util.stringmatcher(), but a 'literal' pattern matches substrings.

    Returns (kind, pattern, matcher) with the same shape as stringmatcher().
    """
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher
2168 2168
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2197 2197
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    """Alias for ``tag()`` with no arguments: all tagged revisions."""
    return tag(repo, subset, x)
2201 2201
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
2210 2210
2211 2211
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    return author(repo, subset, x)
2221 2221
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory revision, if present in the subset."""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2230 2230
# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    """Expand a '\\0'-separated list of revision identifiers."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2260 2260
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    """Expand a '\\0'-separated list of integer revisions."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    return baseset([r for r in ls if r in subset])
2270 2270
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    """Expand a '\\0'-separated list of hex node ids."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    return baseset([r for r in ls if r in subset])
2281 2281
# Dispatch table: parse-tree node type -> evaluation function.
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2298 2298
def _matchonly(revs, bases):
    """Detect 'ancestors(A) and not ancestors(B)' and return the arg list
    for the fast-path 'only(A, B)' rewrite, or None when not applicable.

    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if (revs is not None
        and revs[0] == 'func'
        and getstring(revs[1], _('not a symbol')) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
2313 2313
def _optimize(x, small):
    """Optimize parse tree ``x``, returning (weight, newtree).

    The weight estimates evaluation cost and is used by 'and' to evaluate
    the cheaper operand first; ``small`` gives a bonus to single-revision
    leaves so they sort before expensive function calls.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = _optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2441 2441
def optimize(tree):
    """Return the optimized form of ``tree``, discarding the weight."""
    _weight, newtree = _optimize(tree, small=True)
    return newtree
2445 2445
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2450 2450
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
2471 2471
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # return (name, args) if tree is a function call, else None
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2490 2490
def expandaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config entries in ``tree``.

    If ``showwarning`` is given, it is called once per broken (but
    unreferenced) alias so the user learns about config mistakes.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2501 2501
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        pending = [tree]
        l = []
        while pending:
            e = pending.pop()
            if e[0] == '_concat':
                pending.extend(reversed(e[1:]))
            elif e[0] in ('string', 'symbol'):
                l.append(e[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
                raise error.ParseError(msg)
        return ('string', ''.join(l))
    else:
        return tuple(foldconcat(t) for t in tree)
2522 2522
def parse(spec, lookup=None):
    """Parse a revset spec into a parse tree (default tokenizing options)."""
    return _parsewith(spec, lookup=lookup)
2525 2525
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    pass
2529 2529
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec.

    Empty-spec validation is delegated to matchany(), which raises
    ParseError for an empty query.
    """
    return matchany(ui, [spec], repo=repo)
2535 2533
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2553 2551
def _makematcher(ui, tree, repo):
    """Turn a parsed revset tree into a matcher function.

    The returned mfunc(repo, subset=None) evaluates the tree against
    subset (the whole repo when subset is None).
    """
    if ui:
        # alias expansion needs a ui for configuration and warnings
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = optimize(foldconcat(tree))
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # promote plain iterables to a smartset first
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2569 2567
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(val):
        # single-quote and escape via repr of the stringified value
        return repr(str(val))

    def argtype(code, arg):
        # render a single argument for format character 'code'
        if code == 'd':
            return str(int(arg))
        if code == 's':
            return quote(arg)
        if code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if code == 'n':
            return quote(node.hex(arg))
        if code == 'b':
            return quote(arg.branch())

    def listexp(items, code):
        # render a list argument as a parenthesized expression
        count = len(items)
        if count == 0:
            return "_list('')"
        if count == 1:
            return argtype(code, items[0])
        if code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in items)
        if code == 's':
            return "_list('%s')" % "\0".join(items)
        if code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in items)
        if code == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in items)

        # no compact list form (e.g. %lr): split and join with "or"
        mid = count // 2
        return '(%s or %s)' % (listexp(items[:mid], code),
                               listexp(items[mid:], code))

    pieces = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                # escaped literal percent sign
                pieces.append(d)
            elif d in 'dsnbr':
                pieces.append(argtype(d, args[argindex]))
                argindex += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                pieces.append(listexp(list(args[argindex]), d))
                argindex += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            pieces.append(c)
        pos += 1

    return ''.join(pieces)
2664 2662
def prettyformat(tree):
    """Pretty-print a parsed revset tree via parser.prettyformat(),
    passing ('string', 'symbol') as the terminal node types."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2667 2665
def depth(tree):
    """Return the nesting depth of a parsed tree (0 for a leaf)."""
    if not isinstance(tree, tuple):
        return 0
    # a node is one level deeper than its deepest child
    return 1 + max(depth(subtree) for subtree in tree)
2673 2671
def funcsused(tree):
    """Return the set of function names used anywhere in a parsed tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaf nodes reference no functions
        return set()
    names = set()
    names.update(*[funcsused(subtree) for subtree in tree[1:]])
    if tree[0] == 'func':
        # the function name is the payload of the first child node
        names.add(tree[1][1])
    return names
2684 2682
2685 2683 def _formatsetrepr(r):
2686 2684 """Format an optional printable representation of a set
2687 2685
2688 2686 ======== =================================
2689 2687 type(r) example
2690 2688 ======== =================================
2691 2689 tuple ('<not %r>', other)
2692 2690 str '<branch closed>'
2693 2691 callable lambda: '<branch %r>' % sorted(b)
2694 2692 object other
2695 2693 ======== =================================
2696 2694 """
2697 2695 if r is None:
2698 2696 return ''
2699 2697 elif isinstance(r, tuple):
2700 2698 return r[0] % r[1:]
2701 2699 elif isinstance(r, str):
2702 2700 return r
2703 2701 elif callable(r):
2704 2702 return r()
2705 2703 else:
2706 2704 return repr(r)
2707 2705
class abstractsmartset(object):
    """Abstract base class of all smartset implementations.

    A smartset represents an ordered set of revision numbers with fast
    membership testing; concrete subclasses must implement the methods
    raising NotImplementedError below.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # fastasc yields values in ascending order, so the first
            # value produced is the smallest one
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        # 'min' here is the builtin, not a recursive call to this method
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # fastdesc yields values in descending order, so the first
            # value produced is the largest one
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        # 'max' here is the builtin, not a recursive call to this method
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    # NOTE(review): concrete subclasses in this file implement
    # sort(reverse=False); this abstract default of True differs — confirm
    # whether any caller relies on it.
    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the full repository removes nothing
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        # (a builtin such as set.__contains__ has no func_code attribute,
        # which is what the check below detects)
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2820 2818
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending is tri-state: None = iterate in insertion order,
        # True/False = iterate in ascending/descending sorted order
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # reuse the caller's set for membership testing
                # (presumably this instance attribute shadows the _set
                # propertycache below — confirm against util.propertycache)
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily built membership set (unless one was supplied in __init__)
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending-sorted copy backing fastasc/fastdesc
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bound __contains__ of the underlying set, cached on first use
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        # an explicit sort invalidates any topographical-order knowledge
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the concrete list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        # a set of at most one element is trivially sorted either way
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        # a set of at most one element is trivially sorted either way
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # '+'/'-' marks a forced ascending/descending iteration order
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2944 2942
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of 'it' accepted by the condition;
        # shared by __iter__, fastasc and fastdesc
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # a fast ascending iterator exists only if the subset has one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # a fast descending iterator exists only if the subset has one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer an iterator matching the declared iteration order, then
        # fall back to any fast iterator, then to plain iteration
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # non-empty as soon as a single revision passes the filter
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # the last element is the first one of the reversed iteration,
        # when a fast reversed iterator is available
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iterator: exhaust the set and keep the
            # last value seen
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3057 3055
3058 3056 def _iterordered(ascending, iter1, iter2):
3059 3057 """produce an ordered iteration from two iterators with the same order
3060 3058
3061 3059 The ascending is used to indicated the iteration direction.
3062 3060 """
3063 3061 choice = max
3064 3062 if ascending:
3065 3063 choice = min
3066 3064
3067 3065 val1 = None
3068 3066 val2 = None
3069 3067 try:
3070 3068 # Consume both iterators in an ordered way until one is empty
3071 3069 while True:
3072 3070 if val1 is None:
3073 3071 val1 = next(iter1)
3074 3072 if val2 is None:
3075 3073 val2 = next(iter2)
3076 3074 n = choice(val1, val2)
3077 3075 yield n
3078 3076 if val1 == n:
3079 3077 val1 = None
3080 3078 if val2 == n:
3081 3079 val2 = None
3082 3080 except StopIteration:
3083 3081 # Flush any remaining values and consume the other one
3084 3082 it = iter2
3085 3083 if val1 is not None:
3086 3084 yield val1
3087 3085 it = iter1
3088 3086 elif val2 is not None:
3089 3087 # might have been equality and both are empty
3090 3088 yield val2
3091 3089 for val in it:
3092 3090 yield val
3093 3091
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        # caches filled lazily: _genlist by _list, _asclist by
        # _trysetasclist once _genlist exists
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces full enumeration via the cached _list
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # both components must support fast ascending iteration
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        # both components must support fast descending iteration
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3302 3300
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # the caller knows the generator's order: install the matching
            # fast iterator and an order-aware membership test on this
            # instance (shadowing the class-level attributes)
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order allows stopping early
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order allows stopping early
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # the generator is exhausted: freeze a sorted copy and swap
            # the fast iterators for direct list access
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # (bug fix: this used to retry via self.first(), which returns
            # the wrong end of the set; after full consumption fastasc and
            # fastdesc are both set, so retrying last() terminates)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3482 3480
class spanset(abstractsmartset):
    """Lazy smartset covering a contiguous range of revision numbers.

    Duck-types the baseset class; behaves much like xrange(x, y), with two
    notable differences:
    - when x > y the set automatically iterates in descending order,
    - revisions hidden by this repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (default 0)
        end: first revision excluded, i.e. last + 1 (default len(repo))

        The spanset is descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to start <= end; direction is kept in _ascending
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # Not worth the trouble of asserting whether the range is still in
        # topographical order; use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        """Yield revs from iterrange, skipping hidden revisions."""
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        return self.fastasc() if self._ascending else self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(span)
        return iter(span)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        for rev in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        # subtract the hidden revisions that fall inside the span
        count = 0
        start = self._start
        end = self._end
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        iterfn = self.fastasc if self._ascending else self.fastdesc
        for rev in iterfn():
            return rev
        return None

    def last(self):
        iterfn = self.fastdesc if self._ascending else self.fastasc
        for rev in iterfn():
            return rev
        return None

    def __repr__(self):
        sign = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, sign,
                                 self._start, self._end - 1)
3597 3595
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset — make it so.  It was used with "&",
            # so assume it is at least set-like, and filter out hidden
            # revisions (this boldly assumes all smartsets are pure).
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        #   'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        #   'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3636 3634
def prettyformatset(revs):
    """Return a multi-line rendering of repr(revs).

    Each nested '<...>' segment of the repr is placed on its own line,
    indented by one space per nesting depth.
    """
    rs = repr(revs)
    lines = []
    pos = 0
    while pos < len(rs):
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = len(rs)
        # nesting depth = unmatched '<' seen so far
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        lines.append((depth, rs[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + text for depth, text in lines)
3650 3648
def loadpredicate(ui, extname, registrarobj):
    """Register every revset predicate found in registrarobj.

    Each predicate is added to the module-level symbol table; predicates
    flagged as safe are additionally recorded in safesymbols.
    (ui and extname are accepted for the loader interface but unused here.)
    """
    table = registrarobj._table
    for name, func in table.iteritems():
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
3658 3656
# load built-in predicates explicitly to setup safesymbols
# (ui and extname are unused by loadpredicate, hence None)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now