##// END OF EJS Templates
revset: add public function to create matcher from evaluatable tree...
Yuya Nishihara -
r29906:41491cf9 default
parent child Browse files
Show More
@@ -1,3730 +1,3732
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # cut=1 limits the parentrevs() slice below to the first parent only
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Lazily merge the input revs with their discovered ancestors,
        # always yielding in descending order.  The heap holds negated
        # revs because heapq is a min-heap.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # feed the next input rev so it competes with ancestors
                # already on the heap
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
62 62
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # cut=1 limits the parentrevs() slice below to the first parent only
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # a rev is a descendant iff one of its followed parents is
            # already known to be one; scan upward from the smallest root
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
91 91
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        # return baseset() for consistency with the other empty-result
        # return below (previously returned a bare []); the sole caller,
        # reachableroots(), wraps the result in baseset() either way
        return baseset()
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    # seen maps rev -> its parents, and doubles as the visited marker;
    # it is replayed below to select revs lying on a root::head path
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only ancestors of roots are wanted; don't walk past them
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass: a visited rev is on a path iff one of its parents is
    # already reachable; sorted() ensures parents are processed first
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the native changelog implementation when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # fall back to the pure-Python traversal
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
146 146
# Grammar table consumed by parser.parser(); each entry maps a token
# type to a 5-tuple describing how the parser should treat it.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), "parentpost"),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), "onlypost"),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
172 172
# words the tokenizer emits as their own token type instead of 'symbol'
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (bytes > 127 are accepted so non-ASCII tag/bookmark names tokenize;
# note: Python 2 bytestring semantics — chr()/xrange(256))
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # hand-written scanner: each token is yielded as (type, value, offset)
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: keep backslashes verbatim
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit as symbols
                    # separated by '-' operators
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
300 300
301 301 # helpers
302 302
def getsymbol(x):
    """Return the name carried by a 'symbol' parse tree node.

    Raises ParseError for any other node (including None).
    """
    if not x or x[0] != 'symbol':
        raise error.ParseError(_('not a symbol'))
    return x[1]
307 307
def getstring(x, err):
    """Return the value of a 'string' or 'symbol' parse tree node.

    Raises ParseError(err) for any other node (including None).
    """
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
312 312
def getlist(x):
    """Unwrap a parse tree node into a list of argument nodes.

    None (no argument) yields [], a 'list' node yields its children,
    and any other node becomes a singleton list.
    """
    if not x:
        return []
    return list(x[1:]) if x[0] == 'list' else [x]
319 319
def getargs(x, min, max, err):
    """Unwrap node x into a list of min..max argument nodes.

    A negative max means "no upper bound".  Raises ParseError(err) when
    the argument count falls outside the allowed range.
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min or (max >= 0 and nargs > max):
        raise error.ParseError(err)
    return args
325 325
def getargsdict(x, funcname, keys):
    # Parse node x into {name: node} for the space-separated 'keys';
    # 'funcname' is only used in error messages.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
329 329
def getset(repo, subset, x):
    """Evaluate parse tree x against subset and return a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    # dispatch on the node type through the 'methods' table
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # 'isascending' marks a smartset; pass it through untouched
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
343 343
def _getrevsource(repo, r):
    """Return the rev that r was grafted/transplanted/rebased from.

    Returns None when r has no recorded source or the source cannot be
    resolved in this repository.
    """
    extradata = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extradata:
            continue
        try:
            return repo[extradata[key]].rev()
        except error.RepoLookupError:
            pass
    return None
353 353
354 354 # operator methods
355 355
def stringset(repo, subset, x):
    """Resolve a plain revision identifier x against subset."""
    rev = repo[x].rev()
    # nullrev is implicitly a member of the full repo set only
    if rev in subset or (rev == node.nullrev
                         and isinstance(subset, fullreposet)):
        return baseset([rev])
    return baseset()
362 362
def rangeset(repo, subset, x, y):
    """'x:y' - revs between the first rev of x and the last rev of y."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # range ending at the working directory pseudo-rev: span the
        # real revs, then append wdirrev explicitly
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # reversed range starting at the working directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range; spanset end bound is exclusive
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
387 387
def dagrange(repo, subset, x, y):
    """'x::y' - revs that are both descendants of x and ancestors of y."""
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    return subset & xs
393 393
def andset(repo, subset, x, y):
    # 'x and y': evaluate y within the subset produced by x
    return getset(repo, getset(repo, subset, x), y)
396 396
def differenceset(repo, subset, x, y):
    # 'x - y': revs in x but not in y (both evaluated against subset)
    return getset(repo, subset, x) - getset(repo, subset, y)
399 399
def orset(repo, subset, *xs):
    # 'x or y or ...': union of all operands; split the operand list in
    # half recursively to keep the recursion depth logarithmic
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b
408 408
def notset(repo, subset, x):
    # 'not x': complement of x within subset
    return subset - getset(repo, subset, x)
411 411
def listset(repo, subset, *xs):
    # a bare 'x, y' list is only valid as function arguments
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
415 415
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only valid inside functions taking keyword arguments
    raise error.ParseError(_("can't use a key-value pair in this context"))
418 418
def func(repo, subset, a, b):
    """Evaluate a 'func' node: look up symbol a and call it with args b."""
    f = getsymbol(a)
    if f in symbols:
        return symbols[f](repo, subset, b)

    # suggest only documented predicates: doc-less ones are deliberately
    # hidden (see the 'bisected' backward-compatibility entry below)
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(f, syms)
428 428
# functions

# symbols are callables like:
# fn(repo, subset, x)
# with:
# repo - current repository instance
# subset - of revisions to be examined
# x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# NOTE(review): the @predicate decorator appears to register each function
# into the symbols table (and safesymbols when safe=True) - see registrar
predicate = registrar.revsetpredicate()
445 445
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (intentionally no docstring: doc-less predicates are hidden from
    # UnknownIdentifier suggestions)
    # fix: the funcname passed to getargsdict is used in parse error
    # messages; it previously said 'limit' (copy-paste from another
    # predicate), blaming the wrong function
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
451 451
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (intentionally no docstring: doc-less predicates are hidden from
    # UnknownIdentifier suggestions)
    sourceset = None
    if x is not None:
        sourceset = getset(repo, fullreposet(repo), x)
    return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
459 459
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 indexes the repo.status() result tuple (added files)
    return checkstatus(repo, subset, pat, 1)
471 471
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
496 496
def _ancestors(repo, subset, x, followfirst=False):
    """Shared implementation of ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
503 503
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # both parents are followed (followfirst=False)
    return _ancestors(repo, subset, x)
509 509
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (intentionally no docstring: hidden from help and suggestions)
    return _ancestors(repo, subset, x, followfirst=True)
515 515
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse tree node, e.g. ('symbol', '2')
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
532 532
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # match case-insensitively against the full user string
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
                         condrepr=('<user %r>', n))
542 542
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get translates the status keyword into a list of revs
    state = set(hbisect.get(repo, status))
    return subset & state
559 559
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated alias for bisect(); deliberately undocumented so it is
    # excluded from help output and UnknownIdentifier suggestions
    return bisect(repo, subset, x)
565 565
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dictionary lookup suffices
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: scan every bookmark name through the matcher
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revs
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # drop nullrev entries so they never leak into the result
    bms -= set([node.nullrev])
    return subset & bms
603 603
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # revspec case: collect the branches of the given changesets, then
    # select every rev on any of those branches (or in the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
643 643
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence layer maintains the 'bumped' rev cache
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
654 654
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo changelogs expose 'bundlerevs'
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
666 666
def checkstatus(repo, subset, pat, field):
    """Select revs whose status change of kind 'field' matches pat.

    'field' indexes the tuple returned by repo.status(); 1 is used for
    added files by adds() (presumably 0=modified, 2=removed - confirm
    against the callers outside this chunk).
    """
    # fileset patterns ('set:...') depend on the context, so the matcher
    # must be rebuilt for every revision in that case
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a literal single-file pattern allows simple
            # membership tests instead of running the matcher
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
698 698
def _children(repo, subset, parentset):
    """Return the members of subset having a parent in parentset."""
    if not parentset:
        return baseset()
    found = set()
    parentrevs = repo.changelog.parentrevs
    minparent = parentset.min()
    for rev in subset:
        # a child rev is always greater than any of its parents
        if rev <= minparent:
            continue
        for prev in parentrevs(rev):
            if prev in parentset:
                found.add(rev)
    return baseset(found)
712 712
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
720 720
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch(),
                         condrepr='<branch closed>')
729 729
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test is enough
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: walk the whole manifest through the matcher
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
756 756
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # convert_revision is recorded in extra by the convert extension;
        # prefix match so a short id still works
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
779 779
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate compiles the interval into a timestamp predicate
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]),
                         condrepr=('<date %r>', ds))
789 789
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower both sides once for a case-insensitive substring search
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches, condrepr=('<desc %r>', ds))
802 802
def _descendants(repo, subset, x, followfirst=False):
    """Shared implementation of descendants()/_firstdescendants()."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to recover subset's ordering
        result = subset & result
    return result
821 821
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # both parents are followed (followfirst=False)
    return _descendants(repo, subset, x)
827 827
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (intentionally no docstring: hidden from help and suggestions)
    return _descendants(repo, subset, x, followfirst=True)
833 833
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage is allocated lazily; it stays None for revs with no source
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
878 878
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence layer maintains the 'divergent' rev cache
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
888 888
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence layer maintains the 'extinct' rev cache
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
897 897
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # with no value given, any changeset carrying the label matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
928 928
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: canonicalize it relative to the cwd and use directly
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # real pattern: match against files in the working directory context
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # filenode -> changelog rev discovered during earlier scans; lets a
        # later filelog entry with the same node reuse that rev
        known = {}
        # lowest changelog rev where the next workaround scan may resume;
        # set to None for the duration of a scan
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
994 994
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # identical argument handling and semantics; only the predicate name
    # differs from limit()
    return limit(repo, subset, x)
1000 1000
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of the follow()/_followfirst() predicates.

    With no pattern argument, selects the ancestors of the working
    directory's first parent. With a pattern, selects the ancestors of the
    file revisions matched in the starting context (startrev's last member,
    or '.'), plus the revision introducing each matched file. When
    followfirst is True, only the first parent of each revision is followed.
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            # start following from the last revision of the given set
            rev = getset(repo, fullreposet(repo), l[1]).last()
            if rev is None:
                raise error.RepoLookupError(
                    _("%s: starting revision set cannot be empty") % name)
            c = repo[rev]
        # rev stays None when no startrev was given, so the matcher is
        # built against the working directory context
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # NOTE: the generator-local 'c' below shadows the outer context
            # only inside the expression; it does not leak out
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1029 1029
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # all the work happens in the shared _follow() helper
    return _follow(repo, subset, x, 'follow')
1038 1038
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first
    # parent of every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1045 1045
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # spanset(repo) covers only real revisions, so the intersection also
    # removes the "null" revision from subset when present
    return subset & spanset(repo) # drop "null" if any
1053 1053
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search changed files, then author, then description, stopping
        # on the first hit
        c = repo[x]
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1074 1074
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    # sort each string argument into its bucket by two-character prefix
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry; ask its
            # context for the changed files instead
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1138 1138
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1151 1151
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    # collect the head nodes of every named branch and map them to revs
    for nodes in repo.branchmap().itervalues():
        for h in nodes:
            headrevs.add(cl.rev(h))
    return subset & baseset(headrevs)
1163 1163
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    return getset(repo, subset, x) - parents(repo, subset, x)
1171 1171
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1180 1180
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # compare case-insensitively against files, author and description
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1195 1195
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        # defaults: take a single element, skip none
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip the first 'ofs' elements of the evaluated set
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # then collect up to 'lim' elements that are also in 'subset'
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1232 1232
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # walk the reversed set and keep up to 'lim' members also in subset
    for dummy in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1258 1258
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.max()
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, candidates))
    except ValueError:
        # max() on an empty smartset raises ValueError, like python's max()
        pass
    return baseset(datarepr=('<max %r, %r>', subset, candidates))
1273 1273
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    def ismerge(r):
        # a merge has a real (non -1) second parent
        return parentrevs(r)[1] != -1
    return subset.filter(ismerge, condrepr='<merge>')
1283 1283
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[r - baserev] counts how many revisions after baserev name
    # r as a parent, i.e. the number of children of r seen so far
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1303 1303
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.min()
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, candidates))
    except ValueError:
        # min() on an empty smartset raises ValueError, like python's min()
        pass
    return baseset(datarepr=('<min %r, %r>', subset, candidates))
1318 1318
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # field index 0 selects the "modified" bucket of the status result
    # (removes() uses index 2 for the "removed" bucket)
    return checkstatus(repo, subset, pat, 0)
1330 1330
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact name: it must be one of the repo's registered namespaces
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1368 1368
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex node: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node, or a string that is not valid hex
            rn = None
    else:
        rn = None
        # shorter prefix: resolve it via _partialmatch; None means the
        # prefix matched nothing (NOTE(review): ambiguous prefixes appear
        # to raise inside _partialmatch — confirm against changelog impl)
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1392 1392
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # obsolescence stores maintain the 'obsolete' revision set for us
    return subset & obsmod.getrevs(repo, 'obsolete')
1400 1400
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every repo head that is neither a descendant nor a
        # member of the included set
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ancestors of 'include' minus ancestors of 'exclude'
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1426 1426
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to its start; None when
        # rev has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    origins = set(_firstsrc(r) for r in dests)
    origins.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & origins
1458 1458
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # with no explicit path, try the default-push path first, then default
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery's status messages stay silent
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1485 1485
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: use the working directory's first parent
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1504 1504
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: use the working directory's second parent, if any
        ps = repo[x].parents()
        if len(ps) < 2:
            return baseset()
        p = ps[1].rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1527 1527
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory is not in the changelog; ask its
                # context for parents instead
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1547 1547
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    phasecache = repo._phasecache
    phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not phasecache._phasesets:
        # no precomputed sets available: test each revision's phase
        phase = phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target,
                             condrepr=('<phase %r>', target),
                             cache=False)
    revs = baseset(phasecache._phasesets[target]
                   - repo.changelog.filteredrevs)
    revs.sort() # set are non ordered, so we enforce ascending
    return subset & revs
1561 1561
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1569 1569
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1577 1577
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    parentrevs = repo.changelog.parentrevs
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 keeps the revision itself
            ps.add(r)
        else:
            parents = parentrevs(r)
            if n == 1:
                ps.add(parents[0])
            elif len(parents) > 1:
                # n == 2: only revisions that actually have a second parent
                ps.add(parents[1])
    return subset & ps
1602 1602
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and degrade to an empty result
        return baseset()
1616 1616
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    phasecache = repo._phasecache
    phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not phasecache._phasesets:
        # no precomputed sets available: test each revision's phase
        phase = phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target,
                             condrepr=('<phase %r>', target),
                             cache=False)
    # union of every phase set after 'public' (draft, secret, ...)
    revs = set()
    for nonpublic in phasecache._phasesets[1:]:
        revs.update(nonpublic)
    revs = baseset(revs - repo.changelog.filteredrevs)
    revs.sort() # set are non ordered, so we enforce ascending
    return subset & revs
1635 1635
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # no precomputed set covers 'public', so test each revision's phase
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1646 1646
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current branch name, not the working parent
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote what node the identifier resolves to, then map that
    # node back to a local revision if we have it
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1681 1681
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # field index 2 selects the "removed" bucket of the status result
    # (modifies() uses index 0 for the "modified" bucket)
    return checkstatus(repo, subset, pat, 2)
1693 1693
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is not in the changelog
    if r != node.nullrev and r not in repo.changelog:
        return baseset()
    return subset & baseset([r])
1709 1709
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            # fields were sorted cheapest-first above, so stop at the
            # first mismatch instead of evaluating the remaining
            # (possibly expensive, e.g. 'diff') fields for nothing
            if all(target[n] == f(x)
                   for n, f in enumerate(getfieldfuncs)):
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1821 1821
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    # evaluate the inner set, then flip it in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1829 1829
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no (non-null) parent inside the candidate set
        for p in parentrevs(r):
            if 0 <= p and p in candidates:
                return False
        return True
    return subset & candidates.filter(isroot, condrepr='<roots>')
1842 1842
# mapping of sort key name -> function extracting the comparison value
# from a changectx (used by sort()); 'author' is an alias of 'user'
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
    }
1851 1851
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))

    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))
    else:
        keys = "rev"

    # each key may carry a leading '-' to request descending order
    keyflags = []
    for spec in keys.split():
        descending = spec.startswith('-')
        key = spec[1:] if descending else spec
        if key != 'topo' and key not in _sortkeyfuncs:
            raise error.ParseError(_("unknown sort key %r") % spec)
        keyflags.append((key, descending))

    if any(key == 'topo' for key, descending in keyflags) and len(keyflags) > 1:
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if not any(key == 'topo' for key, descending in keyflags):
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))
        opts['topo.firstbranch'] = args['topo.firstbranch']

    return args['set'], keyflags, opts
1888 1888
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        return revs
    # fast path: a plain (possibly reversed) revision-number sort
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        # istopo=True records that the result is topologically sorted
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # multi-key sort: apply keys from least to most significant;
    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1931 1931
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    'revs' is a list of revision numbers (it is sorted in place, highest
    first), 'parentsfunc' maps a revision to its parent revisions, and
    'firstbranch' optionally pre-seeds the set of "unblocked" parents so
    that the branch containing those revisions is emitted first.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision. This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2129 2129
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always show up as changes to .hgsubstate
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # NOTE(review): util.stringmatcher(pat) is re-evaluated on every
        # call; it could be hoisted out since 'pat' is fixed here
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate between the rev and its p1
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present on either side of the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2173 2173
def _substringmatcher(pattern):
    """Decode a string pattern; literal patterns become substring tests."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    return kind, pattern, lambda s: pattern in s
2179 2179
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # non-literal pattern: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2208 2208
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias of tag(); simply delegates with the same arguments
    return tag(repo, subset, x)
2212 2212
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect with the precomputed set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2221 2221
2222 2222
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' and 'author' are synonyms; delegate to author()
    return author(repo, subset, x)
2232 2232
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is only returned when the
    # subset already contains it (or spans the whole repository)
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2241 2241
# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    # the argument is a '\0'-separated list of symbols or revision numbers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # str(r) != t rejects forms like '042' that int() accepts but
            # are not canonical revision numbers
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # fall back to full symbol resolution
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2271 2271
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    # the argument is a '\0'-separated list of integer revision numbers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(t) for t in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2281 2281
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    # the argument is a '\0'-separated list of full hex node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2292 2292
# map parse-tree node labels to the functions that evaluate them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2309 2309
def _matchonly(revs, bases):
    """Recognize the '::x and not ::y' shape and return its arguments

    Returns a 'list' node carrying the two ancestors() arguments when both
    trees have the expected shape, and None otherwise.

    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    if revs is None or bases is None:
        return None
    if revs[0] != 'func' or getsymbol(revs[1]) != 'ancestors':
        return None
    if bases[0] != 'not':
        return None
    inner = bases[1]
    if inner[0] != 'func' or getsymbol(inner[1]) != 'ancestors':
        return None
    return ('list', revs[2], inner[2])
2324 2324
2325 2325 def _fixops(x):
2326 2326 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2327 2327 handled well by our simple top-down parser"""
2328 2328 if not isinstance(x, tuple):
2329 2329 return x
2330 2330
2331 2331 op = x[0]
2332 2332 if op == 'parent':
2333 2333 # x^:y means (x^) : y, not x ^ (:y)
2334 2334 # x^: means (x^) :, not x ^ (:)
2335 2335 post = ('parentpost', x[1])
2336 2336 if x[2][0] == 'dagrangepre':
2337 2337 return _fixops(('dagrange', post, x[2][1]))
2338 2338 elif x[2][0] == 'rangepre':
2339 2339 return _fixops(('range', post, x[2][1]))
2340 2340 elif x[2][0] == 'rangeall':
2341 2341 return _fixops(('rangepost', post))
2342 2342
2343 2343 return (op,) + tuple(_fixops(y) for y in x[1:])
2344 2344
2345 2345 def _analyze(x):
2346 2346 if x is None:
2347 2347 return x
2348 2348
2349 2349 op = x[0]
2350 2350 if op == 'minus':
2351 2351 return _analyze(('and', x[1], ('not', x[2])))
2352 2352 elif op == 'only':
2353 2353 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2354 2354 return _analyze(t)
2355 2355 elif op == 'onlypost':
2356 2356 return _analyze(('func', ('symbol', 'only'), x[1]))
2357 2357 elif op == 'dagrangepre':
2358 2358 return _analyze(('func', ('symbol', 'ancestors'), x[1]))
2359 2359 elif op == 'dagrangepost':
2360 2360 return _analyze(('func', ('symbol', 'descendants'), x[1]))
2361 2361 elif op == 'rangeall':
2362 2362 return _analyze(('range', ('string', '0'), ('string', 'tip')))
2363 2363 elif op == 'rangepre':
2364 2364 return _analyze(('range', ('string', '0'), x[1]))
2365 2365 elif op == 'rangepost':
2366 2366 return _analyze(('range', x[1], ('string', 'tip')))
2367 2367 elif op == 'negate':
2368 2368 s = getstring(x[1], _("can't negate that"))
2369 2369 return _analyze(('string', '-' + s))
2370 2370 elif op in ('string', 'symbol'):
2371 2371 return x
2372 2372 elif op == 'and':
2373 2373 ta = _analyze(x[1])
2374 2374 tb = _analyze(x[2])
2375 2375 return (op, ta, tb)
2376 2376 elif op == 'or':
2377 2377 return (op,) + tuple(_analyze(y) for y in x[1:])
2378 2378 elif op == 'not':
2379 2379 return (op, _analyze(x[1]))
2380 2380 elif op == 'parentpost':
2381 2381 return (op, _analyze(x[1]))
2382 2382 elif op == 'group':
2383 2383 return _analyze(x[1])
2384 2384 elif op in ('dagrange', 'range', 'parent', 'ancestor'):
2385 2385 ta = _analyze(x[1])
2386 2386 tb = _analyze(x[2])
2387 2387 return (op, ta, tb)
2388 2388 elif op == 'list':
2389 2389 return (op,) + tuple(_analyze(y) for y in x[1:])
2390 2390 elif op == 'keyvalue':
2391 2391 return (op, x[1], _analyze(x[2]))
2392 2392 elif op == 'func':
2393 2393 return (op, x[1], _analyze(x[2]))
2394 2394 raise ValueError('invalid operator %r' % op)
2395 2395
def analyze(x):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'x' may be None, which is passed through unchanged.
    """
    return _analyze(x)
2404 2404
def _optimize(x, small):
    """Optimize tree 'x' recursively, returning (weight, newtree)

    'weight' is a rough estimate of the evaluation cost of 'newtree'.
    'small' hints that the expression is expected to select few revisions,
    which makes single-revision leaves slightly cheaper (smallbonus).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op in ('string', 'symbol'):
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        # evaluate the cheaper side first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'keyvalue':
        w, t = _optimize(x[2], small)
        return w, (op, x[1], t)
    elif op == 'func':
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        # per-function cost estimates
        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
                 'keyword', 'outgoing', 'user'):
            w = 10 # slow
        elif f in ('modifies', 'adds', 'removes'):
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in ('reverse', 'limit', 'first', '_intlist'):
            w = 0
        elif f == "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    raise ValueError('invalid operator %r' % op)
2505 2505
def optimize(tree):
    """Optimize evaluatable tree

    All pseudo operations should be transformed beforehand.
    """
    # the weight returned by _optimize() is only used internally
    return _optimize(tree, small=True)[1]
2513 2513
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$', and any non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2518 2518
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    # parsing must consume the whole spec; leftover input is an error
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2539 2539
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if ``tree`` is a function call with a
        symbol name; otherwise return None (implicitly)"""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2558 2558
def expandaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config entries in ``tree``, optionally reporting
    broken alias definitions through ``showwarning``."""
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for _name, alias in sorted(aliases.iteritems()):
            if not alias.error or alias.warned:
                continue
            showwarning(_('warning: %s\n') % (alias.error))
            alias.warned = True
    return tree
2569 2569
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: recurse into the operands
        return tuple(foldconcat(t) for t in tree)
    # flatten nested _concat nodes with an explicit stack, collecting the
    # leaf texts left-to-right
    parts = []
    stack = [tree]
    while stack:
        e = stack.pop()
        if e[0] == '_concat':
            stack.extend(reversed(e[1:]))
        elif e[0] in ('string', 'symbol'):
            parts.append(e[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2590 2590
def parse(spec, lookup=None):
    """Parse ``spec`` into an evaluatable tree using the default tokenizer
    options; ``lookup`` is passed through to the tokenizer."""
    return _parsewith(spec, lookup=lookup)
2593 2593
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (deliberately a no-op in core)
    pass
2597 2597
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    # convenience wrapper: delegates to matchany() with a one-element list
    return matchany(ui, [spec], repo=repo)
2601 2601
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: matcher of the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # several specs: union of their parse trees
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2619
def _makematcher(ui, tree, repo):
    """Finish building a matcher from a freshly parsed ``tree``: expand
    aliases (when a ui is available), fold/analyze/optimize the tree, run
    the extension hook and delegate to makematcher()."""
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = optimize(analyze(foldconcat(tree)))
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2626
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() expects a smartset; wrap plain iterables in a baseset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2636 2638
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped via repr()
        return repr(str(s))

    def argtype(c, arg):
        # render a single scalar argument according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument, using the internal _list()/_intlist()/
        # _hexlist() revset functions for homogeneous element types
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # fall back to a balanced tree of 'or' expressions
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan expr, substituting %-escapes from args as we go
    out = []
    pos = 0
    argidx = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                out.append(d)
            elif d in 'dsnbr':
                out.append(argtype(d, args[argidx]))
                argidx += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                out.append(listexp(list(args[argidx]), d))
                argidx += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            out.append(c)
        pos += 1

    return ''.join(out)
2731 2733
def prettyformat(tree):
    # delegate to the generic parser pretty-printer; ('string', 'symbol')
    # names the node types treated as leaves (see parser.prettyformat)
    return parser.prettyformat(tree, ('string', 'symbol'))
2734 2736
def depth(tree):
    """Return the nesting depth of a parse tree (leaf values count as 0)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(t) for t in tree)
2740 2742
def funcsused(tree):
    """Return the set of function names appearing anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        # the function name lives in the ('symbol', name) child
        funcs.add(tree[1][1])
    return funcs
2751 2753
2752 2754 def _formatsetrepr(r):
2753 2755 """Format an optional printable representation of a set
2754 2756
2755 2757 ======== =================================
2756 2758 type(r) example
2757 2759 ======== =================================
2758 2760 tuple ('<not %r>', other)
2759 2761 str '<branch closed>'
2760 2762 callable lambda: '<branch %r>' % sorted(b)
2761 2763 object other
2762 2764 ======== =================================
2763 2765 """
2764 2766 if r is None:
2765 2767 return ''
2766 2768 elif isinstance(r, tuple):
2767 2769 return r[0] % r[1:]
2768 2770 elif isinstance(r, str):
2769 2771 return r
2770 2772 elif callable(r):
2771 2773 return r()
2772 2774 else:
2773 2775 return repr(r)
2774 2776
class abstractsmartset(object):
    """Base class of smartset implementations.

    Declares the interface subclasses must provide (iteration, membership,
    ordering predicates, first/last, len, sort/reverse) and builds generic
    helpers (min/max and the &, +, - operators, filter) on top of it.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # fastasc() yields in ascending order: its first value is the min
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # fastdesc() yields in descending order: its first value is the max
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        # intersecting with the full repository set is a no-op
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins have no func_code attribute, so they cannot be wrapped by
        # util.cachefunc this way -- but they do not need caching either
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2887 2889
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending is tri-state: None = iterate _list in insertion order,
        # True/False = iterate the sorted _asclist forward/backward
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # assigning here pre-empts the _set propertycache below
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound set.__contains__ so each membership test is a
        # plain set lookup with no extra indirection
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # unsorted: physically reverse the backing list
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
3011 3013
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # only fast if the underlying subset is fast in this direction
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # only fast if the underlying subset is fast in this direction
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer a fast iterator matching the declared order, then any fast
        # iterator, before falling back to plain filtered iteration
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # non-empty iff we can produce at least one element
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # a fast iterator in the opposite direction yields the user-facing
        # last element first
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast path: exhaust the set and keep the final element
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3124 3126
3125 3127 def _iterordered(ascending, iter1, iter2):
3126 3128 """produce an ordered iteration from two iterators with the same order
3127 3129
3128 3130 The ascending is used to indicated the iteration direction.
3129 3131 """
3130 3132 choice = max
3131 3133 if ascending:
3132 3134 choice = min
3133 3135
3134 3136 val1 = None
3135 3137 val2 = None
3136 3138 try:
3137 3139 # Consume both iterators in an ordered way until one is empty
3138 3140 while True:
3139 3141 if val1 is None:
3140 3142 val1 = next(iter1)
3141 3143 if val2 is None:
3142 3144 val2 = next(iter2)
3143 3145 n = choice(val1, val2)
3144 3146 yield n
3145 3147 if val1 == n:
3146 3148 val1 = None
3147 3149 if val2 == n:
3148 3150 val2 = None
3149 3151 except StopIteration:
3150 3152 # Flush any remaining values and consume the other one
3151 3153 it = iter2
3152 3154 if val1 is not None:
3153 3155 yield val1
3154 3156 it = iter1
3155 3157 elif val2 is not None:
3156 3158 # might have been equality and both are empty
3157 3159 yield val2
3158 3160 for val in it:
3159 3161 yield val
3160 3162
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # concretize (and deduplicate) the lazy union via our own __iter__
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # fast only when both components are fast in this direction
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        # fast only when both components are fast in this direction
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3369 3371
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # the caller guarantees a sort order: install instance-level
            # fast iterator / optimized containment shortcuts
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        # (py2) bound .next of a fresh _consumegen generator
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        # once the generator is exhausted, switch to the materialized
        # sorted list for all future fast iterations
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installs
            # fastasc/fastdesc once exhausted, so the recursion terminates.
            # Must recurse into last(), not first(): first() would return
            # the wrong end of the set.
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3549 3551
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the span is automatically descending,
    - revisions filtered by this repoview (hidden revisions) are skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to start <= end; the direction is kept in _ascending
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # only the direction flag changes; the range itself is immutable
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        """Yield revisions from iterrange, skipping hidden ones."""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        """Iterate the span in ascending order, skipping hidden revisions."""
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        """Iterate the span in descending order, skipping hidden revisions."""
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        # in range and not filtered out by the repoview
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # non-empty iff at least one non-hidden revision is in range
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            # count the hidden revisions falling inside the span
            # (the first disjunct is dead given the constructor's
            # start <= end invariant, but is kept for safety)
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """First revision in the current order, or None when empty."""
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        """Last revision in the current order, or None when empty."""
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3664 3666
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            rhs = other
        else:
            # `other` is not a smartset: make it one. It came through
            # "&", so assume it behaves like a set-like object. Filter
            # out hidden revisions while at it (this boldly assumes all
            # smartsets are pure, i.e. already free of hidden revisions).
            rhs = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        rhs.sort(reverse=self.isdescending())
        return rhs
3703 3705
def prettyformatset(revs):
    """Render repr(revs) with one nested smartset per line.

    Each '<...>' opener after the first starts a new line, indented by
    one space per level of nesting at that point in the repr string.
    """
    text = repr(revs)
    out = []
    pos = 0
    size = len(text)
    while pos < size:
        cut = text.find('<', pos + 1)
        if cut == -1:
            cut = size
        # nesting depth = unmatched openers seen before this segment
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        out.append('%s%s' % (' ' * depth, text[pos:cut].rstrip()))
        pos = cut
    return '\n'.join(out)
3717 3719
def loadpredicate(ui, extname, registrarobj):
    """Register every revset predicate found in ``registrarobj``.

    Each predicate is published in the module-level ``symbols`` table;
    those registered with a truthy ``_safe`` flag are also recorded in
    ``safesymbols``.
    """
    for sym, fn in registrarobj._table.iteritems():
        symbols[sym] = fn
        if fn._safe:
            safesymbols.add(sym)
3725 3727
# Load the built-in predicates explicitly at import time so that the
# safesymbols set is populated; loadpredicate needs no ui or extension
# context here, hence the None arguments.
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now