revsets: passing a set to baseset() is not wrong...
Martin von Zweigbergk
r29406:c2193e59 default
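The changeset below renames the `narrow` parameter of `_children()` to `subset` and drops the two "XXX using a set to feed the baseset is wrong" comments (in `_children()` and `head()`): constructing a baseset from a plain Python set is acceptable, the set simply carries no ordering of its own. The short sketch that follows illustrates the point; it is not part of the patch, and it assumes a Mercurial checkout of roughly this vintage (3.9-era), where baseset is defined in mercurial/revset.py as shown in this file.

    from mercurial import revset

    # Unordered set of revision numbers, like the 'cs' built in _children().
    cs = {9, 2, 5}

    # baseset() accepts any iterable, a set included; the file itself already
    # does 'return baseset(cs)' and 'baseset(hs) & subset'.
    s = revset.baseset(cs)

    # Callers that need a particular order request it explicitly, the way
    # reachableroots() calls revs.sort() after wrapping its result in a baseset.
    s.sort()

    print(list(s))          # [2, 5, 9]
    print(5 in s, 7 in s)   # True False
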
@@ -1,3672 +1,3668
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 # helpers
304 304
305 305 def getstring(x, err):
306 306 if x and (x[0] == 'string' or x[0] == 'symbol'):
307 307 return x[1]
308 308 raise error.ParseError(err)
309 309
310 310 def getlist(x):
311 311 if not x:
312 312 return []
313 313 if x[0] == 'list':
314 314 return list(x[1:])
315 315 return [x]
316 316
317 317 def getargs(x, min, max, err):
318 318 l = getlist(x)
319 319 if len(l) < min or (max >= 0 and len(l) > max):
320 320 raise error.ParseError(err)
321 321 return l
322 322
323 323 def getargsdict(x, funcname, keys):
324 324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
325 325 keyvaluenode='keyvalue', keynode='symbol')
326 326
327 327 def getset(repo, subset, x):
328 328 if not x:
329 329 raise error.ParseError(_("missing argument"))
330 330 s = methods[x[0]](repo, subset, *x[1:])
331 331 if util.safehasattr(s, 'isascending'):
332 332 return s
333 333 # else case should not happen, because all non-func are internal,
334 334 # ignoring for now.
335 335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
336 336 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
337 337 % x[1][1],
338 338 '3.9')
339 339 return baseset(s)
340 340
341 341 def _getrevsource(repo, r):
342 342 extra = repo[r].extra()
343 343 for label in ('source', 'transplant_source', 'rebase_source'):
344 344 if label in extra:
345 345 try:
346 346 return repo[extra[label]].rev()
347 347 except error.RepoLookupError:
348 348 pass
349 349 return None
350 350
351 351 # operator methods
352 352
353 353 def stringset(repo, subset, x):
354 354 x = repo[x].rev()
355 355 if (x in subset
356 356 or x == node.nullrev and isinstance(subset, fullreposet)):
357 357 return baseset([x])
358 358 return baseset()
359 359
360 360 def rangeset(repo, subset, x, y):
361 361 m = getset(repo, fullreposet(repo), x)
362 362 n = getset(repo, fullreposet(repo), y)
363 363
364 364 if not m or not n:
365 365 return baseset()
366 366 m, n = m.first(), n.last()
367 367
368 368 if m == n:
369 369 r = baseset([m])
370 370 elif n == node.wdirrev:
371 371 r = spanset(repo, m, len(repo)) + baseset([n])
372 372 elif m == node.wdirrev:
373 373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
374 374 elif m < n:
375 375 r = spanset(repo, m, n + 1)
376 376 else:
377 377 r = spanset(repo, m, n - 1)
378 378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 379 # necessary to ensure we preserve the order in subset.
380 380 #
381 381 # This has performance implication, carrying the sorting over when possible
382 382 # would be more efficient.
383 383 return r & subset
384 384
385 385 def dagrange(repo, subset, x, y):
386 386 r = fullreposet(repo)
387 387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
388 388 includepath=True)
389 389 return subset & xs
390 390
391 391 def andset(repo, subset, x, y):
392 392 return getset(repo, getset(repo, subset, x), y)
393 393
394 394 def differenceset(repo, subset, x, y):
395 395 return getset(repo, subset, x) - getset(repo, subset, y)
396 396
397 397 def orset(repo, subset, *xs):
398 398 assert xs
399 399 if len(xs) == 1:
400 400 return getset(repo, subset, xs[0])
401 401 p = len(xs) // 2
402 402 a = orset(repo, subset, *xs[:p])
403 403 b = orset(repo, subset, *xs[p:])
404 404 return a + b
405 405
406 406 def notset(repo, subset, x):
407 407 return subset - getset(repo, subset, x)
408 408
409 409 def listset(repo, subset, *xs):
410 410 raise error.ParseError(_("can't use a list in this context"),
411 411 hint=_('see hg help "revsets.x or y"'))
412 412
413 413 def keyvaluepair(repo, subset, k, v):
414 414 raise error.ParseError(_("can't use a key-value pair in this context"))
415 415
416 416 def func(repo, subset, a, b):
417 417 if a[0] == 'symbol' and a[1] in symbols:
418 418 return symbols[a[1]](repo, subset, b)
419 419
420 420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
421 421
422 422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
423 423 raise error.UnknownIdentifier(a[1], syms)
424 424
425 425 # functions
426 426
427 427 # symbols are callables like:
428 428 # fn(repo, subset, x)
429 429 # with:
430 430 # repo - current repository instance
431 431 # subset - of revisions to be examined
432 432 # x - argument in tree form
433 433 symbols = {}
434 434
435 435 # symbols which can't be used for a DoS attack for any given input
436 436 # (e.g. those which accept regexes as plain strings shouldn't be included)
437 437 # functions that just return a lot of changesets (like all) don't count here
438 438 safesymbols = set()
439 439
440 440 predicate = registrar.revsetpredicate()
441 441
442 442 @predicate('_destupdate')
443 443 def _destupdate(repo, subset, x):
444 444 # experimental revset for update destination
445 445 args = getargsdict(x, 'limit', 'clean check')
446 446 return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
448 448 @predicate('_destmerge')
449 449 def _destmerge(repo, subset, x):
450 450 # experimental revset for merge destination
451 451 sourceset = None
452 452 if x is not None:
453 453 sourceset = getset(repo, fullreposet(repo), x)
454 454 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
455 455
456 456 @predicate('adds(pattern)', safe=True)
457 457 def adds(repo, subset, x):
458 458 """Changesets that add a file matching pattern.
459 459
460 460 The pattern without explicit kind like ``glob:`` is expected to be
461 461 relative to the current directory and match against a file or a
462 462 directory.
463 463 """
464 464 # i18n: "adds" is a keyword
465 465 pat = getstring(x, _("adds requires a pattern"))
466 466 return checkstatus(repo, subset, pat, 1)
467 467
468 468 @predicate('ancestor(*changeset)', safe=True)
469 469 def ancestor(repo, subset, x):
470 470 """A greatest common ancestor of the changesets.
471 471
472 472 Accepts 0 or more changesets.
473 473 Will return empty list when passed no args.
474 474 Greatest common ancestor of a single changeset is that changeset.
475 475 """
476 476 # i18n: "ancestor" is a keyword
477 477 l = getlist(x)
478 478 rl = fullreposet(repo)
479 479 anc = None
480 480
481 481 # (getset(repo, rl, i) for i in l) generates a list of lists
482 482 for revs in (getset(repo, rl, i) for i in l):
483 483 for r in revs:
484 484 if anc is None:
485 485 anc = repo[r]
486 486 else:
487 487 anc = anc.ancestor(repo[r])
488 488
489 489 if anc is not None and anc.rev() in subset:
490 490 return baseset([anc.rev()])
491 491 return baseset()
492 492
493 493 def _ancestors(repo, subset, x, followfirst=False):
494 494 heads = getset(repo, fullreposet(repo), x)
495 495 if not heads:
496 496 return baseset()
497 497 s = _revancestors(repo, heads, followfirst)
498 498 return subset & s
499 499
500 500 @predicate('ancestors(set)', safe=True)
501 501 def ancestors(repo, subset, x):
502 502 """Changesets that are ancestors of a changeset in set.
503 503 """
504 504 return _ancestors(repo, subset, x)
505 505
506 506 @predicate('_firstancestors', safe=True)
507 507 def _firstancestors(repo, subset, x):
508 508 # ``_firstancestors(set)``
509 509 # Like ``ancestors(set)`` but follows only the first parents.
510 510 return _ancestors(repo, subset, x, followfirst=True)
511 511
512 512 def ancestorspec(repo, subset, x, n):
513 513 """``set~n``
514 514 Changesets that are the Nth ancestor (first parents only) of a changeset
515 515 in set.
516 516 """
517 517 try:
518 518 n = int(n[1])
519 519 except (TypeError, ValueError):
520 520 raise error.ParseError(_("~ expects a number"))
521 521 ps = set()
522 522 cl = repo.changelog
523 523 for r in getset(repo, fullreposet(repo), x):
524 524 for i in range(n):
525 525 r = cl.parentrevs(r)[0]
526 526 ps.add(r)
527 527 return subset & ps
528 528
529 529 @predicate('author(string)', safe=True)
530 530 def author(repo, subset, x):
531 531 """Alias for ``user(string)``.
532 532 """
533 533 # i18n: "author" is a keyword
534 534 n = encoding.lower(getstring(x, _("author requires a string")))
535 535 kind, pattern, matcher = _substringmatcher(n)
536 536 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
537 537 condrepr=('<user %r>', n))
538 538
539 539 @predicate('bisect(string)', safe=True)
540 540 def bisect(repo, subset, x):
541 541 """Changesets marked in the specified bisect status:
542 542
543 543 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
544 544 - ``goods``, ``bads`` : csets topologically good/bad
545 545 - ``range`` : csets taking part in the bisection
546 546 - ``pruned`` : csets that are goods, bads or skipped
547 547 - ``untested`` : csets whose fate is yet unknown
548 548 - ``ignored`` : csets ignored due to DAG topology
549 549 - ``current`` : the cset currently being bisected
550 550 """
551 551 # i18n: "bisect" is a keyword
552 552 status = getstring(x, _("bisect requires a string")).lower()
553 553 state = set(hbisect.get(repo, status))
554 554 return subset & state
555 555
556 556 # Backward-compatibility
557 557 # - no help entry so that we do not advertise it any more
558 558 @predicate('bisected', safe=True)
559 559 def bisected(repo, subset, x):
560 560 return bisect(repo, subset, x)
561 561
562 562 @predicate('bookmark([name])', safe=True)
563 563 def bookmark(repo, subset, x):
564 564 """The named bookmark or all bookmarks.
565 565
566 566 If `name` starts with `re:`, the remainder of the name is treated as
567 567 a regular expression. To match a bookmark that actually starts with `re:`,
568 568 use the prefix `literal:`.
569 569 """
570 570 # i18n: "bookmark" is a keyword
571 571 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
572 572 if args:
573 573 bm = getstring(args[0],
574 574 # i18n: "bookmark" is a keyword
575 575 _('the argument to bookmark must be a string'))
576 576 kind, pattern, matcher = util.stringmatcher(bm)
577 577 bms = set()
578 578 if kind == 'literal':
579 579 bmrev = repo._bookmarks.get(pattern, None)
580 580 if not bmrev:
581 581 raise error.RepoLookupError(_("bookmark '%s' does not exist")
582 582 % pattern)
583 583 bms.add(repo[bmrev].rev())
584 584 else:
585 585 matchrevs = set()
586 586 for name, bmrev in repo._bookmarks.iteritems():
587 587 if matcher(name):
588 588 matchrevs.add(bmrev)
589 589 if not matchrevs:
590 590 raise error.RepoLookupError(_("no bookmarks exist"
591 591 " that match '%s'") % pattern)
592 592 for bmrev in matchrevs:
593 593 bms.add(repo[bmrev].rev())
594 594 else:
595 595 bms = set([repo[r].rev()
596 596 for r in repo._bookmarks.values()])
597 597 bms -= set([node.nullrev])
598 598 return subset & bms
599 599
600 600 @predicate('branch(string or set)', safe=True)
601 601 def branch(repo, subset, x):
602 602 """
603 603 All changesets belonging to the given branch or the branches of the given
604 604 changesets.
605 605
606 606 If `string` starts with `re:`, the remainder of the name is treated as
607 607 a regular expression. To match a branch that actually starts with `re:`,
608 608 use the prefix `literal:`.
609 609 """
610 610 getbi = repo.revbranchcache().branchinfo
611 611
612 612 try:
613 613 b = getstring(x, '')
614 614 except error.ParseError:
615 615 # not a string, but another revspec, e.g. tip()
616 616 pass
617 617 else:
618 618 kind, pattern, matcher = util.stringmatcher(b)
619 619 if kind == 'literal':
620 620 # note: falls through to the revspec case if no branch with
621 621 # this name exists and pattern kind is not specified explicitly
622 622 if pattern in repo.branchmap():
623 623 return subset.filter(lambda r: matcher(getbi(r)[0]),
624 624 condrepr=('<branch %r>', b))
625 625 if b.startswith('literal:'):
626 626 raise error.RepoLookupError(_("branch '%s' does not exist")
627 627 % pattern)
628 628 else:
629 629 return subset.filter(lambda r: matcher(getbi(r)[0]),
630 630 condrepr=('<branch %r>', b))
631 631
632 632 s = getset(repo, fullreposet(repo), x)
633 633 b = set()
634 634 for r in s:
635 635 b.add(getbi(r)[0])
636 636 c = s.__contains__
637 637 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
638 638 condrepr=lambda: '<branch %r>' % sorted(b))
639 639
640 640 @predicate('bumped()', safe=True)
641 641 def bumped(repo, subset, x):
642 642 """Mutable changesets marked as successors of public changesets.
643 643
644 644 Only non-public and non-obsolete changesets can be `bumped`.
645 645 """
646 646 # i18n: "bumped" is a keyword
647 647 getargs(x, 0, 0, _("bumped takes no arguments"))
648 648 bumped = obsmod.getrevs(repo, 'bumped')
649 649 return subset & bumped
650 650
651 651 @predicate('bundle()', safe=True)
652 652 def bundle(repo, subset, x):
653 653 """Changesets in the bundle.
654 654
655 655 Bundle must be specified by the -R option."""
656 656
657 657 try:
658 658 bundlerevs = repo.changelog.bundlerevs
659 659 except AttributeError:
660 660 raise error.Abort(_("no bundle provided - specify with -R"))
661 661 return subset & bundlerevs
662 662
663 663 def checkstatus(repo, subset, pat, field):
664 664 hasset = matchmod.patkind(pat) == 'set'
665 665
666 666 mcache = [None]
667 667 def matches(x):
668 668 c = repo[x]
669 669 if not mcache[0] or hasset:
670 670 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
671 671 m = mcache[0]
672 672 fname = None
673 673 if not m.anypats() and len(m.files()) == 1:
674 674 fname = m.files()[0]
675 675 if fname is not None:
676 676 if fname not in c.files():
677 677 return False
678 678 else:
679 679 for f in c.files():
680 680 if m(f):
681 681 break
682 682 else:
683 683 return False
684 684 files = repo.status(c.p1().node(), c.node())[field]
685 685 if fname is not None:
686 686 if fname in files:
687 687 return True
688 688 else:
689 689 for f in files:
690 690 if m(f):
691 691 return True
692 692
693 693 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
695 def _children(repo, narrow, parentset):
695 def _children(repo, subset, parentset):
696 696 if not parentset:
697 697 return baseset()
698 698 cs = set()
699 699 pr = repo.changelog.parentrevs
700 700 minrev = parentset.min()
701 for r in narrow:
701 for r in subset:
702 702 if r <= minrev:
703 703 continue
704 704 for p in pr(r):
705 705 if p in parentset:
706 706 cs.add(r)
707 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
708 # This does not break because of other fullreposet misbehavior.
709 707 return baseset(cs)
710 708
711 709 @predicate('children(set)', safe=True)
712 710 def children(repo, subset, x):
713 711 """Child changesets of changesets in set.
714 712 """
715 713 s = getset(repo, fullreposet(repo), x)
716 714 cs = _children(repo, subset, s)
717 715 return subset & cs
718 716
719 717 @predicate('closed()', safe=True)
720 718 def closed(repo, subset, x):
721 719 """Changeset is closed.
722 720 """
723 721 # i18n: "closed" is a keyword
724 722 getargs(x, 0, 0, _("closed takes no arguments"))
725 723 return subset.filter(lambda r: repo[r].closesbranch(),
726 724 condrepr='<branch closed>')
727 725
728 726 @predicate('contains(pattern)')
729 727 def contains(repo, subset, x):
730 728 """The revision's manifest contains a file matching pattern (but might not
731 729 modify it). See :hg:`help patterns` for information about file patterns.
732 730
733 731 The pattern without explicit kind like ``glob:`` is expected to be
734 732 relative to the current directory and match against a file exactly
735 733 for efficiency.
736 734 """
737 735 # i18n: "contains" is a keyword
738 736 pat = getstring(x, _("contains requires a pattern"))
739 737
740 738 def matches(x):
741 739 if not matchmod.patkind(pat):
742 740 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
743 741 if pats in repo[x]:
744 742 return True
745 743 else:
746 744 c = repo[x]
747 745 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
748 746 for f in c.manifest():
749 747 if m(f):
750 748 return True
751 749 return False
752 750
753 751 return subset.filter(matches, condrepr=('<contains %r>', pat))
754 752
755 753 @predicate('converted([id])', safe=True)
756 754 def converted(repo, subset, x):
757 755 """Changesets converted from the given identifier in the old repository if
758 756 present, or all converted changesets if no identifier is specified.
759 757 """
760 758
761 759 # There is exactly no chance of resolving the revision, so do a simple
762 760 # string compare and hope for the best
763 761
764 762 rev = None
765 763 # i18n: "converted" is a keyword
766 764 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
767 765 if l:
768 766 # i18n: "converted" is a keyword
769 767 rev = getstring(l[0], _('converted requires a revision'))
770 768
771 769 def _matchvalue(r):
772 770 source = repo[r].extra().get('convert_revision', None)
773 771 return source is not None and (rev is None or source.startswith(rev))
774 772
775 773 return subset.filter(lambda r: _matchvalue(r),
776 774 condrepr=('<converted %r>', rev))
777 775
778 776 @predicate('date(interval)', safe=True)
779 777 def date(repo, subset, x):
780 778 """Changesets within the interval, see :hg:`help dates`.
781 779 """
782 780 # i18n: "date" is a keyword
783 781 ds = getstring(x, _("date requires a string"))
784 782 dm = util.matchdate(ds)
785 783 return subset.filter(lambda x: dm(repo[x].date()[0]),
786 784 condrepr=('<date %r>', ds))
787 785
788 786 @predicate('desc(string)', safe=True)
789 787 def desc(repo, subset, x):
790 788 """Search commit message for string. The match is case-insensitive.
791 789 """
792 790 # i18n: "desc" is a keyword
793 791 ds = encoding.lower(getstring(x, _("desc requires a string")))
794 792
795 793 def matches(x):
796 794 c = repo[x]
797 795 return ds in encoding.lower(c.description())
798 796
799 797 return subset.filter(matches, condrepr=('<desc %r>', ds))
800 798
801 799 def _descendants(repo, subset, x, followfirst=False):
802 800 roots = getset(repo, fullreposet(repo), x)
803 801 if not roots:
804 802 return baseset()
805 803 s = _revdescendants(repo, roots, followfirst)
806 804
807 805 # Both sets need to be ascending in order to lazily return the union
808 806 # in the correct order.
809 807 base = subset & roots
810 808 desc = subset & s
811 809 result = base + desc
812 810 if subset.isascending():
813 811 result.sort()
814 812 elif subset.isdescending():
815 813 result.sort(reverse=True)
816 814 else:
817 815 result = subset & result
818 816 return result
819 817
820 818 @predicate('descendants(set)', safe=True)
821 819 def descendants(repo, subset, x):
822 820 """Changesets which are descendants of changesets in set.
823 821 """
824 822 return _descendants(repo, subset, x)
825 823
826 824 @predicate('_firstdescendants', safe=True)
827 825 def _firstdescendants(repo, subset, x):
828 826 # ``_firstdescendants(set)``
829 827 # Like ``descendants(set)`` but follows only the first parents.
830 828 return _descendants(repo, subset, x, followfirst=True)
831 829
832 830 @predicate('destination([set])', safe=True)
833 831 def destination(repo, subset, x):
834 832 """Changesets that were created by a graft, transplant or rebase operation,
835 833 with the given revisions specified as the source. Omitting the optional set
836 834 is the same as passing all().
837 835 """
838 836 if x is not None:
839 837 sources = getset(repo, fullreposet(repo), x)
840 838 else:
841 839 sources = fullreposet(repo)
842 840
843 841 dests = set()
844 842
845 843 # subset contains all of the possible destinations that can be returned, so
846 844 # iterate over them and see if their source(s) were provided in the arg set.
847 845 # Even if the immediate src of r is not in the arg set, src's source (or
848 846 # further back) may be. Scanning back further than the immediate src allows
849 847 # transitive transplants and rebases to yield the same results as transitive
850 848 # grafts.
851 849 for r in subset:
852 850 src = _getrevsource(repo, r)
853 851 lineage = None
854 852
855 853 while src is not None:
856 854 if lineage is None:
857 855 lineage = list()
858 856
859 857 lineage.append(r)
860 858
861 859 # The visited lineage is a match if the current source is in the arg
862 860 # set. Since every candidate dest is visited by way of iterating
863 861 # subset, any dests further back in the lineage will be tested by a
864 862 # different iteration over subset. Likewise, if the src was already
865 863 # selected, the current lineage can be selected without going back
866 864 # further.
867 865 if src in sources or src in dests:
868 866 dests.update(lineage)
869 867 break
870 868
871 869 r = src
872 870 src = _getrevsource(repo, r)
873 871
874 872 return subset.filter(dests.__contains__,
875 873 condrepr=lambda: '<destination %r>' % sorted(dests))
876 874
877 875 @predicate('divergent()', safe=True)
878 876 def divergent(repo, subset, x):
879 877 """
880 878 Final successors of changesets with an alternative set of final successors.
881 879 """
882 880 # i18n: "divergent" is a keyword
883 881 getargs(x, 0, 0, _("divergent takes no arguments"))
884 882 divergent = obsmod.getrevs(repo, 'divergent')
885 883 return subset & divergent
886 884
887 885 @predicate('extinct()', safe=True)
888 886 def extinct(repo, subset, x):
889 887 """Obsolete changesets with obsolete descendants only.
890 888 """
891 889 # i18n: "extinct" is a keyword
892 890 getargs(x, 0, 0, _("extinct takes no arguments"))
893 891 extincts = obsmod.getrevs(repo, 'extinct')
894 892 return subset & extincts
895 893
896 894 @predicate('extra(label, [value])', safe=True)
897 895 def extra(repo, subset, x):
898 896 """Changesets with the given label in the extra metadata, with the given
899 897 optional value.
900 898
901 899 If `value` starts with `re:`, the remainder of the value is treated as
902 900 a regular expression. To match a value that actually starts with `re:`,
903 901 use the prefix `literal:`.
904 902 """
905 903 args = getargsdict(x, 'extra', 'label value')
906 904 if 'label' not in args:
907 905 # i18n: "extra" is a keyword
908 906 raise error.ParseError(_('extra takes at least 1 argument'))
909 907 # i18n: "extra" is a keyword
910 908 label = getstring(args['label'], _('first argument to extra must be '
911 909 'a string'))
912 910 value = None
913 911
914 912 if 'value' in args:
915 913 # i18n: "extra" is a keyword
916 914 value = getstring(args['value'], _('second argument to extra must be '
917 915 'a string'))
918 916 kind, value, matcher = util.stringmatcher(value)
919 917
920 918 def _matchvalue(r):
921 919 extra = repo[r].extra()
922 920 return label in extra and (value is None or matcher(extra[label]))
923 921
924 922 return subset.filter(lambda r: _matchvalue(r),
925 923 condrepr=('<extra[%r] %r>', label, value))
926 924
927 925 @predicate('filelog(pattern)', safe=True)
928 926 def filelog(repo, subset, x):
929 927 """Changesets connected to the specified filelog.
930 928
931 929 For performance reasons, visits only revisions mentioned in the file-level
932 930 filelog, rather than filtering through all changesets (much faster, but
933 931 doesn't include deletes or duplicate changes). For a slower, more accurate
934 932 result, use ``file()``.
935 933
936 934 The pattern without explicit kind like ``glob:`` is expected to be
937 935 relative to the current directory and match against a file exactly
938 936 for efficiency.
939 937
940 938 If some linkrev points to revisions filtered by the current repoview, we'll
941 939 work around it to return a non-filtered value.
942 940 """
943 941
944 942 # i18n: "filelog" is a keyword
945 943 pat = getstring(x, _("filelog requires a pattern"))
946 944 s = set()
947 945 cl = repo.changelog
948 946
949 947 if not matchmod.patkind(pat):
950 948 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
951 949 files = [f]
952 950 else:
953 951 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
954 952 files = (f for f in repo[None] if m(f))
955 953
956 954 for f in files:
957 955 fl = repo.file(f)
958 956 known = {}
959 957 scanpos = 0
960 958 for fr in list(fl):
961 959 fn = fl.node(fr)
962 960 if fn in known:
963 961 s.add(known[fn])
964 962 continue
965 963
966 964 lr = fl.linkrev(fr)
967 965 if lr in cl:
968 966 s.add(lr)
969 967 elif scanpos is not None:
970 968 # lowest matching changeset is filtered, scan further
971 969 # ahead in changelog
972 970 start = max(lr, scanpos) + 1
973 971 scanpos = None
974 972 for r in cl.revs(start):
975 973 # minimize parsing of non-matching entries
976 974 if f in cl.revision(r) and f in cl.readfiles(r):
977 975 try:
978 976 # try to use manifest delta fastpath
979 977 n = repo[r].filenode(f)
980 978 if n not in known:
981 979 if n == fn:
982 980 s.add(r)
983 981 scanpos = r
984 982 break
985 983 else:
986 984 known[n] = r
987 985 except error.ManifestLookupError:
988 986 # deletion in changelog
989 987 continue
990 988
991 989 return subset & s
992 990
993 991 @predicate('first(set, [n])', safe=True)
994 992 def first(repo, subset, x):
995 993 """An alias for limit().
996 994 """
997 995 return limit(repo, subset, x)
998 996
999 997 def _follow(repo, subset, x, name, followfirst=False):
1000 998 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1001 999 c = repo['.']
1002 1000 if l:
1003 1001 x = getstring(l[0], _("%s expected a pattern") % name)
1004 1002 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1005 1003 ctx=repo[None], default='path')
1006 1004
1007 1005 files = c.manifest().walk(matcher)
1008 1006
1009 1007 s = set()
1010 1008 for fname in files:
1011 1009 fctx = c[fname]
1012 1010 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1013 1011 # include the revision responsible for the most recent version
1014 1012 s.add(fctx.introrev())
1015 1013 else:
1016 1014 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1017 1015
1018 1016 return subset & s
1019 1017
1020 1018 @predicate('follow([pattern])', safe=True)
1021 1019 def follow(repo, subset, x):
1022 1020 """
1023 1021 An alias for ``::.`` (ancestors of the working directory's first parent).
1024 1022 If pattern is specified, the histories of files matching given
1025 1023 pattern is followed, including copies.
1026 1024 """
1027 1025 return _follow(repo, subset, x, 'follow')
1028 1026
1029 1027 @predicate('_followfirst', safe=True)
1030 1028 def _followfirst(repo, subset, x):
1031 1029 # ``followfirst([pattern])``
1032 1030 # Like ``follow([pattern])`` but follows only the first parent of
1033 1031 # every revisions or files revisions.
1034 1032 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035 1033
1036 1034 @predicate('all()', safe=True)
1037 1035 def getall(repo, subset, x):
1038 1036 """All changesets, the same as ``0:tip``.
1039 1037 """
1040 1038 # i18n: "all" is a keyword
1041 1039 getargs(x, 0, 0, _("all takes no arguments"))
1042 1040 return subset & spanset(repo) # drop "null" if any
1043 1041
1044 1042 @predicate('grep(regex)')
1045 1043 def grep(repo, subset, x):
1046 1044 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1047 1045 to ensure special escape characters are handled correctly. Unlike
1048 1046 ``keyword(string)``, the match is case-sensitive.
1049 1047 """
1050 1048 try:
1051 1049 # i18n: "grep" is a keyword
1052 1050 gr = re.compile(getstring(x, _("grep requires a string")))
1053 1051 except re.error as e:
1054 1052 raise error.ParseError(_('invalid match pattern: %s') % e)
1055 1053
1056 1054 def matches(x):
1057 1055 c = repo[x]
1058 1056 for e in c.files() + [c.user(), c.description()]:
1059 1057 if gr.search(e):
1060 1058 return True
1061 1059 return False
1062 1060
1063 1061 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064 1062
1065 1063 @predicate('_matchfiles', safe=True)
1066 1064 def _matchfiles(repo, subset, x):
1067 1065 # _matchfiles takes a revset list of prefixed arguments:
1068 1066 #
1069 1067 # [p:foo, i:bar, x:baz]
1070 1068 #
1071 1069 # builds a match object from them and filters subset. Allowed
1072 1070 # prefixes are 'p:' for regular patterns, 'i:' for include
1073 1071 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1074 1072 # a revision identifier, or the empty string to reference the
1075 1073 # working directory, from which the match object is
1076 1074 # initialized. Use 'd:' to set the default matching mode, default
1077 1075 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1078 1076
1079 1077 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1080 1078 pats, inc, exc = [], [], []
1081 1079 rev, default = None, None
1082 1080 for arg in l:
1083 1081 s = getstring(arg, "_matchfiles requires string arguments")
1084 1082 prefix, value = s[:2], s[2:]
1085 1083 if prefix == 'p:':
1086 1084 pats.append(value)
1087 1085 elif prefix == 'i:':
1088 1086 inc.append(value)
1089 1087 elif prefix == 'x:':
1090 1088 exc.append(value)
1091 1089 elif prefix == 'r:':
1092 1090 if rev is not None:
1093 1091 raise error.ParseError('_matchfiles expected at most one '
1094 1092 'revision')
1095 1093 if value != '': # empty means working directory; leave rev as None
1096 1094 rev = value
1097 1095 elif prefix == 'd:':
1098 1096 if default is not None:
1099 1097 raise error.ParseError('_matchfiles expected at most one '
1100 1098 'default mode')
1101 1099 default = value
1102 1100 else:
1103 1101 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1104 1102 if not default:
1105 1103 default = 'glob'
1106 1104
1107 1105 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1108 1106 exclude=exc, ctx=repo[rev], default=default)
1109 1107
1110 1108 # This directly read the changelog data as creating changectx for all
1111 1109 # revisions is quite expensive.
1112 1110 getfiles = repo.changelog.readfiles
1113 1111 wdirrev = node.wdirrev
1114 1112 def matches(x):
1115 1113 if x == wdirrev:
1116 1114 files = repo[x].files()
1117 1115 else:
1118 1116 files = getfiles(x)
1119 1117 for f in files:
1120 1118 if m(f):
1121 1119 return True
1122 1120 return False
1123 1121
1124 1122 return subset.filter(matches,
1125 1123 condrepr=('<matchfiles patterns=%r, include=%r '
1126 1124 'exclude=%r, default=%r, rev=%r>',
1127 1125 pats, inc, exc, default, rev))
1128 1126
1129 1127 @predicate('file(pattern)', safe=True)
1130 1128 def hasfile(repo, subset, x):
1131 1129 """Changesets affecting files matched by pattern.
1132 1130
1133 1131 For a faster but less accurate result, consider using ``filelog()``
1134 1132 instead.
1135 1133
1136 1134 This predicate uses ``glob:`` as the default kind of pattern.
1137 1135 """
1138 1136 # i18n: "file" is a keyword
1139 1137 pat = getstring(x, _("file requires a pattern"))
1140 1138 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141 1139
1142 1140 @predicate('head()', safe=True)
1143 1141 def head(repo, subset, x):
1144 1142 """Changeset is a named branch head.
1145 1143 """
1146 1144 # i18n: "head" is a keyword
1147 1145 getargs(x, 0, 0, _("head takes no arguments"))
1148 1146 hs = set()
1149 1147 cl = repo.changelog
1150 1148 for b, ls in repo.branchmap().iteritems():
1151 1149 hs.update(cl.rev(h) for h in ls)
1152 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1153 # This does not break because of other fullreposet misbehavior.
1154 1150 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1155 1151 # necessary to ensure we preserve the order in subset.
1156 1152 return baseset(hs) & subset
1157 1153
1158 1154 @predicate('heads(set)', safe=True)
1159 1155 def heads(repo, subset, x):
1160 1156 """Members of set with no children in set.
1161 1157 """
1162 1158 s = getset(repo, subset, x)
1163 1159 ps = parents(repo, subset, x)
1164 1160 return s - ps
1165 1161
1166 1162 @predicate('hidden()', safe=True)
1167 1163 def hidden(repo, subset, x):
1168 1164 """Hidden changesets.
1169 1165 """
1170 1166 # i18n: "hidden" is a keyword
1171 1167 getargs(x, 0, 0, _("hidden takes no arguments"))
1172 1168 hiddenrevs = repoview.filterrevs(repo, 'visible')
1173 1169 return subset & hiddenrevs
1174 1170
1175 1171 @predicate('keyword(string)', safe=True)
1176 1172 def keyword(repo, subset, x):
1177 1173 """Search commit message, user name, and names of changed files for
1178 1174 string. The match is case-insensitive.
1179 1175 """
1180 1176 # i18n: "keyword" is a keyword
1181 1177 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1182 1178
1183 1179 def matches(r):
1184 1180 c = repo[r]
1185 1181 return any(kw in encoding.lower(t)
1186 1182 for t in c.files() + [c.user(), c.description()])
1187 1183
1188 1184 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189 1185
1190 1186 @predicate('limit(set[, n[, offset]])', safe=True)
1191 1187 def limit(repo, subset, x):
1192 1188 """First n members of set, defaulting to 1, starting from offset.
1193 1189 """
1194 1190 args = getargsdict(x, 'limit', 'set n offset')
1195 1191 if 'set' not in args:
1196 1192 # i18n: "limit" is a keyword
1197 1193 raise error.ParseError(_("limit requires one to three arguments"))
1198 1194 try:
1199 1195 lim, ofs = 1, 0
1200 1196 if 'n' in args:
1201 1197 # i18n: "limit" is a keyword
1202 1198 lim = int(getstring(args['n'], _("limit requires a number")))
1203 1199 if 'offset' in args:
1204 1200 # i18n: "limit" is a keyword
1205 1201 ofs = int(getstring(args['offset'], _("limit requires a number")))
1206 1202 if ofs < 0:
1207 1203 raise error.ParseError(_("negative offset"))
1208 1204 except (TypeError, ValueError):
1209 1205 # i18n: "limit" is a keyword
1210 1206 raise error.ParseError(_("limit expects a number"))
1211 1207 os = getset(repo, fullreposet(repo), args['set'])
1212 1208 result = []
1213 1209 it = iter(os)
1214 1210 for x in xrange(ofs):
1215 1211 y = next(it, None)
1216 1212 if y is None:
1217 1213 break
1218 1214 for x in xrange(lim):
1219 1215 y = next(it, None)
1220 1216 if y is None:
1221 1217 break
1222 1218 elif y in subset:
1223 1219 result.append(y)
1224 1220 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1225 1221 lim, ofs, subset, os))
1226 1222
1227 1223 @predicate('last(set, [n])', safe=True)
1228 1224 def last(repo, subset, x):
1229 1225 """Last n members of set, defaulting to 1.
1230 1226 """
1231 1227 # i18n: "last" is a keyword
1232 1228 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1233 1229 try:
1234 1230 lim = 1
1235 1231 if len(l) == 2:
1236 1232 # i18n: "last" is a keyword
1237 1233 lim = int(getstring(l[1], _("last requires a number")))
1238 1234 except (TypeError, ValueError):
1239 1235 # i18n: "last" is a keyword
1240 1236 raise error.ParseError(_("last expects a number"))
1241 1237 os = getset(repo, fullreposet(repo), l[0])
1242 1238 os.reverse()
1243 1239 result = []
1244 1240 it = iter(os)
1245 1241 for x in xrange(lim):
1246 1242 y = next(it, None)
1247 1243 if y is None:
1248 1244 break
1249 1245 elif y in subset:
1250 1246 result.append(y)
1251 1247 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252 1248
1253 1249 @predicate('max(set)', safe=True)
1254 1250 def maxrev(repo, subset, x):
1255 1251 """Changeset with highest revision number in set.
1256 1252 """
1257 1253 os = getset(repo, fullreposet(repo), x)
1258 1254 try:
1259 1255 m = os.max()
1260 1256 if m in subset:
1261 1257 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1262 1258 except ValueError:
1263 1259 # os.max() throws a ValueError when the collection is empty.
1264 1260 # Same as python's max().
1265 1261 pass
1266 1262 return baseset(datarepr=('<max %r, %r>', subset, os))
1267 1263
1268 1264 @predicate('merge()', safe=True)
1269 1265 def merge(repo, subset, x):
1270 1266 """Changeset is a merge changeset.
1271 1267 """
1272 1268 # i18n: "merge" is a keyword
1273 1269 getargs(x, 0, 0, _("merge takes no arguments"))
1274 1270 cl = repo.changelog
1275 1271 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1276 1272 condrepr='<merge>')
1277 1273
1278 1274 @predicate('branchpoint()', safe=True)
1279 1275 def branchpoint(repo, subset, x):
1280 1276 """Changesets with more than one child.
1281 1277 """
1282 1278 # i18n: "branchpoint" is a keyword
1283 1279 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1284 1280 cl = repo.changelog
1285 1281 if not subset:
1286 1282 return baseset()
1287 1283 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1288 1284 # (and if it is not, it should.)
1289 1285 baserev = min(subset)
1290 1286 parentscount = [0]*(len(repo) - baserev)
1291 1287 for r in cl.revs(start=baserev + 1):
1292 1288 for p in cl.parentrevs(r):
1293 1289 if p >= baserev:
1294 1290 parentscount[p - baserev] += 1
1295 1291 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1296 1292 condrepr='<branchpoint>')
1297 1293
1298 1294 @predicate('min(set)', safe=True)
1299 1295 def minrev(repo, subset, x):
1300 1296 """Changeset with lowest revision number in set.
1301 1297 """
1302 1298 os = getset(repo, fullreposet(repo), x)
1303 1299 try:
1304 1300 m = os.min()
1305 1301 if m in subset:
1306 1302 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1307 1303 except ValueError:
1308 1304 # os.min() throws a ValueError when the collection is empty.
1309 1305 # Same as python's min().
1310 1306 pass
1311 1307 return baseset(datarepr=('<min %r, %r>', subset, os))
1312 1308
1313 1309 @predicate('modifies(pattern)', safe=True)
1314 1310 def modifies(repo, subset, x):
1315 1311 """Changesets modifying files matched by pattern.
1316 1312
1317 1313 The pattern without explicit kind like ``glob:`` is expected to be
1318 1314 relative to the current directory and match against a file or a
1319 1315 directory.
1320 1316 """
1321 1317 # i18n: "modifies" is a keyword
1322 1318 pat = getstring(x, _("modifies requires a pattern"))
1323 1319 return checkstatus(repo, subset, pat, 0)
1324 1320
1325 1321 @predicate('named(namespace)')
1326 1322 def named(repo, subset, x):
1327 1323 """The changesets in a given namespace.
1328 1324
1329 1325 If `namespace` starts with `re:`, the remainder of the string is treated as
1330 1326 a regular expression. To match a namespace that actually starts with `re:`,
1331 1327 use the prefix `literal:`.
1332 1328 """
1333 1329 # i18n: "named" is a keyword
1334 1330 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335 1331
1336 1332 ns = getstring(args[0],
1337 1333 # i18n: "named" is a keyword
1338 1334 _('the argument to named must be a string'))
1339 1335 kind, pattern, matcher = util.stringmatcher(ns)
1340 1336 namespaces = set()
1341 1337 if kind == 'literal':
1342 1338 if pattern not in repo.names:
1343 1339 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 1340 % ns)
1345 1341 namespaces.add(repo.names[pattern])
1346 1342 else:
1347 1343 for name, ns in repo.names.iteritems():
1348 1344 if matcher(name):
1349 1345 namespaces.add(ns)
1350 1346 if not namespaces:
1351 1347 raise error.RepoLookupError(_("no namespace exists"
1352 1348 " that match '%s'") % pattern)
1353 1349
1354 1350 names = set()
1355 1351 for ns in namespaces:
1356 1352 for name in ns.listnames(repo):
1357 1353 if name not in ns.deprecated:
1358 1354 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1359 1355
1360 1356 names -= set([node.nullrev])
1361 1357 return subset & names
1362 1358
1363 1359 @predicate('id(string)', safe=True)
1364 1360 def node_(repo, subset, x):
1365 1361 """Revision non-ambiguously specified by the given hex string prefix.
1366 1362 """
1367 1363 # i18n: "id" is a keyword
1368 1364 l = getargs(x, 1, 1, _("id requires one argument"))
1369 1365 # i18n: "id" is a keyword
1370 1366 n = getstring(l[0], _("id requires a string"))
1371 1367 if len(n) == 40:
1372 1368 try:
1373 1369 rn = repo.changelog.rev(node.bin(n))
1374 1370 except (LookupError, TypeError):
1375 1371 rn = None
1376 1372 else:
1377 1373 rn = None
1378 1374 pm = repo.changelog._partialmatch(n)
1379 1375 if pm is not None:
1380 1376 rn = repo.changelog.rev(pm)
1381 1377
1382 1378 if rn is None:
1383 1379 return baseset()
1384 1380 result = baseset([rn])
1385 1381 return result & subset
1386 1382
1387 1383 @predicate('obsolete()', safe=True)
1388 1384 def obsolete(repo, subset, x):
1389 1385 """Mutable changeset with a newer version."""
1390 1386 # i18n: "obsolete" is a keyword
1391 1387 getargs(x, 0, 0, _("obsolete takes no arguments"))
1392 1388 obsoletes = obsmod.getrevs(repo, 'obsolete')
1393 1389 return subset & obsoletes
1394 1390
1395 1391 @predicate('only(set, [set])', safe=True)
1396 1392 def only(repo, subset, x):
1397 1393 """Changesets that are ancestors of the first set that are not ancestors
1398 1394 of any other head in the repo. If a second set is specified, the result
1399 1395 is ancestors of the first set that are not ancestors of the second set
1400 1396 (i.e. ::<set1> - ::<set2>).
1401 1397 """
1402 1398 cl = repo.changelog
1403 1399 # i18n: "only" is a keyword
1404 1400 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1405 1401 include = getset(repo, fullreposet(repo), args[0])
1406 1402 if len(args) == 1:
1407 1403 if not include:
1408 1404 return baseset()
1409 1405
1410 1406 descendants = set(_revdescendants(repo, include, False))
1411 1407 exclude = [rev for rev in cl.headrevs()
1412 1408 if not rev in descendants and not rev in include]
1413 1409 else:
1414 1410 exclude = getset(repo, fullreposet(repo), args[1])
1415 1411
1416 1412 results = set(cl.findmissingrevs(common=exclude, heads=include))
1417 1413 # XXX we should turn this into a baseset instead of a set, smartset may do
1418 1414 # some optimisations from the fact this is a baseset.
1419 1415 return subset & results
1420 1416
1421 1417 @predicate('origin([set])', safe=True)
1422 1418 def origin(repo, subset, x):
1423 1419 """
1424 1420 Changesets that were specified as a source for the grafts, transplants or
1425 1421 rebases that created the given revisions. Omitting the optional set is the
1426 1422 same as passing all(). If a changeset created by these operations is itself
1427 1423 specified as a source for one of these operations, only the source changeset
1428 1424 for the first operation is selected.
1429 1425 """
1430 1426 if x is not None:
1431 1427 dests = getset(repo, fullreposet(repo), x)
1432 1428 else:
1433 1429 dests = fullreposet(repo)
1434 1430
1435 1431 def _firstsrc(rev):
1436 1432 src = _getrevsource(repo, rev)
1437 1433 if src is None:
1438 1434 return None
1439 1435
1440 1436 while True:
1441 1437 prev = _getrevsource(repo, src)
1442 1438
1443 1439 if prev is None:
1444 1440 return src
1445 1441 src = prev
1446 1442
1447 1443 o = set([_firstsrc(r) for r in dests])
1448 1444 o -= set([None])
1449 1445 # XXX we should turn this into a baseset instead of a set, smartset may do
1450 1446 # some optimisations from the fact this is a baseset.
1451 1447 return subset & o
1452 1448
1453 1449 @predicate('outgoing([path])', safe=True)
1454 1450 def outgoing(repo, subset, x):
1455 1451 """Changesets not found in the specified destination repository, or the
1456 1452 default push location.
1457 1453 """
1458 1454 # Avoid cycles.
1459 1455 from . import (
1460 1456 discovery,
1461 1457 hg,
1462 1458 )
1463 1459 # i18n: "outgoing" is a keyword
1464 1460 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1465 1461 # i18n: "outgoing" is a keyword
1466 1462 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1467 1463 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1468 1464 dest, branches = hg.parseurl(dest)
1469 1465 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1470 1466 if revs:
1471 1467 revs = [repo.lookup(rev) for rev in revs]
1472 1468 other = hg.peer(repo, {}, dest)
1473 1469 repo.ui.pushbuffer()
1474 1470 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1475 1471 repo.ui.popbuffer()
1476 1472 cl = repo.changelog
1477 1473 o = set([cl.rev(r) for r in outgoing.missing])
1478 1474 return subset & o
1479 1475
1480 1476 @predicate('p1([set])', safe=True)
1481 1477 def p1(repo, subset, x):
1482 1478 """First parent of changesets in set, or the working directory.
1483 1479 """
1484 1480 if x is None:
1485 1481 p = repo[x].p1().rev()
1486 1482 if p >= 0:
1487 1483 return subset & baseset([p])
1488 1484 return baseset()
1489 1485
1490 1486 ps = set()
1491 1487 cl = repo.changelog
1492 1488 for r in getset(repo, fullreposet(repo), x):
1493 1489 ps.add(cl.parentrevs(r)[0])
1494 1490 ps -= set([node.nullrev])
1495 1491 # XXX we should turn this into a baseset instead of a set, smartset may do
1496 1492 # some optimisations from the fact this is a baseset.
1497 1493 return subset & ps
1498 1494
1499 1495 @predicate('p2([set])', safe=True)
1500 1496 def p2(repo, subset, x):
1501 1497 """Second parent of changesets in set, or the working directory.
1502 1498 """
1503 1499 if x is None:
1504 1500 ps = repo[x].parents()
1505 1501 try:
1506 1502 p = ps[1].rev()
1507 1503 if p >= 0:
1508 1504 return subset & baseset([p])
1509 1505 return baseset()
1510 1506 except IndexError:
1511 1507 return baseset()
1512 1508
1513 1509 ps = set()
1514 1510 cl = repo.changelog
1515 1511 for r in getset(repo, fullreposet(repo), x):
1516 1512 ps.add(cl.parentrevs(r)[1])
1517 1513 ps -= set([node.nullrev])
1518 1514 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 1515 # some optimisations from the fact this is a baseset.
1520 1516 return subset & ps
1521 1517
1522 1518 @predicate('parents([set])', safe=True)
1523 1519 def parents(repo, subset, x):
1524 1520 """
1525 1521 The set of all parents for all changesets in set, or the working directory.
1526 1522 """
1527 1523 if x is None:
1528 1524 ps = set(p.rev() for p in repo[x].parents())
1529 1525 else:
1530 1526 ps = set()
1531 1527 cl = repo.changelog
1532 1528 up = ps.update
1533 1529 parentrevs = cl.parentrevs
1534 1530 for r in getset(repo, fullreposet(repo), x):
1535 1531 if r == node.wdirrev:
1536 1532 up(p.rev() for p in repo[r].parents())
1537 1533 else:
1538 1534 up(parentrevs(r))
1539 1535 ps -= set([node.nullrev])
1540 1536 return subset & ps
1541 1537
1542 1538 def _phase(repo, subset, target):
1543 1539 """helper to select all rev in phase <target>"""
1544 1540 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1545 1541 if repo._phasecache._phasesets:
1546 1542 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1547 1543 s = baseset(s)
1548 1544 s.sort() # set are non ordered, so we enforce ascending
1549 1545 return subset & s
1550 1546 else:
1551 1547 phase = repo._phasecache.phase
1552 1548 condition = lambda r: phase(repo, r) == target
1553 1549 return subset.filter(condition, condrepr=('<phase %r>', target),
1554 1550 cache=False)
1555 1551
1556 1552 @predicate('draft()', safe=True)
1557 1553 def draft(repo, subset, x):
1558 1554 """Changeset in draft phase."""
1559 1555 # i18n: "draft" is a keyword
1560 1556 getargs(x, 0, 0, _("draft takes no arguments"))
1561 1557 target = phases.draft
1562 1558 return _phase(repo, subset, target)
1563 1559
1564 1560 @predicate('secret()', safe=True)
1565 1561 def secret(repo, subset, x):
1566 1562 """Changeset in secret phase."""
1567 1563 # i18n: "secret" is a keyword
1568 1564 getargs(x, 0, 0, _("secret takes no arguments"))
1569 1565 target = phases.secret
1570 1566 return _phase(repo, subset, target)
1571 1567
1572 1568 def parentspec(repo, subset, x, n):
1573 1569 """``set^0``
1574 1570 The set.
1575 1571 ``set^1`` (or ``set^``), ``set^2``
1576 1572 First or second parent, respectively, of all changesets in set.
1577 1573 """
1578 1574 try:
1579 1575 n = int(n[1])
1580 1576 if n not in (0, 1, 2):
1581 1577 raise ValueError
1582 1578 except (TypeError, ValueError):
1583 1579 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1584 1580 ps = set()
1585 1581 cl = repo.changelog
1586 1582 for r in getset(repo, fullreposet(repo), x):
1587 1583 if n == 0:
1588 1584 ps.add(r)
1589 1585 elif n == 1:
1590 1586 ps.add(cl.parentrevs(r)[0])
1591 1587 elif n == 2:
1592 1588 parents = cl.parentrevs(r)
1593 1589 if len(parents) > 1:
1594 1590 ps.add(parents[1])
1595 1591 return subset & ps
1596 1592
1597 1593 @predicate('present(set)', safe=True)
1598 1594 def present(repo, subset, x):
1599 1595 """An empty set, if any revision in set isn't found; otherwise,
1600 1596 all revisions in set.
1601 1597
1602 1598 If any of the specified revisions is not present in the local repository,
1603 1599 the query is normally aborted. But this predicate allows the query
1604 1600 to continue even in such cases.
1605 1601 """
1606 1602 try:
1607 1603 return getset(repo, subset, x)
1608 1604 except error.RepoLookupError:
1609 1605 return baseset()
1610 1606
1611 1607 # for internal use
1612 1608 @predicate('_notpublic', safe=True)
1613 1609 def _notpublic(repo, subset, x):
1614 1610 getargs(x, 0, 0, "_notpublic takes no arguments")
1615 1611 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1616 1612 if repo._phasecache._phasesets:
1617 1613 s = set()
1618 1614 for u in repo._phasecache._phasesets[1:]:
1619 1615 s.update(u)
1620 1616 s = baseset(s - repo.changelog.filteredrevs)
1621 1617 s.sort()
1622 1618 return subset & s
1623 1619 else:
1624 1620 phase = repo._phasecache.phase
1625 1621 target = phases.public
1626 1622 condition = lambda r: phase(repo, r) != target
1627 1623 return subset.filter(condition, condrepr=('<phase %r>', target),
1628 1624 cache=False)
1629 1625
1630 1626 @predicate('public()', safe=True)
1631 1627 def public(repo, subset, x):
1632 1628 """Changeset in public phase."""
1633 1629 # i18n: "public" is a keyword
1634 1630 getargs(x, 0, 0, _("public takes no arguments"))
1635 1631 phase = repo._phasecache.phase
1636 1632 target = phases.public
1637 1633 condition = lambda r: phase(repo, r) == target
1638 1634 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 1635 cache=False)
1640 1636
1641 1637 @predicate('remote([id [,path]])', safe=True)
1642 1638 def remote(repo, subset, x):
1643 1639 """Local revision that corresponds to the given identifier in a
1644 1640 remote repository, if present. Here, the '.' identifier is a
1645 1641 synonym for the current local branch.
1646 1642 """
1647 1643
1648 1644 from . import hg # avoid start-up nasties
1649 1645 # i18n: "remote" is a keyword
1650 1646 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1651 1647
1652 1648 q = '.'
1653 1649 if len(l) > 0:
1654 1650 # i18n: "remote" is a keyword
1655 1651 q = getstring(l[0], _("remote requires a string id"))
1656 1652 if q == '.':
1657 1653 q = repo['.'].branch()
1658 1654
1659 1655 dest = ''
1660 1656 if len(l) > 1:
1661 1657 # i18n: "remote" is a keyword
1662 1658 dest = getstring(l[1], _("remote requires a repository path"))
1663 1659 dest = repo.ui.expandpath(dest or 'default')
1664 1660 dest, branches = hg.parseurl(dest)
1665 1661 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1666 1662 if revs:
1667 1663 revs = [repo.lookup(rev) for rev in revs]
1668 1664 other = hg.peer(repo, {}, dest)
1669 1665 n = other.lookup(q)
1670 1666 if n in repo:
1671 1667 r = repo[n].rev()
1672 1668 if r in subset:
1673 1669 return baseset([r])
1674 1670 return baseset()
1675 1671
1676 1672 @predicate('removes(pattern)', safe=True)
1677 1673 def removes(repo, subset, x):
1678 1674 """Changesets which remove files matching pattern.
1679 1675
1680 1676 The pattern without explicit kind like ``glob:`` is expected to be
1681 1677 relative to the current directory and match against a file or a
1682 1678 directory.
1683 1679 """
1684 1680 # i18n: "removes" is a keyword
1685 1681 pat = getstring(x, _("removes requires a pattern"))
1686 1682 return checkstatus(repo, subset, pat, 2)
1687 1683
1688 1684 @predicate('rev(number)', safe=True)
1689 1685 def rev(repo, subset, x):
1690 1686 """Revision with the given numeric identifier.
1691 1687 """
1692 1688 # i18n: "rev" is a keyword
1693 1689 l = getargs(x, 1, 1, _("rev requires one argument"))
1694 1690 try:
1695 1691 # i18n: "rev" is a keyword
1696 1692 l = int(getstring(l[0], _("rev requires a number")))
1697 1693 except (TypeError, ValueError):
1698 1694 # i18n: "rev" is a keyword
1699 1695 raise error.ParseError(_("rev expects a number"))
1700 1696 if l not in repo.changelog and l != node.nullrev:
1701 1697 return baseset()
1702 1698 return subset & baseset([l])
1703 1699
1704 1700 @predicate('matching(revision [, field])', safe=True)
1705 1701 def matching(repo, subset, x):
1706 1702 """Changesets in which a given set of fields match the set of fields in the
1707 1703 selected revision or set.
1708 1704
1709 1705 To match more than one field pass the list of fields to match separated
1710 1706 by spaces (e.g. ``author description``).
1711 1707
1712 1708 Valid fields are most regular revision fields and some special fields.
1713 1709
1714 1710 Regular revision fields are ``description``, ``author``, ``branch``,
1715 1711 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1716 1712 and ``diff``.
1717 1713 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1718 1714 contents of the revision. Two revisions matching their ``diff`` will
1719 1715 also match their ``files``.
1720 1716
1721 1717 Special fields are ``summary`` and ``metadata``:
1722 1718 ``summary`` matches the first line of the description.
1723 1719 ``metadata`` is equivalent to matching ``description user date``
1724 1720 (i.e. it matches the main metadata fields).
1725 1721
1726 1722 ``metadata`` is the default field which is used when no fields are
1727 1723 specified. You can match more than one field at a time.
1728 1724 """
1729 1725 # i18n: "matching" is a keyword
1730 1726 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1731 1727
1732 1728 revs = getset(repo, fullreposet(repo), l[0])
1733 1729
1734 1730 fieldlist = ['metadata']
1735 1731 if len(l) > 1:
1736 1732 fieldlist = getstring(l[1],
1737 1733 # i18n: "matching" is a keyword
1738 1734 _("matching requires a string "
1739 1735 "as its second argument")).split()
1740 1736
1741 1737 # Make sure that there are no repeated fields,
1742 1738 # expand the 'special' 'metadata' field type
1743 1739 # and check the 'files' whenever we check the 'diff'
1744 1740 fields = []
1745 1741 for field in fieldlist:
1746 1742 if field == 'metadata':
1747 1743 fields += ['user', 'description', 'date']
1748 1744 elif field == 'diff':
1749 1745 # a revision matching the diff must also match the files
1750 1746 # since matching the diff is very costly, make sure to
1751 1747 # also match the files first
1752 1748 fields += ['files', 'diff']
1753 1749 else:
1754 1750 if field == 'author':
1755 1751 field = 'user'
1756 1752 fields.append(field)
1757 1753 fields = set(fields)
1758 1754 if 'summary' in fields and 'description' in fields:
1759 1755 # If a revision matches its description it also matches its summary
1760 1756 fields.discard('summary')
1761 1757
1762 1758 # We may want to match more than one field
1763 1759 # Not all fields take the same amount of time to be matched
1764 1760 # Sort the selected fields in order of increasing matching cost
1765 1761 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1766 1762 'files', 'description', 'substate', 'diff']
1767 1763 def fieldkeyfunc(f):
1768 1764 try:
1769 1765 return fieldorder.index(f)
1770 1766 except ValueError:
1771 1767 # assume an unknown field is very costly
1772 1768 return len(fieldorder)
1773 1769 fields = list(fields)
1774 1770 fields.sort(key=fieldkeyfunc)
1775 1771
1776 1772 # Each field will be matched with its own "getfield" function
1777 1773 # which will be added to the getfieldfuncs array of functions
1778 1774 getfieldfuncs = []
1779 1775 _funcs = {
1780 1776 'user': lambda r: repo[r].user(),
1781 1777 'branch': lambda r: repo[r].branch(),
1782 1778 'date': lambda r: repo[r].date(),
1783 1779 'description': lambda r: repo[r].description(),
1784 1780 'files': lambda r: repo[r].files(),
1785 1781 'parents': lambda r: repo[r].parents(),
1786 1782 'phase': lambda r: repo[r].phase(),
1787 1783 'substate': lambda r: repo[r].substate,
1788 1784 'summary': lambda r: repo[r].description().splitlines()[0],
1789 1785 'diff': lambda r: list(repo[r].diff(git=True),)
1790 1786 }
1791 1787 for info in fields:
1792 1788 getfield = _funcs.get(info, None)
1793 1789 if getfield is None:
1794 1790 raise error.ParseError(
1795 1791 # i18n: "matching" is a keyword
1796 1792 _("unexpected field name passed to matching: %s") % info)
1797 1793 getfieldfuncs.append(getfield)
1798 1794 # convert the getfield array of functions into a "getinfo" function
1799 1795 # which returns an array of field values (or a single value if there
1800 1796 # is only one field to match)
1801 1797 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1802 1798
1803 1799 def matches(x):
1804 1800 for rev in revs:
1805 1801 target = getinfo(rev)
1806 1802 match = True
1807 1803 for n, f in enumerate(getfieldfuncs):
1808 1804 if target[n] != f(x):
1809 1805 match = False
1810 1806 if match:
1811 1807 return True
1812 1808 return False
1813 1809
1814 1810 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815 1811
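# An illustrative usage sketch (not from the original sources; the revision
# numbers are placeholders): ``matching(tip, "author date")`` selects
# changesets whose author and date both equal those of tip, while a bare
# ``matching(42)`` compares the default ``metadata`` fields (user,
# description and date) against revision 42.
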
1816 1812 @predicate('reverse(set)', safe=True)
1817 1813 def reverse(repo, subset, x):
1818 1814 """Reverse order of set.
1819 1815 """
1820 1816 l = getset(repo, subset, x)
1821 1817 l.reverse()
1822 1818 return l
1823 1819
1824 1820 @predicate('roots(set)', safe=True)
1825 1821 def roots(repo, subset, x):
1826 1822 """Changesets in set with no parent changeset in set.
1827 1823 """
1828 1824 s = getset(repo, fullreposet(repo), x)
1829 1825 parents = repo.changelog.parentrevs
1830 1826 def filter(r):
1831 1827 for p in parents(r):
1832 1828 if 0 <= p and p in s:
1833 1829 return False
1834 1830 return True
1835 1831 return subset & s.filter(filter, condrepr='<roots>')
1836 1832
1837 1833 _sortkeyfuncs = {
1838 1834 'rev': lambda c: c.rev(),
1839 1835 'branch': lambda c: c.branch(),
1840 1836 'desc': lambda c: c.description(),
1841 1837 'user': lambda c: c.user(),
1842 1838 'author': lambda c: c.user(),
1843 1839 'date': lambda c: c.date()[0],
1844 1840 }
1845 1841
1846 1842 def _getsortargs(x):
1847 1843 """Parse sort options into (set, [(key, reverse)], opts)"""
1848 1844 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1849 1845 if 'set' not in args:
1850 1846 # i18n: "sort" is a keyword
1851 1847 raise error.ParseError(_('sort requires one or two arguments'))
1852 1848 keys = "rev"
1853 1849 if 'keys' in args:
1854 1850 # i18n: "sort" is a keyword
1855 1851 keys = getstring(args['keys'], _("sort spec must be a string"))
1856 1852
1857 1853 keyflags = []
1858 1854 for k in keys.split():
1859 1855 fk = k
1860 1856 reverse = (k[0] == '-')
1861 1857 if reverse:
1862 1858 k = k[1:]
1863 1859 if k not in _sortkeyfuncs and k != 'topo':
1864 1860 raise error.ParseError(_("unknown sort key %r") % fk)
1865 1861 keyflags.append((k, reverse))
1866 1862
1867 1863 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1868 1864 # i18n: "topo" is a keyword
1869 1865 raise error.ParseError(_(
1870 1866 'topo sort order cannot be combined with other sort keys'))
1871 1867
1872 1868 opts = {}
1873 1869 if 'topo.firstbranch' in args:
1874 1870 if any(k == 'topo' for k, reverse in keyflags):
1875 1871 opts['topo.firstbranch'] = args['topo.firstbranch']
1876 1872 else:
1877 1873 # i18n: "topo" and "topo.firstbranch" are keywords
1878 1874 raise error.ParseError(_(
1879 1875 'topo.firstbranch can only be used when using the topo sort '
1880 1876 'key'))
1881 1877
1882 1878 return args['set'], keyflags, opts
1883 1879
1884 1880 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1885 1881 def sort(repo, subset, x):
1886 1882 """Sort set by keys. The default sort order is ascending, specify a key
1887 1883 as ``-key`` to sort in descending order.
1888 1884
1889 1885 The keys can be:
1890 1886
1891 1887 - ``rev`` for the revision number,
1892 1888 - ``branch`` for the branch name,
1893 1889 - ``desc`` for the commit message (description),
1894 1890 - ``user`` for user name (``author`` can be used as an alias),
1895 1891 - ``date`` for the commit date,
1896 1892 - ``topo`` for a reverse topological sort
1897 1893
1898 1894 The ``topo`` sort order cannot be combined with other sort keys. This sort
1899 1895 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1900 1896 specifies what topological branches to prioritize in the sort.
1901 1897
1902 1898 """
1903 1899 s, keyflags, opts = _getsortargs(x)
1904 1900 revs = getset(repo, subset, s)
1905 1901
1906 1902 if not keyflags:
1907 1903 return revs
1908 1904 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1909 1905 revs.sort(reverse=keyflags[0][1])
1910 1906 return revs
1911 1907 elif keyflags[0][0] == "topo":
1912 1908 firstbranch = ()
1913 1909 if 'topo.firstbranch' in opts:
1914 1910 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1915 1911 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1916 1912 istopo=True)
1917 1913 if keyflags[0][1]:
1918 1914 revs.reverse()
1919 1915 return revs
1920 1916
1921 1917 # sort() is guaranteed to be stable
1922 1918 ctxs = [repo[r] for r in revs]
1923 1919 for k, reverse in reversed(keyflags):
1924 1920 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1925 1921 return baseset([c.rev() for c in ctxs])
1926 1922
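# An illustrative usage sketch (not from the original sources):
# ``sort(outgoing(), "-date")`` lists outgoing changesets newest first, and
# ``sort(all(), topo, topo.firstbranch=branch(default))`` walks the graph one
# topological branch at a time, prioritizing the default branch.
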
1927 1923 def _toposort(revs, parentsfunc, firstbranch=()):
1928 1924 """Yield revisions from heads to roots one (topo) branch at a time.
1929 1925
1930 1926 This function aims to be used by a graph generator that wishes to minimize
1931 1927 the number of parallel branches and their interleaving.
1932 1928
1933 1929 Example iteration order (numbers show the "true" order in a changelog):
1934 1930
1935 1931 o 4
1936 1932 |
1937 1933 o 1
1938 1934 |
1939 1935 | o 3
1940 1936 | |
1941 1937 | o 2
1942 1938 |/
1943 1939 o 0
1944 1940
1945 1941 Note that the ancestors of merges are understood by the current
1946 1942 algorithm to be on the same branch. This means no reordering will
1947 1943 occur behind a merge.
1948 1944 """
1949 1945
1950 1946 ### Quick summary of the algorithm
1951 1947 #
1952 1948 # This function is based around a "retention" principle. We keep revisions
1953 1949 # in memory until we are ready to emit a whole branch that immediately
1954 1950 # "merges" into an existing one. This reduces the number of parallel
1955 1951 # branches with interleaved revisions.
1956 1952 #
1957 1953 # During iteration revs are split into two groups:
1958 1954 # A) revisions already emitted
1959 1955 # B) revisions in "retention". They are stored as different subgroups.
1960 1956 #
1961 1957 # for each REV, we do the following logic:
1962 1958 #
1963 1959 # 1) if REV is a parent of (A), we will emit it. If there is a
1964 1960 # retention group ((B) above) that is blocked on REV being
1965 1961 # available, we emit all the revisions out of that retention
1966 1962 # group first.
1967 1963 #
1968 1964 # 2) else, we search for a subgroup in (B) waiting for REV to be
1969 1965 # available. If such a subgroup exists, we add REV to it and the subgroup
1970 1966 # is now waiting for REV.parents() to be available.
1971 1967 #
1972 1968 # 3) finally, if no such group exists in (B), we create a new subgroup.
1973 1969 #
1974 1970 #
1975 1971 # To bootstrap the algorithm, we emit the tipmost revision (which
1976 1972 # puts it in group (A) from above).
1977 1973
1978 1974 revs.sort(reverse=True)
1979 1975
1980 1976 # Set of parents of revision that have been emitted. They can be considered
1981 1977 # unblocked as the graph generator is already aware of them so there is no
1982 1978 # need to delay the revisions that reference them.
1983 1979 #
1984 1980 # If someone wants to prioritize a branch over the others, pre-filling this
1985 1981 # set will force all other branches to wait until this branch is ready to be
1986 1982 # emitted.
1987 1983 unblocked = set(firstbranch)
1988 1984
1989 1985 # list of groups waiting to be displayed, each group is defined by:
1990 1986 #
1991 1987 # (revs: list of revs waiting to be displayed,
1992 1988 # blocked: set of revs that cannot be displayed before those in 'revs')
1993 1989 #
1994 1990 # The second value ('blocked') corresponds to parents of any revision in the
1995 1991 # group ('revs') that is not itself contained in the group. The main idea
1996 1992 # of this algorithm is to delay as much as possible the emission of any
1997 1993 # revision. This means waiting for the moment we are about to display
1998 1994 # these parents to display the revs in a group.
1999 1995 #
2000 1996 # This first implementation is smart until it encounters a merge: it will
2001 1997 # emit revs as soon as any parent is about to be emitted and can grow an
2002 1998 # arbitrary number of revs in 'blocked'. In practice this means we properly
2003 1999 # retain new branches but give up on any special ordering for ancestors
2004 2000 # of merges. The implementation can be improved to handle this better.
2005 2001 #
2006 2002 # The first subgroup is special. It corresponds to all the revision that
2007 2003 # were already emitted. The 'revs' lists is expected to be empty and the
2008 2004 # 'blocked' set contains the parents revisions of already emitted revision.
2009 2005 #
2010 2006 # You could pre-seed the <parents> set of groups[0] with specific
2011 2007 # changesets to select what the first emitted branch should be.
2012 2008 groups = [([], unblocked)]
2013 2009 pendingheap = []
2014 2010 pendingset = set()
2015 2011
2016 2012 heapq.heapify(pendingheap)
2017 2013 heappop = heapq.heappop
2018 2014 heappush = heapq.heappush
2019 2015 for currentrev in revs:
2020 2016 # Heap works with smallest element, we want highest so we invert
2021 2017 if currentrev not in pendingset:
2022 2018 heappush(pendingheap, -currentrev)
2023 2019 pendingset.add(currentrev)
2024 2020 # iterate on pending revs until the current rev has been
2025 2021 # processed.
2026 2022 rev = None
2027 2023 while rev != currentrev:
2028 2024 rev = -heappop(pendingheap)
2029 2025 pendingset.remove(rev)
2030 2026
2031 2027 # Seek for a subgroup blocked, waiting for the current revision.
2032 2028 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2033 2029
2034 2030 if matching:
2035 2031 # The main idea is to gather together all sets that are blocked
2036 2032 # on the same revision.
2037 2033 #
2038 2034 # Groups are merged when a common blocking ancestor is
2039 2035 # observed. For example, given two groups:
2040 2036 #
2041 2037 # revs [5, 4] waiting for 1
2042 2038 # revs [3, 2] waiting for 1
2043 2039 #
2044 2040 # These two groups will be merged when we process
2045 2041 # 1. In theory, we could have merged the groups when
2046 2042 # we added 2 to the group it is now in (we could have
2047 2043 # noticed the groups were both blocked on 1 then), but
2048 2044 # the way it works now makes the algorithm simpler.
2049 2045 #
2050 2046 # We also always keep the oldest subgroup first. We can
2051 2047 # probably improve the behavior by having the longest set
2052 2048 # first. That way, graph algorithms could minimise the length
2053 2049 # of parallel lines in their drawings. This is currently not done.
2054 2050 targetidx = matching.pop(0)
2055 2051 trevs, tparents = groups[targetidx]
2056 2052 for i in matching:
2057 2053 gr = groups[i]
2058 2054 trevs.extend(gr[0])
2059 2055 tparents |= gr[1]
2060 2056 # delete all merged subgroups (except the one we kept)
2061 2057 # (starting from the last subgroup for performance and
2062 2058 # sanity reasons)
2063 2059 for i in reversed(matching):
2064 2060 del groups[i]
2065 2061 else:
2066 2062 # This is a new head. We create a new subgroup for it.
2067 2063 targetidx = len(groups)
2068 2064 groups.append(([], set([rev])))
2069 2065
2070 2066 gr = groups[targetidx]
2071 2067
2072 2068 # We now add the current nodes to this subgroup. This is done
2073 2069 # after the subgroup merging because all elements from a subgroup
2074 2070 # that relied on this rev must precede it.
2075 2071 #
2076 2072 # we also update the <parents> set to include the parents of the
2077 2073 # new nodes.
2078 2074 if rev == currentrev: # only display stuff in rev
2079 2075 gr[0].append(rev)
2080 2076 gr[1].remove(rev)
2081 2077 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2082 2078 gr[1].update(parents)
2083 2079 for p in parents:
2084 2080 if p not in pendingset:
2085 2081 pendingset.add(p)
2086 2082 heappush(pendingheap, -p)
2087 2083
2088 2084 # Look for a subgroup to display
2089 2085 #
2090 2086 # When unblocked is empty (if clause), we were not waiting for any
2091 2087 # revisions during the first iteration (if no priority was given) or
2092 2088 # if we emitted a whole disconnected set of the graph (reached a
2093 2089 # root). In that case we arbitrarily take the oldest known
2094 2090 # subgroup. The heuristic could probably be better.
2095 2091 #
2096 2092 # Otherwise (elif clause) if the subgroup is blocked on
2097 2093 # a revision we just emitted, we can safely emit it as
2098 2094 # well.
2099 2095 if not unblocked:
2100 2096 if len(groups) > 1: # display other subset
2101 2097 targetidx = 1
2102 2098 gr = groups[1]
2103 2099 elif not gr[1] & unblocked:
2104 2100 gr = None
2105 2101
2106 2102 if gr is not None:
2107 2103 # update the set of awaited revisions with the one from the
2108 2104 # subgroup
2109 2105 unblocked |= gr[1]
2110 2106 # output all revisions in the subgroup
2111 2107 for r in gr[0]:
2112 2108 yield r
2113 2109 # delete the subgroup that you just output
2114 2110 # unless it is groups[0] in which case you just empty it.
2115 2111 if targetidx:
2116 2112 del groups[targetidx]
2117 2113 else:
2118 2114 gr[0][:] = []
2119 2115 # Check if we have some subgroup waiting for revisions we are not going to
2120 2116 # iterate over
2121 2117 for g in groups:
2122 2118 for r in g[0]:
2123 2119 yield r
2124 2120
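# A minimal sketch (added for illustration, mirroring the doctest style used
# elsewhere in this module) of _toposort() on the graph from its docstring,
# where 4 and 3 are the heads, 1 and 2 their respective parents, and 0 the
# common root:
#
#   >>> parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#   >>> list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#   [4, 1, 3, 2, 0]
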
2125 2121 @predicate('subrepo([pattern])')
2126 2122 def subrepo(repo, subset, x):
2127 2123 """Changesets that add, modify or remove the given subrepo. If no subrepo
2128 2124 pattern is named, any subrepo changes are returned.
2129 2125 """
2130 2126 # i18n: "subrepo" is a keyword
2131 2127 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2132 2128 pat = None
2133 2129 if len(args) != 0:
2134 2130 pat = getstring(args[0], _("subrepo requires a pattern"))
2135 2131
2136 2132 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2137 2133
2138 2134 def submatches(names):
2139 2135 k, p, m = util.stringmatcher(pat)
2140 2136 for name in names:
2141 2137 if m(name):
2142 2138 yield name
2143 2139
2144 2140 def matches(x):
2145 2141 c = repo[x]
2146 2142 s = repo.status(c.p1().node(), c.node(), match=m)
2147 2143
2148 2144 if pat is None:
2149 2145 return s.added or s.modified or s.removed
2150 2146
2151 2147 if s.added:
2152 2148 return any(submatches(c.substate.keys()))
2153 2149
2154 2150 if s.modified:
2155 2151 subs = set(c.p1().substate.keys())
2156 2152 subs.update(c.substate.keys())
2157 2153
2158 2154 for path in submatches(subs):
2159 2155 if c.p1().substate.get(path) != c.substate.get(path):
2160 2156 return True
2161 2157
2162 2158 if s.removed:
2163 2159 return any(submatches(c.p1().substate.keys()))
2164 2160
2165 2161 return False
2166 2162
2167 2163 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2168 2164
2169 2165 def _substringmatcher(pattern):
2170 2166 kind, pattern, matcher = util.stringmatcher(pattern)
2171 2167 if kind == 'literal':
2172 2168 matcher = lambda s: pattern in s
2173 2169 return kind, pattern, matcher
2174 2170
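# A minimal sketch (added for illustration) of the helper above: a plain
# pattern becomes a substring match, while ``re:`` and ``literal:`` prefixes
# keep util.stringmatcher's behavior.
#
#   >>> kind, pattern, m = _substringmatcher('lisa')
#   >>> kind, m('Elisabeth <elisabeth@example.com>')
#   ('literal', True)
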
2175 2171 @predicate('tag([name])', safe=True)
2176 2172 def tag(repo, subset, x):
2177 2173 """The specified tag by name, or all tagged revisions if no name is given.
2178 2174
2179 2175 If `name` starts with `re:`, the remainder of the name is treated as
2180 2176 a regular expression. To match a tag that actually starts with `re:`,
2181 2177 use the prefix `literal:`.
2182 2178 """
2183 2179 # i18n: "tag" is a keyword
2184 2180 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2185 2181 cl = repo.changelog
2186 2182 if args:
2187 2183 pattern = getstring(args[0],
2188 2184 # i18n: "tag" is a keyword
2189 2185 _('the argument to tag must be a string'))
2190 2186 kind, pattern, matcher = util.stringmatcher(pattern)
2191 2187 if kind == 'literal':
2192 2188 # avoid resolving all tags
2193 2189 tn = repo._tagscache.tags.get(pattern, None)
2194 2190 if tn is None:
2195 2191 raise error.RepoLookupError(_("tag '%s' does not exist")
2196 2192 % pattern)
2197 2193 s = set([repo[tn].rev()])
2198 2194 else:
2199 2195 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2200 2196 else:
2201 2197 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2202 2198 return subset & s
2203 2199
2204 2200 @predicate('tagged', safe=True)
2205 2201 def tagged(repo, subset, x):
2206 2202 return tag(repo, subset, x)
2207 2203
2208 2204 @predicate('unstable()', safe=True)
2209 2205 def unstable(repo, subset, x):
2210 2206 """Non-obsolete changesets with obsolete ancestors.
2211 2207 """
2212 2208 # i18n: "unstable" is a keyword
2213 2209 getargs(x, 0, 0, _("unstable takes no arguments"))
2214 2210 unstables = obsmod.getrevs(repo, 'unstable')
2215 2211 return subset & unstables
2216 2212
2217 2213
2218 2214 @predicate('user(string)', safe=True)
2219 2215 def user(repo, subset, x):
2220 2216 """User name contains string. The match is case-insensitive.
2221 2217
2222 2218 If `string` starts with `re:`, the remainder of the string is treated as
2223 2219 a regular expression. To match a user that actually contains `re:`, use
2224 2220 the prefix `literal:`.
2225 2221 """
2226 2222 return author(repo, subset, x)
2227 2223
2228 2224 # experimental
2229 2225 @predicate('wdir', safe=True)
2230 2226 def wdir(repo, subset, x):
2231 2227 # i18n: "wdir" is a keyword
2232 2228 getargs(x, 0, 0, _("wdir takes no arguments"))
2233 2229 if node.wdirrev in subset or isinstance(subset, fullreposet):
2234 2230 return baseset([node.wdirrev])
2235 2231 return baseset()
2236 2232
2237 2233 # for internal use
2238 2234 @predicate('_list', safe=True)
2239 2235 def _list(repo, subset, x):
2240 2236 s = getstring(x, "internal error")
2241 2237 if not s:
2242 2238 return baseset()
2243 2239 # remove duplicates here. it's difficult for caller to deduplicate sets
2244 2240 # because different symbols can point to the same rev.
2245 2241 cl = repo.changelog
2246 2242 ls = []
2247 2243 seen = set()
2248 2244 for t in s.split('\0'):
2249 2245 try:
2250 2246 # fast path for integer revision
2251 2247 r = int(t)
2252 2248 if str(r) != t or r not in cl:
2253 2249 raise ValueError
2254 2250 revs = [r]
2255 2251 except ValueError:
2256 2252 revs = stringset(repo, subset, t)
2257 2253
2258 2254 for r in revs:
2259 2255 if r in seen:
2260 2256 continue
2261 2257 if (r in subset
2262 2258 or r == node.nullrev and isinstance(subset, fullreposet)):
2263 2259 ls.append(r)
2264 2260 seen.add(r)
2265 2261 return baseset(ls)
2266 2262
2267 2263 # for internal use
2268 2264 @predicate('_intlist', safe=True)
2269 2265 def _intlist(repo, subset, x):
2270 2266 s = getstring(x, "internal error")
2271 2267 if not s:
2272 2268 return baseset()
2273 2269 ls = [int(r) for r in s.split('\0')]
2274 2270 s = subset
2275 2271 return baseset([r for r in ls if r in s])
2276 2272
2277 2273 # for internal use
2278 2274 @predicate('_hexlist', safe=True)
2279 2275 def _hexlist(repo, subset, x):
2280 2276 s = getstring(x, "internal error")
2281 2277 if not s:
2282 2278 return baseset()
2283 2279 cl = repo.changelog
2284 2280 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2285 2281 s = subset
2286 2282 return baseset([r for r in ls if r in s])
2287 2283
2288 2284 methods = {
2289 2285 "range": rangeset,
2290 2286 "dagrange": dagrange,
2291 2287 "string": stringset,
2292 2288 "symbol": stringset,
2293 2289 "and": andset,
2294 2290 "or": orset,
2295 2291 "not": notset,
2296 2292 "difference": differenceset,
2297 2293 "list": listset,
2298 2294 "keyvalue": keyvaluepair,
2299 2295 "func": func,
2300 2296 "ancestor": ancestorspec,
2301 2297 "parent": parentspec,
2302 2298 "parentpost": p1,
2303 2299 }
2304 2300
2305 2301 def _matchonly(revs, bases):
2306 2302 """
2307 2303 >>> f = lambda *args: _matchonly(*map(parse, args))
2308 2304 >>> f('ancestors(A)', 'not ancestors(B)')
2309 2305 ('list', ('symbol', 'A'), ('symbol', 'B'))
2310 2306 """
2311 2307 if (revs is not None
2312 2308 and revs[0] == 'func'
2313 2309 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2314 2310 and bases is not None
2315 2311 and bases[0] == 'not'
2316 2312 and bases[1][0] == 'func'
2317 2313 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2318 2314 return ('list', revs[2], bases[1][2])
2319 2315
2320 2316 def _optimize(x, small):
2321 2317 if x is None:
2322 2318 return 0, x
2323 2319
2324 2320 smallbonus = 1
2325 2321 if small:
2326 2322 smallbonus = .5
2327 2323
2328 2324 op = x[0]
2329 2325 if op == 'minus':
2330 2326 return _optimize(('and', x[1], ('not', x[2])), small)
2331 2327 elif op == 'only':
2332 2328 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2333 2329 return _optimize(t, small)
2334 2330 elif op == 'onlypost':
2335 2331 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2336 2332 elif op == 'dagrangepre':
2337 2333 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2338 2334 elif op == 'dagrangepost':
2339 2335 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2340 2336 elif op == 'rangeall':
2341 2337 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2342 2338 elif op == 'rangepre':
2343 2339 return _optimize(('range', ('string', '0'), x[1]), small)
2344 2340 elif op == 'rangepost':
2345 2341 return _optimize(('range', x[1], ('string', 'tip')), small)
2346 2342 elif op == 'negate':
2347 2343 s = getstring(x[1], _("can't negate that"))
2348 2344 return _optimize(('string', '-' + s), small)
2349 2345 elif op in 'string symbol negate':
2350 2346 return smallbonus, x # single revisions are small
2351 2347 elif op == 'and':
2352 2348 wa, ta = _optimize(x[1], True)
2353 2349 wb, tb = _optimize(x[2], True)
2354 2350 w = min(wa, wb)
2355 2351
2356 2352 # (::x and not ::y)/(not ::y and ::x) have a fast path
2357 2353 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2358 2354 if tm:
2359 2355 return w, ('func', ('symbol', 'only'), tm)
2360 2356
2361 2357 if tb is not None and tb[0] == 'not':
2362 2358 return wa, ('difference', ta, tb[1])
2363 2359
2364 2360 if wa > wb:
2365 2361 return w, (op, tb, ta)
2366 2362 return w, (op, ta, tb)
2367 2363 elif op == 'or':
2368 2364 # fast path for machine-generated expression, that is likely to have
2369 2365 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2370 2366 ws, ts, ss = [], [], []
2371 2367 def flushss():
2372 2368 if not ss:
2373 2369 return
2374 2370 if len(ss) == 1:
2375 2371 w, t = ss[0]
2376 2372 else:
2377 2373 s = '\0'.join(t[1] for w, t in ss)
2378 2374 y = ('func', ('symbol', '_list'), ('string', s))
2379 2375 w, t = _optimize(y, False)
2380 2376 ws.append(w)
2381 2377 ts.append(t)
2382 2378 del ss[:]
2383 2379 for y in x[1:]:
2384 2380 w, t = _optimize(y, False)
2385 2381 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2386 2382 ss.append((w, t))
2387 2383 continue
2388 2384 flushss()
2389 2385 ws.append(w)
2390 2386 ts.append(t)
2391 2387 flushss()
2392 2388 if len(ts) == 1:
2393 2389 return ws[0], ts[0] # 'or' operation is fully optimized out
2394 2390 # we can't reorder trees by weight because it would change the order.
2395 2391 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2396 2392 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2397 2393 return max(ws), (op,) + tuple(ts)
2398 2394 elif op == 'not':
2399 2395 # Optimize not public() to _notpublic() because we have a fast version
2400 2396 if x[1] == ('func', ('symbol', 'public'), None):
2401 2397 newsym = ('func', ('symbol', '_notpublic'), None)
2402 2398 o = _optimize(newsym, not small)
2403 2399 return o[0], o[1]
2404 2400 else:
2405 2401 o = _optimize(x[1], not small)
2406 2402 return o[0], (op, o[1])
2407 2403 elif op == 'parentpost':
2408 2404 o = _optimize(x[1], small)
2409 2405 return o[0], (op, o[1])
2410 2406 elif op == 'group':
2411 2407 return _optimize(x[1], small)
2412 2408 elif op in 'dagrange range parent ancestorspec':
2413 2409 if op == 'parent':
2414 2410 # x^:y means (x^) : y, not x ^ (:y)
2415 2411 post = ('parentpost', x[1])
2416 2412 if x[2][0] == 'dagrangepre':
2417 2413 return _optimize(('dagrange', post, x[2][1]), small)
2418 2414 elif x[2][0] == 'rangepre':
2419 2415 return _optimize(('range', post, x[2][1]), small)
2420 2416
2421 2417 wa, ta = _optimize(x[1], small)
2422 2418 wb, tb = _optimize(x[2], small)
2423 2419 return wa + wb, (op, ta, tb)
2424 2420 elif op == 'list':
2425 2421 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2426 2422 return sum(ws), (op,) + ts
2427 2423 elif op == 'func':
2428 2424 f = getstring(x[1], _("not a symbol"))
2429 2425 wa, ta = _optimize(x[2], small)
2430 2426 if f in ("author branch closed date desc file grep keyword "
2431 2427 "outgoing user"):
2432 2428 w = 10 # slow
2433 2429 elif f in "modifies adds removes":
2434 2430 w = 30 # slower
2435 2431 elif f == "contains":
2436 2432 w = 100 # very slow
2437 2433 elif f == "ancestor":
2438 2434 w = 1 * smallbonus
2439 2435 elif f in "reverse limit first _intlist":
2440 2436 w = 0
2441 2437 elif f in "sort":
2442 2438 w = 10 # assume most sorts look at changelog
2443 2439 else:
2444 2440 w = 1
2445 2441 return w + wa, (op, x[1], ta)
2446 2442 return 1, x
2447 2443
2448 2444 def optimize(tree):
2449 2445 _weight, newtree = _optimize(tree, small=True)
2450 2446 return newtree
2451 2447
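# A minimal sketch (added for illustration) of the '_list' fast path in
# _optimize() above: a union of trivial revisions should collapse into a
# single _list() call instead of several stringset lookups, roughly:
#
#   >>> optimize(parse('10 + 11 + tip'))
#   ('func', ('symbol', '_list'), ('string', '10\x0011\x00tip'))
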
2452 2448 # the set of valid characters for the initial letter of symbols in
2453 2449 # alias declarations and definitions
2454 2450 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2455 2451 if c.isalnum() or c in '._@$' or ord(c) > 127)
2456 2452
2457 2453 def _parsewith(spec, lookup=None, syminitletters=None):
2458 2454 """Generate a parse tree of given spec with given tokenizing options
2459 2455
2460 2456 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2461 2457 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2462 2458 >>> _parsewith('$1')
2463 2459 Traceback (most recent call last):
2464 2460 ...
2465 2461 ParseError: ("syntax error in revset '$1'", 0)
2466 2462 >>> _parsewith('foo bar')
2467 2463 Traceback (most recent call last):
2468 2464 ...
2469 2465 ParseError: ('invalid token', 4)
2470 2466 """
2471 2467 p = parser.parser(elements)
2472 2468 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2473 2469 syminitletters=syminitletters))
2474 2470 if pos != len(spec):
2475 2471 raise error.ParseError(_('invalid token'), pos)
2476 2472 return parser.simplifyinfixops(tree, ('list', 'or'))
2477 2473
2478 2474 class _aliasrules(parser.basealiasrules):
2479 2475 """Parsing and expansion rule set of revset aliases"""
2480 2476 _section = _('revset alias')
2481 2477
2482 2478 @staticmethod
2483 2479 def _parse(spec):
2484 2480 """Parse alias declaration/definition ``spec``
2485 2481
2486 2482 This allows symbol names to also use ``$`` as an initial letter
2487 2483 (for backward compatibility), and callers of this function should
2488 2484 examine whether ``$`` is used also for unexpected symbols or not.
2489 2485 """
2490 2486 return _parsewith(spec, syminitletters=_aliassyminitletters)
2491 2487
2492 2488 @staticmethod
2493 2489 def _trygetfunc(tree):
2494 2490 if tree[0] == 'func' and tree[1][0] == 'symbol':
2495 2491 return tree[1][1], getlist(tree[2])
2496 2492
2497 2493 def expandaliases(ui, tree, showwarning=None):
2498 2494 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2499 2495 tree = _aliasrules.expand(aliases, tree)
2500 2496 if showwarning:
2501 2497 # warn about problematic (but not referenced) aliases
2502 2498 for name, alias in sorted(aliases.iteritems()):
2503 2499 if alias.error and not alias.warned:
2504 2500 showwarning(_('warning: %s\n') % (alias.error))
2505 2501 alias.warned = True
2506 2502 return tree
2507 2503
2508 2504 def foldconcat(tree):
2509 2505 """Fold elements to be concatenated by `##`
2510 2506 """
2511 2507 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2512 2508 return tree
2513 2509 if tree[0] == '_concat':
2514 2510 pending = [tree]
2515 2511 l = []
2516 2512 while pending:
2517 2513 e = pending.pop()
2518 2514 if e[0] == '_concat':
2519 2515 pending.extend(reversed(e[1:]))
2520 2516 elif e[0] in ('string', 'symbol'):
2521 2517 l.append(e[1])
2522 2518 else:
2523 2519 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2524 2520 raise error.ParseError(msg)
2525 2521 return ('string', ''.join(l))
2526 2522 else:
2527 2523 return tuple(foldconcat(t) for t in tree)
2528 2524
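# A minimal sketch (added for illustration) of foldconcat() on a small parse
# tree: the '##' operator concatenates string/symbol operands into a single
# string node.
#
#   >>> foldconcat(('_concat', ('string', 'release-'), ('symbol', '1.0')))
#   ('string', 'release-1.0')
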
2529 2525 def parse(spec, lookup=None):
2530 2526 return _parsewith(spec, lookup=lookup)
2531 2527
2532 2528 def posttreebuilthook(tree, repo):
2533 2529 # hook for extensions to execute code on the optimized tree
2534 2530 pass
2535 2531
2536 2532 def match(ui, spec, repo=None):
2537 2533 if not spec:
2538 2534 raise error.ParseError(_("empty query"))
2539 2535 lookup = None
2540 2536 if repo:
2541 2537 lookup = repo.__contains__
2542 2538 tree = parse(spec, lookup)
2543 2539 return _makematcher(ui, tree, repo)
2544 2540
2545 2541 def matchany(ui, specs, repo=None):
2546 2542 """Create a matcher that will include any revisions matching one of the
2547 2543 given specs"""
2548 2544 if not specs:
2549 2545 def mfunc(repo, subset=None):
2550 2546 return baseset()
2551 2547 return mfunc
2552 2548 if not all(specs):
2553 2549 raise error.ParseError(_("empty query"))
2554 2550 lookup = None
2555 2551 if repo:
2556 2552 lookup = repo.__contains__
2557 2553 if len(specs) == 1:
2558 2554 tree = parse(specs[0], lookup)
2559 2555 else:
2560 2556 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2561 2557 return _makematcher(ui, tree, repo)
2562 2558
2563 2559 def _makematcher(ui, tree, repo):
2564 2560 if ui:
2565 2561 tree = expandaliases(ui, tree, showwarning=ui.warn)
2566 2562 tree = foldconcat(tree)
2567 2563 tree = optimize(tree)
2568 2564 posttreebuilthook(tree, repo)
2569 2565 def mfunc(repo, subset=None):
2570 2566 if subset is None:
2571 2567 subset = fullreposet(repo)
2572 2568 if util.safehasattr(subset, 'isascending'):
2573 2569 result = getset(repo, subset, tree)
2574 2570 else:
2575 2571 result = getset(repo, baseset(subset), tree)
2576 2572 return result
2577 2573 return mfunc
2578 2574
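# An illustrative usage sketch (not from the original sources; the spec and
# user name are placeholders): callers typically build the matcher once and
# then evaluate it, optionally against an explicit subset.
#
#   m = match(ui, 'draft() and user("alice")', repo)
#   revs = m(repo)                        # evaluated against fullreposet(repo)
#   some = m(repo, subset=baseset([1, 2, 3]))
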
2579 2575 def formatspec(expr, *args):
2580 2576 '''
2581 2577 This is a convenience function for using revsets internally, and
2582 2578 escapes arguments appropriately. Aliases are intentionally ignored
2583 2579 so that intended expression behavior isn't accidentally subverted.
2584 2580
2585 2581 Supported arguments:
2586 2582
2587 2583 %r = revset expression, parenthesized
2588 2584 %d = int(arg), no quoting
2589 2585 %s = string(arg), escaped and single-quoted
2590 2586 %b = arg.branch(), escaped and single-quoted
2591 2587 %n = hex(arg), single-quoted
2592 2588 %% = a literal '%'
2593 2589
2594 2590 Prefixing the type with 'l' specifies a parenthesized list of that type.
2595 2591
2596 2592 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2597 2593 '(10 or 11):: and ((this()) or (that()))'
2598 2594 >>> formatspec('%d:: and not %d::', 10, 20)
2599 2595 '10:: and not 20::'
2600 2596 >>> formatspec('%ld or %ld', [], [1])
2601 2597 "_list('') or 1"
2602 2598 >>> formatspec('keyword(%s)', 'foo\\xe9')
2603 2599 "keyword('foo\\\\xe9')"
2604 2600 >>> b = lambda: 'default'
2605 2601 >>> b.branch = b
2606 2602 >>> formatspec('branch(%b)', b)
2607 2603 "branch('default')"
2608 2604 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2609 2605 "root(_list('a\\x00b\\x00c\\x00d'))"
2610 2606 '''
2611 2607
2612 2608 def quote(s):
2613 2609 return repr(str(s))
2614 2610
2615 2611 def argtype(c, arg):
2616 2612 if c == 'd':
2617 2613 return str(int(arg))
2618 2614 elif c == 's':
2619 2615 return quote(arg)
2620 2616 elif c == 'r':
2621 2617 parse(arg) # make sure syntax errors are confined
2622 2618 return '(%s)' % arg
2623 2619 elif c == 'n':
2624 2620 return quote(node.hex(arg))
2625 2621 elif c == 'b':
2626 2622 return quote(arg.branch())
2627 2623
2628 2624 def listexp(s, t):
2629 2625 l = len(s)
2630 2626 if l == 0:
2631 2627 return "_list('')"
2632 2628 elif l == 1:
2633 2629 return argtype(t, s[0])
2634 2630 elif t == 'd':
2635 2631 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2636 2632 elif t == 's':
2637 2633 return "_list('%s')" % "\0".join(s)
2638 2634 elif t == 'n':
2639 2635 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2640 2636 elif t == 'b':
2641 2637 return "_list('%s')" % "\0".join(a.branch() for a in s)
2642 2638
2643 2639 m = l // 2
2644 2640 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2645 2641
2646 2642 ret = ''
2647 2643 pos = 0
2648 2644 arg = 0
2649 2645 while pos < len(expr):
2650 2646 c = expr[pos]
2651 2647 if c == '%':
2652 2648 pos += 1
2653 2649 d = expr[pos]
2654 2650 if d == '%':
2655 2651 ret += d
2656 2652 elif d in 'dsnbr':
2657 2653 ret += argtype(d, args[arg])
2658 2654 arg += 1
2659 2655 elif d == 'l':
2660 2656 # a list of some type
2661 2657 pos += 1
2662 2658 d = expr[pos]
2663 2659 ret += listexp(list(args[arg]), d)
2664 2660 arg += 1
2665 2661 else:
2666 2662 raise error.Abort(_('unexpected revspec format character %s')
2667 2663 % d)
2668 2664 else:
2669 2665 ret += c
2670 2666 pos += 1
2671 2667
2672 2668 return ret
2673 2669
2674 2670 def prettyformat(tree):
2675 2671 return parser.prettyformat(tree, ('string', 'symbol'))
2676 2672
2677 2673 def depth(tree):
2678 2674 if isinstance(tree, tuple):
2679 2675 return max(map(depth, tree)) + 1
2680 2676 else:
2681 2677 return 0
2682 2678
2683 2679 def funcsused(tree):
2684 2680 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2685 2681 return set()
2686 2682 else:
2687 2683 funcs = set()
2688 2684 for s in tree[1:]:
2689 2685 funcs |= funcsused(s)
2690 2686 if tree[0] == 'func':
2691 2687 funcs.add(tree[1][1])
2692 2688 return funcs
2693 2689
2694 2690 def _formatsetrepr(r):
2695 2691 """Format an optional printable representation of a set
2696 2692
2697 2693 ======== =================================
2698 2694 type(r) example
2699 2695 ======== =================================
2700 2696 tuple ('<not %r>', other)
2701 2697 str '<branch closed>'
2702 2698 callable lambda: '<branch %r>' % sorted(b)
2703 2699 object other
2704 2700 ======== =================================
2705 2701 """
2706 2702 if r is None:
2707 2703 return ''
2708 2704 elif isinstance(r, tuple):
2709 2705 return r[0] % r[1:]
2710 2706 elif isinstance(r, str):
2711 2707 return r
2712 2708 elif callable(r):
2713 2709 return r()
2714 2710 else:
2715 2711 return repr(r)
2716 2712
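# A minimal sketch (added for illustration) of the formats accepted above:
#
#   >>> _formatsetrepr(('<not %r>', [1, 2]))
#   '<not [1, 2]>'
#   >>> _formatsetrepr(lambda: '<branch default>')
#   '<branch default>'
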
2717 2713 class abstractsmartset(object):
2718 2714
2719 2715 def __nonzero__(self):
2720 2716 """True if the smartset is not empty"""
2721 2717 raise NotImplementedError()
2722 2718
2723 2719 def __contains__(self, rev):
2724 2720 """provide fast membership testing"""
2725 2721 raise NotImplementedError()
2726 2722
2727 2723 def __iter__(self):
2728 2724 """iterate the set in the order it is supposed to be iterated"""
2729 2725 raise NotImplementedError()
2730 2726
2731 2727 # Attributes containing a function to perform a fast iteration in a given
2732 2728 # direction. A smartset can have none, one, or both defined.
2733 2729 #
2734 2730 # Default value is None instead of a function returning None to avoid
2735 2731 # initializing an iterator just for testing if a fast method exists.
2736 2732 fastasc = None
2737 2733 fastdesc = None
2738 2734
2739 2735 def isascending(self):
2740 2736 """True if the set will iterate in ascending order"""
2741 2737 raise NotImplementedError()
2742 2738
2743 2739 def isdescending(self):
2744 2740 """True if the set will iterate in descending order"""
2745 2741 raise NotImplementedError()
2746 2742
2747 2743 def istopo(self):
2748 2744 """True if the set will iterate in topological order"""
2749 2745 raise NotImplementedError()
2750 2746
2751 2747 @util.cachefunc
2752 2748 def min(self):
2753 2749 """return the minimum element in the set"""
2754 2750 if self.fastasc is not None:
2755 2751 for r in self.fastasc():
2756 2752 return r
2757 2753 raise ValueError('arg is an empty sequence')
2758 2754 return min(self)
2759 2755
2760 2756 @util.cachefunc
2761 2757 def max(self):
2762 2758 """return the maximum element in the set"""
2763 2759 if self.fastdesc is not None:
2764 2760 for r in self.fastdesc():
2765 2761 return r
2766 2762 raise ValueError('arg is an empty sequence')
2767 2763 return max(self)
2768 2764
2769 2765 def first(self):
2770 2766 """return the first element in the set (user iteration perspective)
2771 2767
2772 2768 Return None if the set is empty"""
2773 2769 raise NotImplementedError()
2774 2770
2775 2771 def last(self):
2776 2772 """return the last element in the set (user iteration perspective)
2777 2773
2778 2774 Return None if the set is empty"""
2779 2775 raise NotImplementedError()
2780 2776
2781 2777 def __len__(self):
2782 2778 """return the length of the smartset
2783 2779
2784 2780 This can be expensive on a smartset that could otherwise be lazy."""
2785 2781 raise NotImplementedError()
2786 2782
2787 2783 def reverse(self):
2788 2784 """reverse the expected iteration order"""
2789 2785 raise NotImplementedError()
2790 2786
2791 2787 def sort(self, reverse=True):
2792 2788 """get the set to iterate in an ascending or descending order"""
2793 2789 raise NotImplementedError()
2794 2790
2795 2791 def __and__(self, other):
2796 2792 """Returns a new object with the intersection of the two collections.
2797 2793
2798 2794 This is part of the mandatory API for smartset."""
2799 2795 if isinstance(other, fullreposet):
2800 2796 return self
2801 2797 return self.filter(other.__contains__, condrepr=other, cache=False)
2802 2798
2803 2799 def __add__(self, other):
2804 2800 """Returns a new object with the union of the two collections.
2805 2801
2806 2802 This is part of the mandatory API for smartset."""
2807 2803 return addset(self, other)
2808 2804
2809 2805 def __sub__(self, other):
2810 2806 """Returns a new object with the subtraction of the two collections.
2811 2807
2812 2808 This is part of the mandatory API for smartset."""
2813 2809 c = other.__contains__
2814 2810 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2815 2811 cache=False)
2816 2812
2817 2813 def filter(self, condition, condrepr=None, cache=True):
2818 2814 """Returns this smartset filtered by condition as a new smartset.
2819 2815
2820 2816 `condition` is a callable which takes a revision number and returns a
2821 2817 boolean. Optional `condrepr` provides a printable representation of
2822 2818 the given `condition`.
2823 2819
2824 2820 This is part of the mandatory API for smartset."""
2825 2821 # builtin functions cannot be cached, but they do not need to be
2826 2822 if cache and util.safehasattr(condition, 'func_code'):
2827 2823 condition = util.cachefunc(condition)
2828 2824 return filteredset(self, condition, condrepr)
2829 2825
2830 2826 class baseset(abstractsmartset):
2831 2827 """Basic data structure that represents a revset and contains the basic
2832 2828 operations that it should be able to perform.
2833 2829
2834 2830 Every method in this class should be implemented by any smartset class.
2835 2831 """
2836 2832 def __init__(self, data=(), datarepr=None, istopo=False):
2837 2833 """
2838 2834 datarepr: a tuple of (format, obj, ...), a function or an object that
2839 2835 provides a printable representation of the given data.
2840 2836 """
2841 2837 self._ascending = None
2842 2838 self._istopo = istopo
2843 2839 if not isinstance(data, list):
2844 2840 if isinstance(data, set):
2845 2841 self._set = data
2846 2842 # a set has no order; we pick one for stability purposes
2847 2843 self._ascending = True
2848 2844 data = list(data)
2849 2845 self._list = data
2850 2846 self._datarepr = datarepr
2851 2847
2852 2848 @util.propertycache
2853 2849 def _set(self):
2854 2850 return set(self._list)
2855 2851
2856 2852 @util.propertycache
2857 2853 def _asclist(self):
2858 2854 asclist = self._list[:]
2859 2855 asclist.sort()
2860 2856 return asclist
2861 2857
2862 2858 def __iter__(self):
2863 2859 if self._ascending is None:
2864 2860 return iter(self._list)
2865 2861 elif self._ascending:
2866 2862 return iter(self._asclist)
2867 2863 else:
2868 2864 return reversed(self._asclist)
2869 2865
2870 2866 def fastasc(self):
2871 2867 return iter(self._asclist)
2872 2868
2873 2869 def fastdesc(self):
2874 2870 return reversed(self._asclist)
2875 2871
2876 2872 @util.propertycache
2877 2873 def __contains__(self):
2878 2874 return self._set.__contains__
2879 2875
2880 2876 def __nonzero__(self):
2881 2877 return bool(self._list)
2882 2878
2883 2879 def sort(self, reverse=False):
2884 2880 self._ascending = not bool(reverse)
2885 2881 self._istopo = False
2886 2882
2887 2883 def reverse(self):
2888 2884 if self._ascending is None:
2889 2885 self._list.reverse()
2890 2886 else:
2891 2887 self._ascending = not self._ascending
2892 2888 self._istopo = False
2893 2889
2894 2890 def __len__(self):
2895 2891 return len(self._list)
2896 2892
2897 2893 def isascending(self):
2898 2894 """Returns True if the collection is in ascending order, False if not.
2899 2895
2900 2896 This is part of the mandatory API for smartset."""
2901 2897 if len(self) <= 1:
2902 2898 return True
2903 2899 return self._ascending is not None and self._ascending
2904 2900
2905 2901 def isdescending(self):
2906 2902 """Returns True if the collection is in descending order, False if not.
2907 2903
2908 2904 This is part of the mandatory API for smartset."""
2909 2905 if len(self) <= 1:
2910 2906 return True
2911 2907 return self._ascending is not None and not self._ascending
2912 2908
2913 2909 def istopo(self):
2914 2910 """Returns True if the collection is in topological order, False if not.
2915 2911
2916 2912 This is part of the mandatory API for smartset."""
2917 2913 if len(self) <= 1:
2918 2914 return True
2919 2915 return self._istopo
2920 2916
2921 2917 def first(self):
2922 2918 if self:
2923 2919 if self._ascending is None:
2924 2920 return self._list[0]
2925 2921 elif self._ascending:
2926 2922 return self._asclist[0]
2927 2923 else:
2928 2924 return self._asclist[-1]
2929 2925 return None
2930 2926
2931 2927 def last(self):
2932 2928 if self:
2933 2929 if self._ascending is None:
2934 2930 return self._list[-1]
2935 2931 elif self._ascending:
2936 2932 return self._asclist[-1]
2937 2933 else:
2938 2934 return self._asclist[0]
2939 2935 return None
2940 2936
2941 2937 def __repr__(self):
2942 2938 d = {None: '', False: '-', True: '+'}[self._ascending]
2943 2939 s = _formatsetrepr(self._datarepr)
2944 2940 if not s:
2945 2941 l = self._list
2946 2942 # if _list has been built from a set, it might have a different
2947 2943 # order from one python implementation to another.
2948 2944 # We fall back to the sorted version for a stable output.
2949 2945 if self._ascending is not None:
2950 2946 l = self._asclist
2951 2947 s = repr(l)
2952 2948 return '<%s%s %s>' % (type(self).__name__, d, s)
2953 2949
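# A minimal sketch (added for illustration, mirroring the doctest style used
# elsewhere in this module) of basic baseset behavior:
#
#   >>> s = baseset([3, 1, 2])
#   >>> s.sort()
#   >>> [r for r in s], s.first(), s.last(), 5 in s
#   ([1, 2, 3], 1, 3, False)
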
2954 2950 class filteredset(abstractsmartset):
2955 2951 """Duck type for baseset class which iterates lazily over the revisions in
2956 2952 the subset and contains a function which tests for membership in the
2957 2953 revset
2958 2954 """
2959 2955 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2960 2956 """
2961 2957 condition: a function that decides whether a revision in the subset
2962 2958 belongs to the revset or not.
2963 2959 condrepr: a tuple of (format, obj, ...), a function or an object that
2964 2960 provides a printable representation of the given condition.
2965 2961 """
2966 2962 self._subset = subset
2967 2963 self._condition = condition
2968 2964 self._condrepr = condrepr
2969 2965
2970 2966 def __contains__(self, x):
2971 2967 return x in self._subset and self._condition(x)
2972 2968
2973 2969 def __iter__(self):
2974 2970 return self._iterfilter(self._subset)
2975 2971
2976 2972 def _iterfilter(self, it):
2977 2973 cond = self._condition
2978 2974 for x in it:
2979 2975 if cond(x):
2980 2976 yield x
2981 2977
2982 2978 @property
2983 2979 def fastasc(self):
2984 2980 it = self._subset.fastasc
2985 2981 if it is None:
2986 2982 return None
2987 2983 return lambda: self._iterfilter(it())
2988 2984
2989 2985 @property
2990 2986 def fastdesc(self):
2991 2987 it = self._subset.fastdesc
2992 2988 if it is None:
2993 2989 return None
2994 2990 return lambda: self._iterfilter(it())
2995 2991
2996 2992 def __nonzero__(self):
2997 2993 fast = None
2998 2994 candidates = [self.fastasc if self.isascending() else None,
2999 2995 self.fastdesc if self.isdescending() else None,
3000 2996 self.fastasc,
3001 2997 self.fastdesc]
3002 2998 for candidate in candidates:
3003 2999 if candidate is not None:
3004 3000 fast = candidate
3005 3001 break
3006 3002
3007 3003 if fast is not None:
3008 3004 it = fast()
3009 3005 else:
3010 3006 it = self
3011 3007
3012 3008 for r in it:
3013 3009 return True
3014 3010 return False
3015 3011
3016 3012 def __len__(self):
3017 3013 # Basic implementation to be changed in future patches.
3018 3014 # until this gets improved, we use a generator expression
3019 3015 # here, since a list comprehension is free to call __len__ again,
3020 3016 # causing infinite recursion
3021 3017 l = baseset(r for r in self)
3022 3018 return len(l)
3023 3019
3024 3020 def sort(self, reverse=False):
3025 3021 self._subset.sort(reverse=reverse)
3026 3022
3027 3023 def reverse(self):
3028 3024 self._subset.reverse()
3029 3025
3030 3026 def isascending(self):
3031 3027 return self._subset.isascending()
3032 3028
3033 3029 def isdescending(self):
3034 3030 return self._subset.isdescending()
3035 3031
3036 3032 def istopo(self):
3037 3033 return self._subset.istopo()
3038 3034
3039 3035 def first(self):
3040 3036 for x in self:
3041 3037 return x
3042 3038 return None
3043 3039
3044 3040 def last(self):
3045 3041 it = None
3046 3042 if self.isascending():
3047 3043 it = self.fastdesc
3048 3044 elif self.isdescending():
3049 3045 it = self.fastasc
3050 3046 if it is not None:
3051 3047 for x in it():
3052 3048 return x
3053 3049 return None #empty case
3054 3050 else:
3055 3051 x = None
3056 3052 for x in self:
3057 3053 pass
3058 3054 return x
3059 3055
3060 3056 def __repr__(self):
3061 3057 xs = [repr(self._subset)]
3062 3058 s = _formatsetrepr(self._condrepr)
3063 3059 if s:
3064 3060 xs.append(s)
3065 3061 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3066 3062
3067 3063 def _iterordered(ascending, iter1, iter2):
3068 3064 """produce an ordered iteration from two iterators with the same order
3069 3065
3070 3066 The 'ascending' parameter is used to indicate the iteration direction.
3071 3067 """
3072 3068 choice = max
3073 3069 if ascending:
3074 3070 choice = min
3075 3071
3076 3072 val1 = None
3077 3073 val2 = None
3078 3074 try:
3079 3075 # Consume both iterators in an ordered way until one is empty
3080 3076 while True:
3081 3077 if val1 is None:
3082 3078 val1 = next(iter1)
3083 3079 if val2 is None:
3084 3080 val2 = next(iter2)
3085 3081 n = choice(val1, val2)
3086 3082 yield n
3087 3083 if val1 == n:
3088 3084 val1 = None
3089 3085 if val2 == n:
3090 3086 val2 = None
3091 3087 except StopIteration:
3092 3088 # Flush any remaining values and consume the other one
3093 3089 it = iter2
3094 3090 if val1 is not None:
3095 3091 yield val1
3096 3092 it = iter1
3097 3093 elif val2 is not None:
3098 3094 # might have been equality and both are empty
3099 3095 yield val2
3100 3096 for val in it:
3101 3097 yield val
3102 3098
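# Illustrative sketch (not part of the original module): merging two already
# sorted iterators with _iterordered. Values present in both inputs are
# yielded only once, and the direction flag must match the input order.
def _iterorderedexample():
    asc = _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
    assert list(asc) == [1, 2, 3, 5, 6]
    desc = _iterordered(False, iter([5, 3, 1]), iter([6, 3, 2]))
    assert list(desc) == [6, 5, 3, 2, 1]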
3103 3099 class addset(abstractsmartset):
3104 3100 """Represent the addition of two sets
3105 3101
3106 3102 Wrapper structure for lazily adding two structures without losing much
3107 3103 performance on the __contains__ method
3108 3104
3109 3105 If the ascending attribute is set, that means the two structures are
3110 3106 ordered in either an ascending or descending way. Therefore, we can add
3111 3107 them maintaining the order by iterating over both at the same time
3112 3108
3113 3109 >>> xs = baseset([0, 3, 2])
3114 3110 >>> ys = baseset([5, 2, 4])
3115 3111
3116 3112 >>> rs = addset(xs, ys)
3117 3113 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3118 3114 (True, True, False, True, 0, 4)
3119 3115 >>> rs = addset(xs, baseset([]))
3120 3116 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3121 3117 (True, True, False, 0, 2)
3122 3118 >>> rs = addset(baseset([]), baseset([]))
3123 3119 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3124 3120 (False, False, None, None)
3125 3121
3126 3122 iterate unsorted:
3127 3123 >>> rs = addset(xs, ys)
3128 3124 >>> # (use generator because pypy could call len())
3129 3125 >>> list(x for x in rs) # without _genlist
3130 3126 [0, 3, 2, 5, 4]
3131 3127 >>> assert not rs._genlist
3132 3128 >>> len(rs)
3133 3129 5
3134 3130 >>> [x for x in rs] # with _genlist
3135 3131 [0, 3, 2, 5, 4]
3136 3132 >>> assert rs._genlist
3137 3133
3138 3134 iterate ascending:
3139 3135 >>> rs = addset(xs, ys, ascending=True)
3140 3136 >>> # (use generator because pypy could call len())
3141 3137 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3142 3138 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3143 3139 >>> assert not rs._asclist
3144 3140 >>> len(rs)
3145 3141 5
3146 3142 >>> [x for x in rs], [x for x in rs.fastasc()]
3147 3143 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3148 3144 >>> assert rs._asclist
3149 3145
3150 3146 iterate descending:
3151 3147 >>> rs = addset(xs, ys, ascending=False)
3152 3148 >>> # (use generator because pypy could call len())
3153 3149 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3154 3150 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3155 3151 >>> assert not rs._asclist
3156 3152 >>> len(rs)
3157 3153 5
3158 3154 >>> [x for x in rs], [x for x in rs.fastdesc()]
3159 3155 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3160 3156 >>> assert rs._asclist
3161 3157
3162 3158 iterate ascending without fastasc:
3163 3159 >>> rs = addset(xs, generatorset(ys), ascending=True)
3164 3160 >>> assert rs.fastasc is None
3165 3161 >>> [x for x in rs]
3166 3162 [0, 2, 3, 4, 5]
3167 3163
3168 3164 iterate descending without fastdesc:
3169 3165 >>> rs = addset(generatorset(xs), ys, ascending=False)
3170 3166 >>> assert rs.fastdesc is None
3171 3167 >>> [x for x in rs]
3172 3168 [5, 4, 3, 2, 0]
3173 3169 """
3174 3170 def __init__(self, revs1, revs2, ascending=None):
3175 3171 self._r1 = revs1
3176 3172 self._r2 = revs2
3177 3173 self._iter = None
3178 3174 self._ascending = ascending
3179 3175 self._genlist = None
3180 3176 self._asclist = None
3181 3177
3182 3178 def __len__(self):
3183 3179 return len(self._list)
3184 3180
3185 3181 def __nonzero__(self):
3186 3182 return bool(self._r1) or bool(self._r2)
3187 3183
3188 3184 @util.propertycache
3189 3185 def _list(self):
3190 3186 if not self._genlist:
3191 3187 self._genlist = baseset(iter(self))
3192 3188 return self._genlist
3193 3189
3194 3190 def __iter__(self):
3195 3191 """Iterate over both collections without repeating elements
3196 3192
3197 3193 If the ascending attribute is not set, iterate over the first one and
3198 3194 then over the second one checking for membership on the first one so we
3199 3195 don't yield any duplicates.
3200 3196
3201 3197 If the ascending attribute is set, iterate over both collections at the
3202 3198 same time, yielding only one value at a time in the given order.
3203 3199 """
3204 3200 if self._ascending is None:
3205 3201 if self._genlist:
3206 3202 return iter(self._genlist)
3207 3203 def arbitraryordergen():
3208 3204 for r in self._r1:
3209 3205 yield r
3210 3206 inr1 = self._r1.__contains__
3211 3207 for r in self._r2:
3212 3208 if not inr1(r):
3213 3209 yield r
3214 3210 return arbitraryordergen()
3215 3211 # try to use our own fast iterator if it exists
3216 3212 self._trysetasclist()
3217 3213 if self._ascending:
3218 3214 attr = 'fastasc'
3219 3215 else:
3220 3216 attr = 'fastdesc'
3221 3217 it = getattr(self, attr)
3222 3218 if it is not None:
3223 3219 return it()
3224 3220 # maybe only half of the components support fast iteration
3225 3221 # get iterator for _r1
3226 3222 iter1 = getattr(self._r1, attr)
3227 3223 if iter1 is None:
3228 3224 # let's avoid side effects (not sure it matters)
3229 3225 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3230 3226 else:
3231 3227 iter1 = iter1()
3232 3228 # get iterator for _r2
3233 3229 iter2 = getattr(self._r2, attr)
3234 3230 if iter2 is None:
3235 3231 # let's avoid side effects (not sure it matters)
3236 3232 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3237 3233 else:
3238 3234 iter2 = iter2()
3239 3235 return _iterordered(self._ascending, iter1, iter2)
3240 3236
3241 3237 def _trysetasclist(self):
3242 3238 """populate the _asclist attribute if possible and necessary"""
3243 3239 if self._genlist is not None and self._asclist is None:
3244 3240 self._asclist = sorted(self._genlist)
3245 3241
3246 3242 @property
3247 3243 def fastasc(self):
3248 3244 self._trysetasclist()
3249 3245 if self._asclist is not None:
3250 3246 return self._asclist.__iter__
3251 3247 iter1 = self._r1.fastasc
3252 3248 iter2 = self._r2.fastasc
3253 3249 if None in (iter1, iter2):
3254 3250 return None
3255 3251 return lambda: _iterordered(True, iter1(), iter2())
3256 3252
3257 3253 @property
3258 3254 def fastdesc(self):
3259 3255 self._trysetasclist()
3260 3256 if self._asclist is not None:
3261 3257 return self._asclist.__reversed__
3262 3258 iter1 = self._r1.fastdesc
3263 3259 iter2 = self._r2.fastdesc
3264 3260 if None in (iter1, iter2):
3265 3261 return None
3266 3262 return lambda: _iterordered(False, iter1(), iter2())
3267 3263
3268 3264 def __contains__(self, x):
3269 3265 return x in self._r1 or x in self._r2
3270 3266
3271 3267 def sort(self, reverse=False):
3272 3268 """Sort the added set
3273 3269
3274 3270 For this we use the cached list with all the generated values and if we
3275 3271 know they are ascending or descending we can sort them in a smart way.
3276 3272 """
3277 3273 self._ascending = not reverse
3278 3274
3279 3275 def isascending(self):
3280 3276 return self._ascending is not None and self._ascending
3281 3277
3282 3278 def isdescending(self):
3283 3279 return self._ascending is not None and not self._ascending
3284 3280
3285 3281 def istopo(self):
3286 3282 # not worth the trouble asserting if the two sets combined are still
3287 3283 # in topological order. Use the sort() predicate to explicitly sort
3288 3284 # again instead.
3289 3285 return False
3290 3286
3291 3287 def reverse(self):
3292 3288 if self._ascending is None:
3293 3289 self._list.reverse()
3294 3290 else:
3295 3291 self._ascending = not self._ascending
3296 3292
3297 3293 def first(self):
3298 3294 for x in self:
3299 3295 return x
3300 3296 return None
3301 3297
3302 3298 def last(self):
3303 3299 self.reverse()
3304 3300 val = self.first()
3305 3301 self.reverse()
3306 3302 return val
3307 3303
3308 3304 def __repr__(self):
3309 3305 d = {None: '', False: '-', True: '+'}[self._ascending]
3310 3306 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3311 3307
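# Illustrative sketch (not part of the original module): membership on an
# addset is simply delegated to its two operands, and reverse() on an ordered
# addset just flips the direction flag instead of materializing a list.
def _addsetexample():
    rs = addset(baseset([1, 3]), baseset([2, 8]), ascending=True)
    assert 8 in rs and 5 not in rs   # checked against both operands
    rs.reverse()                     # O(1): only the _ascending flag changes
    assert rs.isdescending()
    assert list(rs) == [8, 3, 2, 1]
    return rs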
3312 3308 class generatorset(abstractsmartset):
3313 3309 """Wrap a generator for lazy iteration
3314 3310
3315 3311 Wrapper structure for generators that provides lazy membership and can
3316 3312 be iterated more than once.
3317 3313 When asked for membership it generates values until either it finds the
3318 3314 requested one or has gone through all the elements in the generator
3319 3315 """
3320 3316 def __init__(self, gen, iterasc=None):
3321 3317 """
3322 3318 gen: a generator producing the values for the generatorset.
3323 3319 """
3324 3320 self._gen = gen
3325 3321 self._asclist = None
3326 3322 self._cache = {}
3327 3323 self._genlist = []
3328 3324 self._finished = False
3329 3325 self._ascending = True
3330 3326 if iterasc is not None:
3331 3327 if iterasc:
3332 3328 self.fastasc = self._iterator
3333 3329 self.__contains__ = self._asccontains
3334 3330 else:
3335 3331 self.fastdesc = self._iterator
3336 3332 self.__contains__ = self._desccontains
3337 3333
3338 3334 def __nonzero__(self):
3339 3335 # Do not use 'for r in self' because it will enforce the iteration
3340 3336 # order (default ascending), possibly unrolling a whole descending
3341 3337 # iterator.
3342 3338 if self._genlist:
3343 3339 return True
3344 3340 for r in self._consumegen():
3345 3341 return True
3346 3342 return False
3347 3343
3348 3344 def __contains__(self, x):
3349 3345 if x in self._cache:
3350 3346 return self._cache[x]
3351 3347
3352 3348 # Use new values only, as existing values would be cached.
3353 3349 for l in self._consumegen():
3354 3350 if l == x:
3355 3351 return True
3356 3352
3357 3353 self._cache[x] = False
3358 3354 return False
3359 3355
3360 3356 def _asccontains(self, x):
3361 3357 """version of contains optimised for ascending generator"""
3362 3358 if x in self._cache:
3363 3359 return self._cache[x]
3364 3360
3365 3361 # Use new values only, as existing values would be cached.
3366 3362 for l in self._consumegen():
3367 3363 if l == x:
3368 3364 return True
3369 3365 if l > x:
3370 3366 break
3371 3367
3372 3368 self._cache[x] = False
3373 3369 return False
3374 3370
3375 3371 def _desccontains(self, x):
3376 3372 """version of contains optimised for descending generator"""
3377 3373 if x in self._cache:
3378 3374 return self._cache[x]
3379 3375
3380 3376 # Use new values only, as existing values would be cached.
3381 3377 for l in self._consumegen():
3382 3378 if l == x:
3383 3379 return True
3384 3380 if l < x:
3385 3381 break
3386 3382
3387 3383 self._cache[x] = False
3388 3384 return False
3389 3385
3390 3386 def __iter__(self):
3391 3387 if self._ascending:
3392 3388 it = self.fastasc
3393 3389 else:
3394 3390 it = self.fastdesc
3395 3391 if it is not None:
3396 3392 return it()
3397 3393 # we need to consume the iterator
3398 3394 for x in self._consumegen():
3399 3395 pass
3400 3396 # recall the same code
3401 3397 return iter(self)
3402 3398
3403 3399 def _iterator(self):
3404 3400 if self._finished:
3405 3401 return iter(self._genlist)
3406 3402
3407 3403 # We have to use this complex iteration strategy to allow multiple
3408 3404 # iterations at the same time. We need to be able to catch revisions
3409 3405 # consumed from _consumegen and added to genlist by another instance.
3410 3406 #
3411 3407 # Getting rid of it would provide about a 15% speedup on this
3412 3408 # iteration.
3413 3409 genlist = self._genlist
3414 3410 nextrev = self._consumegen().next
3415 3411 _len = len # cache global lookup
3416 3412 def gen():
3417 3413 i = 0
3418 3414 while True:
3419 3415 if i < _len(genlist):
3420 3416 yield genlist[i]
3421 3417 else:
3422 3418 yield nextrev()
3423 3419 i += 1
3424 3420 return gen()
3425 3421
3426 3422 def _consumegen(self):
3427 3423 cache = self._cache
3428 3424 genlist = self._genlist.append
3429 3425 for item in self._gen:
3430 3426 cache[item] = True
3431 3427 genlist(item)
3432 3428 yield item
3433 3429 if not self._finished:
3434 3430 self._finished = True
3435 3431 asc = self._genlist[:]
3436 3432 asc.sort()
3437 3433 self._asclist = asc
3438 3434 self.fastasc = asc.__iter__
3439 3435 self.fastdesc = asc.__reversed__
3440 3436
3441 3437 def __len__(self):
3442 3438 for x in self._consumegen():
3443 3439 pass
3444 3440 return len(self._genlist)
3445 3441
3446 3442 def sort(self, reverse=False):
3447 3443 self._ascending = not reverse
3448 3444
3449 3445 def reverse(self):
3450 3446 self._ascending = not self._ascending
3451 3447
3452 3448 def isascending(self):
3453 3449 return self._ascending
3454 3450
3455 3451 def isdescending(self):
3456 3452 return not self._ascending
3457 3453
3458 3454 def istopo(self):
3459 3455 # not worth the trouble asserting whether the set is still in
3460 3456 # topological order. Use the sort() predicate to explicitly sort
3461 3457 # again instead.
3462 3458 return False
3463 3459
3464 3460 def first(self):
3465 3461 if self._ascending:
3466 3462 it = self.fastasc
3467 3463 else:
3468 3464 it = self.fastdesc
3469 3465 if it is None:
3470 3466 # we need to consume all and try again
3471 3467 for x in self._consumegen():
3472 3468 pass
3473 3469 return self.first()
3474 3470 return next(it(), None)
3475 3471
3476 3472 def last(self):
3477 3473 if self._ascending:
3478 3474 it = self.fastdesc
3479 3475 else:
3480 3476 it = self.fastasc
3481 3477 if it is None:
3482 3478 # we need to consume all and try again
3483 3479 for x in self._consumegen():
3484 3480 pass
3485 3481 return self.last()
3486 3482 return next(it(), None)
3487 3483
3488 3484 def __repr__(self):
3489 3485 d = {False: '-', True: '+'}[self._ascending]
3490 3486 return '<%s%s>' % (type(self).__name__, d)
3491 3487
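# Illustrative sketch (not part of the original module): a generatorset only
# consumes its generator as far as needed, caches everything it has seen, and
# can be iterated in full more than once afterwards.
def _generatorsetexample():
    gs = generatorset((r * r for r in [1, 2, 3, 4]), iterasc=True)
    assert 4 in gs                     # consumes the generator only up to 4
    assert list(gs) == [1, 4, 9, 16]   # the rest is produced on demand
    assert 25 not in gs                # the generator is exhausted by now
    assert gs.first() == 1 and gs.last() == 16
    return gs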
3492 3488 class spanset(abstractsmartset):
3493 3489 """Duck type for baseset class which represents a range of revisions and
3494 3490 can work lazily and without having all the range in memory
3495 3491
3496 3492 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3497 3493 notable points:
3498 3494 - when y < x the set will be automatically descending,
3499 3495 - revisions filtered by the current repoview will be skipped.
3500 3496
3501 3497 """
3502 3498 def __init__(self, repo, start=0, end=None):
3503 3499 """
3504 3500 start: first revision included in the set
3505 3501 (defaults to 0)
3506 3502 end: first revision excluded (last + 1)
3507 3503 (defaults to len(repo))
3508 3504
3509 3505 Spanset will be descending if `end` < `start`.
3510 3506 """
3511 3507 if end is None:
3512 3508 end = len(repo)
3513 3509 self._ascending = start <= end
3514 3510 if not self._ascending:
3515 3511 start, end = end + 1, start + 1
3516 3512 self._start = start
3517 3513 self._end = end
3518 3514 self._hiddenrevs = repo.changelog.filteredrevs
3519 3515
3520 3516 def sort(self, reverse=False):
3521 3517 self._ascending = not reverse
3522 3518
3523 3519 def reverse(self):
3524 3520 self._ascending = not self._ascending
3525 3521
3526 3522 def istopo(self):
3527 3523 # not worth the trouble asserting whether the set is still in
3528 3524 # topological order. Use the sort() predicate to explicitly sort
3529 3525 # again instead.
3530 3526 return False
3531 3527
3532 3528 def _iterfilter(self, iterrange):
3533 3529 s = self._hiddenrevs
3534 3530 for r in iterrange:
3535 3531 if r not in s:
3536 3532 yield r
3537 3533
3538 3534 def __iter__(self):
3539 3535 if self._ascending:
3540 3536 return self.fastasc()
3541 3537 else:
3542 3538 return self.fastdesc()
3543 3539
3544 3540 def fastasc(self):
3545 3541 iterrange = xrange(self._start, self._end)
3546 3542 if self._hiddenrevs:
3547 3543 return self._iterfilter(iterrange)
3548 3544 return iter(iterrange)
3549 3545
3550 3546 def fastdesc(self):
3551 3547 iterrange = xrange(self._end - 1, self._start - 1, -1)
3552 3548 if self._hiddenrevs:
3553 3549 return self._iterfilter(iterrange)
3554 3550 return iter(iterrange)
3555 3551
3556 3552 def __contains__(self, rev):
3557 3553 hidden = self._hiddenrevs
3558 3554 return ((self._start <= rev < self._end)
3559 3555 and not (hidden and rev in hidden))
3560 3556
3561 3557 def __nonzero__(self):
3562 3558 for r in self:
3563 3559 return True
3564 3560 return False
3565 3561
3566 3562 def __len__(self):
3567 3563 if not self._hiddenrevs:
3568 3564 return abs(self._end - self._start)
3569 3565 else:
3570 3566 count = 0
3571 3567 start = self._start
3572 3568 end = self._end
3573 3569 for rev in self._hiddenrevs:
3574 3570 if (end < rev <= start) or (start <= rev < end):
3575 3571 count += 1
3576 3572 return abs(self._end - self._start) - count
3577 3573
3578 3574 def isascending(self):
3579 3575 return self._ascending
3580 3576
3581 3577 def isdescending(self):
3582 3578 return not self._ascending
3583 3579
3584 3580 def first(self):
3585 3581 if self._ascending:
3586 3582 it = self.fastasc
3587 3583 else:
3588 3584 it = self.fastdesc
3589 3585 for x in it():
3590 3586 return x
3591 3587 return None
3592 3588
3593 3589 def last(self):
3594 3590 if self._ascending:
3595 3591 it = self.fastdesc
3596 3592 else:
3597 3593 it = self.fastasc
3598 3594 for x in it():
3599 3595 return x
3600 3596 return None
3601 3597
3602 3598 def __repr__(self):
3603 3599 d = {False: '-', True: '+'}[self._ascending]
3604 3600 return '<%s%s %d:%d>' % (type(self).__name__, d,
3605 3601 self._start, self._end - 1)
3606 3602
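# Illustrative sketch (not part of the original module), assuming `repo` is a
# localrepository with at least six visible (non-hidden) revisions: a spanset
# covers a contiguous revision range without materializing it, and a reversed
# range iterates in descending order.
def _spansetexample(repo):
    s = spanset(repo, 0, 5)      # revisions 0, 1, 2, 3, 4 ascending
    assert s.isascending() and 3 in s
    d = spanset(repo, 5, 0)      # revisions 5, 4, 3, 2, 1 descending
    assert d.isdescending() and list(d) == [5, 4, 3, 2, 1]
    return s, d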
3607 3603 class fullreposet(spanset):
3608 3604 """a set containing all revisions in the repo
3609 3605
3610 3606 This class exists to host special optimization and magic to handle virtual
3611 3607 revisions such as "null".
3612 3608 """
3613 3609
3614 3610 def __init__(self, repo):
3615 3611 super(fullreposet, self).__init__(repo)
3616 3612
3617 3613 def __and__(self, other):
3618 3614 """As self contains the whole repo, all of the other set should also be
3619 3615 in self. Therefore `self & other = other`.
3620 3616
3621 3617 This boldly assumes the other contains valid revs only.
3622 3618 """
3623 3619 # other is not a smartset, make it so
3624 3620 if not util.safehasattr(other, 'isascending'):
3625 3621 # filter out hidden revisions
3626 3622 # (this boldly assumes all smartsets are pure)
3627 3623 #
3628 3624 # `other` was used with "&", let's assume this is a set-like
3629 3625 # object.
3630 3626 other = baseset(other - self._hiddenrevs)
3631 3627
3632 3628 # XXX As fullreposet is also used as bootstrap, this is wrong.
3633 3629 #
3634 3630 # With a giveme312() revset returning [3,1,2], this makes
3635 3631 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3636 3632 # We cannot just drop it because other usages still need to sort it:
3637 3633 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3638 3634 #
3639 3635 # There are also some faulty revset implementations that rely on it
3640 3636 # (eg: children as of its state in e8075329c5fb)
3641 3637 #
3642 3638 # When we fix the two points above we can move this into the if clause
3643 3639 other.sort(reverse=self.isdescending())
3644 3640 return other
3645 3641
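# Illustrative sketch (not part of the original module), assuming `repo` is a
# localrepository in which revisions 1, 2 and 3 exist and are visible:
# intersecting a fullreposet with a plain container wraps the other operand in
# a baseset and sorts it to match this set's direction (see the XXX comment in
# __and__ above).
def _fullreposetexample(repo):
    allrevs = fullreposet(repo)
    picked = allrevs & set([3, 1, 2])   # not a smartset, so it gets wrapped
    assert list(picked) == [1, 2, 3]    # sorted ascending like fullreposet
    return picked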
3646 3642 def prettyformatset(revs):
3647 3643 lines = []
3648 3644 rs = repr(revs)
3649 3645 p = 0
3650 3646 while p < len(rs):
3651 3647 q = rs.find('<', p + 1)
3652 3648 if q < 0:
3653 3649 q = len(rs)
3654 3650 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3655 3651 assert l >= 0
3656 3652 lines.append((l, rs[p:q].rstrip()))
3657 3653 p = q
3658 3654 return '\n'.join(' ' * l + s for l, s in lines)
3659 3655
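# Illustrative sketch (not part of the original module): prettyformatset()
# renders nested smartset reprs with one extra space of indentation per
# nesting level, which helps when debugging how a revset was evaluated.
# The example below prints roughly:
#
#   <filteredset
#    <addset+
#     <baseset [1, 3]>,
#     <baseset [2, 8]>>>
def _prettyformatsetexample():
    rs = filteredset(addset(baseset([1, 3]), baseset([2, 8]), ascending=True))
    return prettyformatset(rs)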
3660 3656 def loadpredicate(ui, extname, registrarobj):
3661 3657 """Load revset predicates from specified registrarobj
3662 3658 """
3663 3659 for name, func in registrarobj._table.iteritems():
3664 3660 symbols[name] = func
3665 3661 if func._safe:
3666 3662 safesymbols.add(name)
3667 3663
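# Illustrative sketch (not part of the original module): how loadpredicate()
# is meant to be used. An extension declares predicates with
# registrar.revsetpredicate() and Mercurial hands the resulting registrar
# object to loadpredicate(), which publishes the names in `symbols` (and in
# `safesymbols` when declared safe). The predicate name 'nonobsolete' below
# is purely hypothetical.
def _loadpredicateexample():
    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('nonobsolete()', safe=True)
    def nonobsolete(repo, subset, x):
        """Changesets that are not obsolete."""
        obsoletes = obsmod.getrevs(repo, 'obsolete')
        return subset.filter(lambda r: r not in obsoletes)

    loadpredicate(None, 'example', revsetpredicate)
    assert 'nonobsolete' in symbols and 'nonobsolete' in safesymbols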
3668 3664 # load built-in predicates explicitly to set up safesymbols
3669 3665 loadpredicate(None, None, predicate)
3670 3666
3671 3667 # tell hggettext to extract docstrings from these functions:
3672 3668 i18nfunctions = symbols.values()