##// END OF EJS Templates
revsets: use itervalues() where only values are needed...
Martin von Zweigbergk -
r29407:20fabe81 default
parent child Browse files
Show More
@@ -1,3668 +1,3668
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a generatorset lazily yielding the ancestors of ``revs``
    (including ``revs`` themselves) in descending revision order.  When
    ``followfirst`` is true, only first parents are followed.
    """
    if followfirst:
        # slice parentrevs(rev)[:1] -> walk first parents only
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # highest-first walk: a max-heap is simulated by pushing negated
        # revision numbers onto heapq's min-heap
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # fold the next input revision into the walk so its
                # ancestors get merged in revision order
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
62 62
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset of descendants of ``revs``
    (``revs`` themselves excluded).  When ``followfirst`` is true, a
    candidate only counts when its *first* parent descends from revs.
    """
    if followfirst:
        # inspect parentrevs(i)[:1] -> first parent only
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # descendants necessarily have revnums greater than min(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
91 91
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used when changelog.reachableroots (the C
    implementation) is unavailable; ``minroot`` bounds how far back the
    ancestor walk may go."""
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the root heads are wanted; no need to walk below
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending rev order): pull in every revision that has
    # a parent already known to be reachable, i.e. lies on a roots->heads
    # path
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    ``roots`` must be a smartset (``.min()`` is called on it); the
    result is a sorted baseset."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the C implementation when the changelog provides one
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # fall back to the pure-Python walk
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
146 146
# Grammar table for the revset language, consumed by parser.parser():
# each token maps to (binding strength, primary, prefix, infix, suffix)
# parse actions.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words the tokenizer must emit as operator tokens, never as symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: keep backslashes verbatim
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Any other node shape raises ParseError(err).
    """
    if x:
        kind = x[0]
        if kind in ('string', 'symbol'):
            return x[1]
    raise error.ParseError(err)
309 309
def getlist(x):
    """Flatten a parsed argument tree into a list of argument nodes.

    A missing tree yields [], a 'list' node yields its children, and
    any other node is returned as a one-element list.
    """
    if not x:
        return []
    return list(x[1:]) if x[0] == 'list' else [x]
316 316
def getargs(x, min, max, err):
    """Return the flattened argument list of x, checking its length.

    ``min``/``max`` bound the accepted argument count (a negative
    ``max`` means unbounded); counts out of range raise ParseError(err).
    """
    l = getlist(x)
    toofew = len(l) < min
    toomany = max >= 0 and len(l) > max
    if toofew or toomany:
        raise error.ParseError(err)
    return l
322 322
def getargsdict(x, funcname, keys):
    """Map a parsed argument tree to a dict of named arguments.

    ``keys`` is a space-separated string of accepted argument names;
    ``funcname`` is only used when building error messages.
    """
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
326 326
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` within ``subset``, returning a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset; pass it through untouched
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        # third-party predicate returned a plain list; warn once per use
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
340 340
def _getrevsource(repo, r):
    """Return the revision that r was copied from by a graft, transplant
    or rebase operation, or None when r records no such source."""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # the recorded source is unknown in this repo; keep looking
            pass
    return None
350 350
351 351 # operator methods
352 352
def stringset(repo, subset, x):
    # Resolve a bare revision identifier to a one-element set (or the
    # empty set if it falls outside 'subset').
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        # note: 'and' binds tighter than 'or' above -- nullrev is only
        # accepted implicitly when subset covers the whole repo
        return baseset([x])
    return baseset()
359 359
def rangeset(repo, subset, x, y):
    # Implements 'x:y': the numeric revision range between the first rev
    # of x and the last rev of y (inclusive, either direction), limited
    # to subset.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ascending range ending at the working-directory pseudo-rev,
        # which lives outside the spanset
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working-directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range, e.g. '5:2'
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
384 384
def dagrange(repo, subset, x, y):
    """Implements 'x::y': changesets on a DAG path between x and y,
    restricted to subset."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    between = reachableroots(repo, roots, heads, includepath=True)
    return subset & between
390 390
def andset(repo, subset, x, y):
    # 'x and y': evaluate y inside the result of evaluating x
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
393 393
def differenceset(repo, subset, x, y):
    # 'x - y': members of x not also in y
    included = getset(repo, subset, x)
    excluded = getset(repo, subset, y)
    return included - excluded
396 396
def orset(repo, subset, *xs):
    # Union of any number of expressions, combined divide-and-conquer so
    # the addition tree stays balanced.
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
405 405
def notset(repo, subset, x):
    # 'not x': everything in subset that x does not match
    excluded = getset(repo, subset, x)
    return subset - excluded
408 408
def listset(repo, subset, *xs):
    # A bare 'a,b' list is only legal inside a function call; reject it
    # with a pointer at the relevant help topic.
    msg = _("can't use a list in this context")
    hint = _('see hg help "revsets.x or y"')
    raise error.ParseError(msg, hint=hint)
412 412
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only legal inside a function argument list
    msg = _("can't use a key-value pair in this context")
    raise error.ParseError(msg)
415 415
def func(repo, subset, a, b):
    """Dispatch a parsed function call to the registered predicate.

    Unknown names raise UnknownIdentifier carrying every documented
    predicate name as a suggestion source.
    """
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # suggest only predicates that carry a docstring (hidden/experimental
    # ones have none)
    documented = [s for (s, fn) in symbols.items()
                  if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], documented)
424 424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator registering predicates into 'symbols' (and 'safesymbols'
# when declared with safe=True)
predicate = registrar.revsetpredicate()
441 441
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination (deliberately no
    # docstring, so it stays out of the documented-predicate listings)
    #
    # The funcname given to getargsdict is only used in parse-error
    # messages; pass our own name instead of the copy-pasted 'limit' so
    # errors blame the right predicate.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
455 455
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    err = _("adds requires a pattern")
    # field 1 selects the added-files list inside checkstatus
    return checkstatus(repo, subset, getstring(x, err), 1)
467 467
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold each revision into the running common ancestor
                anc = anc.ancestor(repo[r])

    # only report the ancestor when it falls inside the requested subset
    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
492 492
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation of ancestors() and _firstancestors().
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancestorset = _revancestors(repo, heads, followfirst)
    return subset & ancestorset
499 499
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x, followfirst=False)
505 505
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Equivalent to ``ancestors(set)`` but restricted to first parents.
    return _ancestors(repo, subset, x, followfirst=True)
511 511
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse-token tuple; the payload sits at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps along first parents
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
528 528
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    def hasauthor(rev):
        # compare case-insensitively against the commit's user field
        return matcher(encoding.lower(repo[rev].user()))
    return subset.filter(hasauthor, condrepr=('<user %r>', needle))
538 538
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
555 555
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # deprecated spelling of bisect(); kept so old queries keep working
    return bisect(repo, subset, x)
561 561
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: single lookup, error out when it is missing
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: scan every bookmark name for matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
599 599
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                # explicit 'literal:' for a missing branch is an error
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # treat the argument as a revset: collect the branches of its members
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
639 639
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
650 650
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    cl = repo.changelog
    # only a bundlerepo's changelog carries 'bundlerevs'
    if not util.safehasattr(cl, 'bundlerevs'):
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & cl.bundlerevs
662 662
def checkstatus(repo, subset, pat, field):
    """Filter subset to changesets whose status touches a file matching pat.

    ``field`` indexes the tuple returned by repo.status() -- e.g. adds()
    passes 1 to select the added-files list.
    """
    # 'set:' patterns depend on the changeset and must be rebuilt per rev
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        # fast path: a literal single-file pattern allows plain
        # membership tests instead of matching every file
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        # only now pay for the (expensive) status computation
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
def _children(repo, subset, parentset):
    # Return the members of subset with at least one parent in parentset.
    if not parentset:
        return baseset()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    cs = set()
    for r in subset:
        # a child always has a higher revnum than any of its parents
        if r <= minrev:
            continue
        if any(p in parentset for p in pr(r)):
            cs.add(r)
    return baseset(cs)
708 708
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
716 716
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(r):
        return repo[r].closesbranch()
    return subset.filter(isclosed, condrepr='<branch closed>')
725 725
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan every file in the manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
752 752
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # match on the 'convert_revision' extra recorded by the convert
        # extension; a prefix of the old id is enough
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<converted %r>', rev))
775 775
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    def indate(rev):
        # date()[0] is the commit timestamp component
        return dm(repo[rev].date()[0])
    return subset.filter(indate, condrepr=('<date %r>', ds))
785 785
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def indesc(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(indesc, condrepr=('<desc %r>', needle))
798 798
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants() and _firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again to restore its ordering
        result = subset & result
    return result
817 817
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x, followfirst=False)
823 823
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Equivalent to ``descendants(set)`` restricted to first parents.
    return _descendants(repo, subset, x, followfirst=True)
829 829
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        # seed with r's immediate recorded source, if it has one
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
874 874
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
884 884
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
893 893
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # true when r carries the label and, if requested, a matching value
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
924 924
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # changelog revisions collected for the result
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: look at exactly that one file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # real pattern: match against files in the working context
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: file node -> changelog rev discovered by the scan below
        known = {}
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
990 990
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # "first" and "limit" share identical semantics; delegate outright.
    return limit(repo, subset, x)
996 996
def _follow(repo, subset, x, name, followfirst=False):
    # Helper shared by follow()/_followfirst(): ancestors of '.', or of
    # the file revisions matched by an optional pattern.
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    wctx = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        s = set()
        for fname in wctx.manifest().walk(matcher):
            fctx = wctx[fname]
            # every ancestor revision of this file revision...
            s.update(actx.rev() for actx in fctx.ancestors(followfirst))
            # ...plus the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([wctx.rev()]), followfirst)

    return subset & s
1017 1017
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # thin wrapper over the shared helper, following both parents
    return _follow(repo, subset, x, 'follow')
1026 1026
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1033 1033
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    everything = spanset(repo)  # drop "null" if any
    return subset & everything
1041 1041
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # scan changed file names, user and description for a regex hit
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1062 1062
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # split "<kind>:<value>"; the kind prefix is always two chars
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry; use its context
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1126 1126
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles using a plain-pattern ('p:') argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1139 1139
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    # Only the per-branch head node lists are needed, not the branch names,
    # so iterate over the branchmap's values.  (The source contained both
    # the old iteritems() line and its itervalues() replacement from the
    # diff view; only the itervalues() form is kept.)
    for ls in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in ls)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1153 1153
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a head is any member that is not a parent of another member
    return getset(repo, subset, x) - parents(repo, subset, x)
1161 1161
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # everything the 'visible' repoview filters away
    return subset & repoview.filterrevs(repo, 'visible')
1170 1170
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # lowercase each candidate text for case-insensitive containment
        texts = c.files() + [c.user(), c.description()]
        return any(kw in encoding.lower(t) for t in texts)

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1185 1185
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # Note: the loop variable must not be named "x" — that would shadow the
    # parsed-argument parameter of the same name (a latent bug magnet).
    for _i in xrange(ofs):
        # discard the first "ofs" revisions
        y = next(it, None)
        if y is None:
            break
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1222 1222
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # Note: the loop variable must not be named "x" — that would shadow the
    # parsed-argument parameter of the same name (a latent bug magnet).
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1248 1248
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        m = None
    if m is not None and m in subset:
        return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1263 1263
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge, condrepr='<merge>')
1273 1273
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # child counter for each revision at or above baserev
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1293 1293
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        m = None
    if m is not None and m in subset:
        return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1308 1308
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == modified files
    return checkstatus(repo, subset, pat, 0)
1320 1320
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # collect every namespace whose name satisfies the matcher
        namespaces.update(v for k, v in repo.names.iteritems()
                          if matcher(k))
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name not in namespace.deprecated:
                names.update(repo[n].rev()
                             for n in namespace.nodes(repo, name))

    names.discard(node.nullrev)
    return subset & names
1358 1358
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # a full 40-character hex node: direct changelog lookup
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # a prefix: resolve it only if it is unambiguous
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1382 1382
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the revisions the obsstore flags as obsolete
    obsoleterevs = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoleterevs
1390 1390
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every repo head that neither descends from nor belongs
        # to the included set
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1416 1416
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to its first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1448 1448
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery chatter while computing outgoing changesets
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set(cl.rev(r) for r in outgoing.missing)
    return subset & missing
1475 1475
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1494 1494
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1517 1517
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; ask its
                # context objects for the parents instead
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1537 1537
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: precomputed per-phase rev sets are available
        revs = (repo._phasecache._phasesets[target]
                - repo.changelog.filteredrevs)
        s = baseset(revs)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target,
                             condrepr=('<phase %r>', target),
                             cache=False)
1551 1551
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1559 1559
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1567 1567
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: second parent, when one exists
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1592 1592
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revisions yield an empty result instead of aborting
        return baseset()
1606 1606
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: check each candidate's phase individually
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target,
                             condrepr=('<phase %r>', target),
                             cache=False)
1625 1625
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1636 1636
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if l:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1671 1671
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 == removed files
    return checkstatus(repo, subset, pat, 2)
1683 1683
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # unknown revisions (other than the null rev) yield an empty set
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1699 1699
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True)),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop at the first mismatch: fields are ordered by
                    # increasing cost, so this skips the expensive ones
                    # (e.g. 'diff') instead of evaluating them pointlessly
                    break
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1811 1811
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    # resolve the argument to a smartset, then flip it in place
    result = getset(repo, subset, x)
    result.reverse()
    return result
1819 1819
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no parent (other than the null revision) inside the
        # set itself
        return not any(p >= 0 and p in s for p in parentrevs(r))
    return subset & s.filter(isroot, condrepr='<roots>')
1832 1832
# map a sort key name to a function extracting the comparison key from a
# changectx; 'user' and 'author' are synonyms, and 'date' compares only the
# first element of the date tuple (the timestamp)
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1841 1841
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))
    else:
        keys = "rev"

    keyflags = []
    for fullkey in keys.split():
        # a leading '-' requests a descending sort on that key
        descending = fullkey.startswith('-')
        key = fullkey[1:] if descending else fullkey
        if key != 'topo' and key not in _sortkeyfuncs:
            raise error.ParseError(_("unknown sort key %r") % fullkey)
        keyflags.append((key, descending))

    hastopo = any(k == 'topo' for k, _desc in keyflags)
    if len(keyflags) > 1 and hastopo:
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if not hastopo:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))
        opts['topo.firstbranch'] = args['topo.firstbranch']

    return args['set'], keyflags, opts
1879 1879
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        return revs
    firstkey, descending = keyflags[0]
    if len(keyflags) == 1 and firstkey == "rev":
        # a plain revision-number sort can be done on the smartset itself
        revs.sort(reverse=descending)
        return revs
    if firstkey == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if descending:
            revs.reverse()
        return revs

    # general case: stable-sort contexts by each key, least significant
    # key first (sort() is guaranteed to be stable)
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1922 1922
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    'revs' is a list of revision numbers; note that it is sorted in place
    (in descending order) before iteration starts.  'parentsfunc' maps a
    revision to its parent revisions, in the manner of
    changelog.parentrevs.  'firstbranch' optionally pre-seeds the set of
    "unblocked" revisions, making the branch containing those revisions be
    emitted first.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2120 2120
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # restrict the status check to .hgsubstate: that is the file through
    # which subrepo state changes are recorded
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the names matching 'pat'; only called when pat is not None
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the revision and its first parent
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern given: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate was added: check the revision's own subrepos
            return any(submatches(c.substate.keys()))

        if s.modified:
            # .hgsubstate changed: a matching subrepo must differ between
            # the revision and its first parent
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate was removed: check the parent's subrepos
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2164 2164
def _substringmatcher(pattern):
    """Like util.stringmatcher(), but literal patterns match as substrings."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    # for a plain string, containment is enough
    return kind, pattern, lambda s: pattern in s
2170 2170
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name: every tagged revision, except the implicit 'tip' tag
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & s
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        s = set([repo[tn].rev()])
    else:
        s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & s
2199 2199
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # synonym for the tag() predicate (no docstring on purpose: predicate
    # docstrings become help text, and tag() already documents the behavior)
    return tag(repo, subset, x)
2203 2203
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect with the precomputed set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2212 2212
2213 2213
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is a synonym for 'author'; both share one implementation
    return author(repo, subset, x)
2223 2223
2224 2224 # experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is implicitly part of any
    # fullreposet; otherwise it must be an explicit member of the subset
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2232 2232
2233 2233 # for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # fall back to the generic symbol/string lookup
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is kept only for a fullreposet, matching the special
            # casing elsewhere (it is not a member of plain subsets)
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2262 2262
2263 2263 # for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions, keeping only those
    # present in the subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(t) for t in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2272 2272
2273 2273 # for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of hex node ids, keeping only the
    # corresponding revisions present in the subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2283 2283
# map a parse tree node type to the function evaluating nodes of that type;
# note that 'string' and 'symbol' share one implementation, and 'parentpost'
# is evaluated as p1
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2300 2300
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # Recognize the tree shape 'ancestors(REVS) and not ancestors(BASES)'
    # and return the argument list for an equivalent only(REVS, BASES)
    # call (see the 'and' case of _optimize); falls through to an implicit
    # None when the shape does not match.
    if (revs is not None
        and revs[0] == 'func'
        and getstring(revs[1], _('not a symbol')) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
2315 2315
def _optimize(x, small):
    """Optimize parsed tree 'x', returning a (weight, newtree) pair.

    The weight is a rough relative cost estimate for evaluating the tree;
    it is used to reorder 'and' operands so the cheaper side is evaluated
    first.  'small' hints that the subexpression is expected to produce a
    small result (e.g. an 'and' operand), giving single revisions a bonus
    weight below 1.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    # NOTE(review): 'op in <string>' is a substring test, not an exact name
    # comparison; 'negate' never reaches here (handled above), so only
    # 'string' and 'symbol' match in practice
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        # put the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial revisions into one _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = _optimize(x[2], small)
        # NOTE(review): the 'f in <string>' checks below are substring
        # tests, not exact name comparisons — TODO confirm no registered
        # function name collides with a fragment of these strings
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2443 2443
def optimize(tree):
    """Optimize evaluation of a parsed revset tree, discarding the weight."""
    return _optimize(tree, small=True)[1]
2447 2447
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (ASCII alphanumerics, '.', '_', '@', '$', plus every non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2452 2452
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    'lookup' is an optional callable forwarded to the tokenizer; callers
    pass a 'name in repo' test (see match()).  'syminitletters' optionally
    overrides the set of characters allowed to start a symbol.  Raises
    ParseError when the spec is syntactically invalid or has trailing
    tokens.

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    if pos != len(spec):
        # the tokenizer stopped before consuming the whole input
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
2473 2473
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    # section name used in error/warning messages
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # return (name, args) if 'tree' is a function call by symbol name;
        # otherwise fall through to an implicit None
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2492 2492
def expandaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config entries in the parsed tree.

    If 'showwarning' (a callable taking a message string) is given, emit a
    warning for each alias whose definition has an error, unless one was
    already issued for it.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    tree = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        # (only the values are needed; sort by alias name to keep the
        # warning order deterministic, as the iteritems() loop did)
        for alias in sorted(aliases.itervalues(), key=lambda a: a.name):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2503 2503
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: recurse into the children
        return tuple(foldconcat(t) for t in tree)
    # flatten nested '_concat' nodes depth-first, collecting the pieces in
    # left-to-right order, and join them into a single string node
    pieces = []
    stack = [tree]
    while stack:
        e = stack.pop()
        if e[0] == '_concat':
            stack.extend(reversed(e[1:]))
        elif e[0] in ('string', 'symbol'):
            pieces.append(e[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2524 2524
def parse(spec, lookup=None):
    """Parse a revset 'spec' with the default tokenizing options."""
    return _parsewith(spec, lookup=lookup)
2527 2527
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (called by _makematcher() after optimize(); does nothing by default)
    pass
2531 2531
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # when a repo is available, let the tokenizer check name existence
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2540 2540
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: match nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # combine multiple specs with a single top-level 'or' node
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2558 2558
def _makematcher(ui, tree, repo):
    """Compile a parse tree into a matcher callable.

    The returned function takes (repo, subset=None) and returns a smartset
    of matching revisions within 'subset' (the full repo by default).
    """
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # wrap plain containers so getset always receives a smartset
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2574 2574
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped form via repr of the stringified value
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(vals, t):
        count = len(vals)
        if count == 0:
            return "_list('')"
        if count == 1:
            return argtype(t, vals[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in vals)
        if t == 's':
            return "_list('%s')" % "\0".join(vals)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in vals)
        if t == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in vals)

        # no joinable form (e.g. %lr): or-combine the two halves recursively
        mid = count // 2
        return '(%s or %s)' % (listexp(vals[:mid], t), listexp(vals[mid:], t))

    out = []
    pos = 0
    argidx = 0
    while pos < len(expr):
        c = expr[pos]
        if c != '%':
            out.append(c)
            pos += 1
            continue
        # '%' introduces a format directive; read the type character
        pos += 1
        d = expr[pos]
        if d == '%':
            out.append(d)
        elif d in 'dsnbr':
            out.append(argtype(d, args[argidx]))
            argidx += 1
        elif d == 'l':
            # a list of some type
            pos += 1
            d = expr[pos]
            out.append(listexp(list(args[argidx]), d))
            argidx += 1
        else:
            raise error.Abort(_('unexpected revspec format character %s')
                              % d)
        pos += 1

    return ''.join(out)
2669 2669
def prettyformat(tree):
    """Return an indented, human-readable rendering of a parse tree.

    ('string', 'symbol') are the leaf node types passed through to
    parser.prettyformat.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2672 2672
def depth(tree):
    """Return the nesting depth of a parse tree node.

    Non-tuple leaves have depth 0; a tuple is one deeper than its
    deepest element.
    """
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2678 2678
def funcsused(tree):
    """Return the set of function names referenced by 'func' nodes in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        # a 'func' node carries its name as ('symbol', name) in slot 1
        funcs.add(tree[1][1])
    return funcs
2689 2689
2690 2690 def _formatsetrepr(r):
2691 2691 """Format an optional printable representation of a set
2692 2692
2693 2693 ======== =================================
2694 2694 type(r) example
2695 2695 ======== =================================
2696 2696 tuple ('<not %r>', other)
2697 2697 str '<branch closed>'
2698 2698 callable lambda: '<branch %r>' % sorted(b)
2699 2699 object other
2700 2700 ======== =================================
2701 2701 """
2702 2702 if r is None:
2703 2703 return ''
2704 2704 elif isinstance(r, tuple):
2705 2705 return r[0] % r[1:]
2706 2706 elif isinstance(r, str):
2707 2707 return r
2708 2708 elif callable(r):
2709 2709 return r()
2710 2710 else:
2711 2711 return repr(r)
2712 2712
class abstractsmartset(object):
    """Abstract base class defining the smartset API.

    A smartset is an ordered collection of revision numbers.  Subclasses
    must implement the abstract methods; the concrete helpers below
    (min/max, set operators, filter) are built on top of them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of a fast ascending iterator is the minimum;
            # an exhausted iterator means the set is empty
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    # NOTE(review): default here is reverse=True, but every concrete
    # implementation in this file declares sort(self, reverse=False) —
    # confirm which default is intended.
    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtins have no 'func_code' attribute (Python 2) and cannot be
        # wrapped by cachefunc, but they are cheap enough not to need it
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2825 2825
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending is a tri-state: None means "insertion order",
        # True/False mean sorted ascending/descending (see __iter__)
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy of _list, computed lazily and cached
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    # cached bound method: 'rev in s' goes straight to the underlying set
    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # no declared order yet: physically reverse the backing list
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2949 2949
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer a fast iterator that matches the declared direction,
        # then any fast iterator, then plain (filtered) iteration
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # non-empty iff at least one element survives the filter
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # iterate in the opposite direction when a fast iterator exists,
        # so the "last" element is the first one produced
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reverse iterator: exhaust the set and keep the last
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3062 3062
3063 3063 def _iterordered(ascending, iter1, iter2):
3064 3064 """produce an ordered iteration from two iterators with the same order
3065 3065
3066 3066 The ascending is used to indicated the iteration direction.
3067 3067 """
3068 3068 choice = max
3069 3069 if ascending:
3070 3070 choice = min
3071 3071
3072 3072 val1 = None
3073 3073 val2 = None
3074 3074 try:
3075 3075 # Consume both iterators in an ordered way until one is empty
3076 3076 while True:
3077 3077 if val1 is None:
3078 3078 val1 = next(iter1)
3079 3079 if val2 is None:
3080 3080 val2 = next(iter2)
3081 3081 n = choice(val1, val2)
3082 3082 yield n
3083 3083 if val1 == n:
3084 3084 val1 = None
3085 3085 if val2 == n:
3086 3086 val2 = None
3087 3087 except StopIteration:
3088 3088 # Flush any remaining values and consume the other one
3089 3089 it = iter2
3090 3090 if val1 is not None:
3091 3091 yield val1
3092 3092 it = iter1
3093 3093 elif val2 is not None:
3094 3094 # might have been equality and both are empty
3095 3095 yield val2
3096 3096 for val in it:
3097 3097 yield val
3098 3098
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter appears unused within this class — confirm
        # nothing outside relies on it before removing.
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize the union once, as a baseset, for len()/reverse()
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        # both sides must support fast ascending iteration to merge lazily
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip the direction, take the new first element, flip back
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3307 3307
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, declares that 'gen' yields values in
        ascending (True) or descending (False) order, enabling the
        optimised membership tests below.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains
        # NOTE(review): assigning __contains__ on the instance does not
        # affect the 'in' operator for new-style classes (special methods
        # are looked up on the type) — verify these overrides take effect.

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending generator has passed x: x cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code (fastasc/fastdesc are set once consumed)
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next  # py2: bound next method
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    # value already produced by a concurrent iteration
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        # generator exhausted: freeze the result and install the fast
        # iterators for subsequent traversals
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # retry last(), not first(): consuming installed fastasc and
            # fastdesc, so this recursion now returns the LAST element.
            # (the previous 'return self.first()' returned the wrong end)
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3487 3487
class spanset(abstractsmartset):
    """Duck type of baseset covering a contiguous range of revisions

    Works lazily: the full range is never materialized in memory.
    spanset(x, y) behaves much like xrange(x, y), except that:
    - when x > y the set automatically iterates in descending order,
    - revisions hidden by the current repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        The spanset is descending when `end` < `start`.
        """
        if end is None:
            end = len(repo)
        # normalize so _start <= _end always holds; the requested direction
        # is remembered separately in _ascending
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # the underlying range is inherently sorted; only the direction of
        # iteration can change
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # a plain numeric range carries no topological guarantee; callers
        # needing one must sort() explicitly
        return False

    def _iterfilter(self, iterrange):
        # yield only the revisions not hidden by the repoview
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        return self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        # non-empty iff at least one revision survives hidden filtering
        for rev in self:
            return True
        return False

    def __len__(self):
        size = abs(self._end - self._start)
        if not self._hiddenrevs:
            return size
        # subtract the hidden revisions that fall inside the span
        start = self._start
        end = self._end
        count = 0
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return size - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for rev in it():
            return rev
        return None

    def last(self):
        # the last element of the current order is the first element of the
        # opposite order
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for rev in it():
            return rev
        return None

    def __repr__(self):
        sign = '+' if self._ascending else '-'
        return '<%s%s %d:%d>' % (type(self).__name__, sign,
                                 self._start, self._end - 1)
3602 3602
class fullreposet(spanset):
    """a set containing every revision in the repository

    This class exists to host special optimization and magic to handle
    virtual revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """Since self spans the whole repo, every valid rev set is a subset
        of it, so `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset: it was used with "&", so assume it
            # is at least set-like, and filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3641 3641
3642 3642 def prettyformatset(revs):
3643 3643 lines = []
3644 3644 rs = repr(revs)
3645 3645 p = 0
3646 3646 while p < len(rs):
3647 3647 q = rs.find('<', p + 1)
3648 3648 if q < 0:
3649 3649 q = len(rs)
3650 3650 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3651 3651 assert l >= 0
3652 3652 lines.append((l, rs[p:q].rstrip()))
3653 3653 p = q
3654 3654 return '\n'.join(' ' * l + s for l, s in lines)
3655 3655
def loadpredicate(ui, extname, registrarobj):
    """Register every revset predicate found in ``registrarobj``.

    ``ui`` and ``extname`` are part of the extension-loading calling
    convention and are not used here.  Predicates flagged ``_safe`` are
    additionally recorded in ``safesymbols``.
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
3663 3663
# load built-in predicates explicitly to setup safesymbols
# (ui/extname are None: these are core predicates, not extension-provided)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now