revset: unnest isonly() closure from optimize()...
Yuya Nishihara
r29116:0c9b05da default
@@ -1,3390 +1,3390 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
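# A minimal illustration of the "cut" slicing used above: parentrevs() returns
# a (p1, p2) pair, so for a merge revision r
#
#   cl.parentrevs(r)[:1]    -> (p1,)       # followfirst=True
#   cl.parentrevs(r)[:None] -> (p1, p2)    # followfirst=False
#
# and null parents are skipped by the nullrev check in the loop.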
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
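# A minimal usage sketch (hypothetical revision numbers):
#
#   reachableroots(repo, baseset([2]), baseset([7]))
#       -> heads of "::2" that are also ancestors of 7
#   reachableroots(repo, baseset([2]), baseset([7]), includepath=True)
#       -> every revision on a path from 2 to 7, i.e. "2::7"
#
# The fast path uses the C implementation on repo.changelog when present;
# otherwise the pure-Python _reachablerootspure() above is used.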
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
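# A small sketch of how these binding strengths shape parsing: in "a + b:c",
# ":" binds at 15 while "+" binds at 4, so the range is grouped first and the
# tree comes out roughly as
# ('or', ('symbol', 'a'), ('range', ('symbol', 'b'), ('symbol', 'c'))).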
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
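# A further sketch of the '-' handling above: with no lookup callback,
# "foo-bar" is tokenized as an expression -- ('symbol', 'foo'), ('-', ...),
# ('symbol', 'bar') -- whereas a lookup() that recognizes "foo-bar" as a real
# name keeps it as a single ('symbol', 'foo-bar') token.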
303 303 # helpers
304 304
305 305 def getstring(x, err):
306 306 if x and (x[0] == 'string' or x[0] == 'symbol'):
307 307 return x[1]
308 308 raise error.ParseError(err)
309 309
310 310 def getlist(x):
311 311 if not x:
312 312 return []
313 313 if x[0] == 'list':
314 314 return list(x[1:])
315 315 return [x]
316 316
317 317 def getargs(x, min, max, err):
318 318 l = getlist(x)
319 319 if len(l) < min or (max >= 0 and len(l) > max):
320 320 raise error.ParseError(err)
321 321 return l
322 322
323 323 def getargsdict(x, funcname, keys):
324 324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
325 325 keyvaluenode='keyvalue', keynode='symbol')
326 326
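# A small sketch of getargsdict() on the "limit" predicate defined below: for
# the revset "limit(all(), 2, 1)" the parsed argument trees are mapped to
# {'set': ..., 'n': ..., 'offset': ...}, and keyword syntax such as
# "limit(all(), offset=1)" fills the same keys through 'keyvalue' nodes.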
327 327 def getset(repo, subset, x):
328 328 if not x:
329 329 raise error.ParseError(_("missing argument"))
330 330 s = methods[x[0]](repo, subset, *x[1:])
331 331 if util.safehasattr(s, 'isascending'):
332 332 return s
333 333 # else case should not happen, because all non-func are internal,
334 334 # ignoring for now.
335 335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
336 336 repo.ui.develwarn('revset "%s" use list instead of smartset, '
337 337 '(upgrade your code)' % x[1][1],
338 338 config='old-revset')
339 339 return baseset(s)
340 340
341 341 def _getrevsource(repo, r):
342 342 extra = repo[r].extra()
343 343 for label in ('source', 'transplant_source', 'rebase_source'):
344 344 if label in extra:
345 345 try:
346 346 return repo[extra[label]].rev()
347 347 except error.RepoLookupError:
348 348 pass
349 349 return None
350 350
351 351 # operator methods
352 352
353 353 def stringset(repo, subset, x):
354 354 x = repo[x].rev()
355 355 if (x in subset
356 356 or x == node.nullrev and isinstance(subset, fullreposet)):
357 357 return baseset([x])
358 358 return baseset()
359 359
360 360 def rangeset(repo, subset, x, y):
361 361 m = getset(repo, fullreposet(repo), x)
362 362 n = getset(repo, fullreposet(repo), y)
363 363
364 364 if not m or not n:
365 365 return baseset()
366 366 m, n = m.first(), n.last()
367 367
368 368 if m == n:
369 369 r = baseset([m])
370 370 elif n == node.wdirrev:
371 371 r = spanset(repo, m, len(repo)) + baseset([n])
372 372 elif m == node.wdirrev:
373 373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
374 374 elif m < n:
375 375 r = spanset(repo, m, n + 1)
376 376 else:
377 377 r = spanset(repo, m, n - 1)
378 378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 379 # necessary to ensure we preserve the order in subset.
380 380 #
381 381 # This has performance implication, carrying the sorting over when possible
382 382 # would be more efficient.
383 383 return r & subset
384 384
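# Sketch of the branches above with hypothetical revisions: "4:4" yields just
# [4], "2:5" the ascending span 2..5, and "5:2" the descending span 5..2; a
# working-directory endpoint (wdirrev) is appended as an extra baseset.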
385 385 def dagrange(repo, subset, x, y):
386 386 r = fullreposet(repo)
387 387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
388 388 includepath=True)
389 389 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
390 390 # necessary to ensure we preserve the order in subset.
391 391 return xs & subset
392 392
393 393 def andset(repo, subset, x, y):
394 394 return getset(repo, getset(repo, subset, x), y)
395 395
396 396 def differenceset(repo, subset, x, y):
397 397 return getset(repo, subset, x) - getset(repo, subset, y)
398 398
399 399 def orset(repo, subset, *xs):
400 400 assert xs
401 401 if len(xs) == 1:
402 402 return getset(repo, subset, xs[0])
403 403 p = len(xs) // 2
404 404 a = orset(repo, subset, *xs[:p])
405 405 b = orset(repo, subset, *xs[p:])
406 406 return a + b
407 407
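# Splitting the operand list at its midpoint keeps the resulting addition tree
# of "a + b + c + ..." roughly balanced, so a long "or" chain is combined in
# O(log n) depth instead of as a left-leaning chain of addsets.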
408 408 def notset(repo, subset, x):
409 409 return subset - getset(repo, subset, x)
410 410
411 411 def listset(repo, subset, *xs):
412 412 raise error.ParseError(_("can't use a list in this context"),
413 413 hint=_('see hg help "revsets.x or y"'))
414 414
415 415 def keyvaluepair(repo, subset, k, v):
416 416 raise error.ParseError(_("can't use a key-value pair in this context"))
417 417
418 418 def func(repo, subset, a, b):
419 419 if a[0] == 'symbol' and a[1] in symbols:
420 420 return symbols[a[1]](repo, subset, b)
421 421
422 422 keep = lambda fn: getattr(fn, '__doc__', None) is not None
423 423
424 424 syms = [s for (s, fn) in symbols.items() if keep(fn)]
425 425 raise error.UnknownIdentifier(a[1], syms)
426 426
427 427 # functions
428 428
429 429 # symbols are callables like:
430 430 # fn(repo, subset, x)
431 431 # with:
432 432 # repo - current repository instance
433 433 # subset - of revisions to be examined
434 434 # x - argument in tree form
435 435 symbols = {}
436 436
437 437 # symbols which can't be used for a DoS attack for any given input
438 438 # (e.g. those which accept regexes as plain strings shouldn't be included)
439 439 # functions that just return a lot of changesets (like all) don't count here
440 440 safesymbols = set()
441 441
442 442 predicate = registrar.revsetpredicate()
443 443
444 444 @predicate('_destupdate')
445 445 def _destupdate(repo, subset, x):
446 446 # experimental revset for update destination
447 447 args = getargsdict(x, 'limit', 'clean check')
448 448 return subset & baseset([destutil.destupdate(repo, **args)[0]])
449 449
450 450 @predicate('_destmerge')
451 451 def _destmerge(repo, subset, x):
452 452 # experimental revset for merge destination
453 453 sourceset = None
454 454 if x is not None:
455 455 sourceset = getset(repo, fullreposet(repo), x)
456 456 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
457 457
458 458 @predicate('adds(pattern)', safe=True)
459 459 def adds(repo, subset, x):
460 460 """Changesets that add a file matching pattern.
461 461
462 462 The pattern without explicit kind like ``glob:`` is expected to be
463 463 relative to the current directory and match against a file or a
464 464 directory.
465 465 """
466 466 # i18n: "adds" is a keyword
467 467 pat = getstring(x, _("adds requires a pattern"))
468 468 return checkstatus(repo, subset, pat, 1)
469 469
470 470 @predicate('ancestor(*changeset)', safe=True)
471 471 def ancestor(repo, subset, x):
472 472 """A greatest common ancestor of the changesets.
473 473
474 474 Accepts 0 or more changesets.
475 475 Will return empty list when passed no args.
476 476 Greatest common ancestor of a single changeset is that changeset.
477 477 """
478 478 # i18n: "ancestor" is a keyword
479 479 l = getlist(x)
480 480 rl = fullreposet(repo)
481 481 anc = None
482 482
483 483 # (getset(repo, rl, i) for i in l) generates a list of lists
484 484 for revs in (getset(repo, rl, i) for i in l):
485 485 for r in revs:
486 486 if anc is None:
487 487 anc = repo[r]
488 488 else:
489 489 anc = anc.ancestor(repo[r])
490 490
491 491 if anc is not None and anc.rev() in subset:
492 492 return baseset([anc.rev()])
493 493 return baseset()
494 494
495 495 def _ancestors(repo, subset, x, followfirst=False):
496 496 heads = getset(repo, fullreposet(repo), x)
497 497 if not heads:
498 498 return baseset()
499 499 s = _revancestors(repo, heads, followfirst)
500 500 return subset & s
501 501
502 502 @predicate('ancestors(set)', safe=True)
503 503 def ancestors(repo, subset, x):
504 504 """Changesets that are ancestors of a changeset in set.
505 505 """
506 506 return _ancestors(repo, subset, x)
507 507
508 508 @predicate('_firstancestors', safe=True)
509 509 def _firstancestors(repo, subset, x):
510 510 # ``_firstancestors(set)``
511 511 # Like ``ancestors(set)`` but follows only the first parents.
512 512 return _ancestors(repo, subset, x, followfirst=True)
513 513
514 514 def ancestorspec(repo, subset, x, n):
515 515 """``set~n``
516 516 Changesets that are the Nth ancestor (first parents only) of a changeset
517 517 in set.
518 518 """
519 519 try:
520 520 n = int(n[1])
521 521 except (TypeError, ValueError):
522 522 raise error.ParseError(_("~ expects a number"))
523 523 ps = set()
524 524 cl = repo.changelog
525 525 for r in getset(repo, fullreposet(repo), x):
526 526 for i in range(n):
527 527 r = cl.parentrevs(r)[0]
528 528 ps.add(r)
529 529 return subset & ps
530 530
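# For instance, "foo~2" resolves here with n=2: starting from each revision in
# "foo", the first-parent link is followed twice, so "foo~2" selects the
# first-parent grandparents of "foo" (and "foo~0" is "foo" itself).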
531 531 @predicate('author(string)', safe=True)
532 532 def author(repo, subset, x):
533 533 """Alias for ``user(string)``.
534 534 """
535 535 # i18n: "author" is a keyword
536 536 n = encoding.lower(getstring(x, _("author requires a string")))
537 537 kind, pattern, matcher = _substringmatcher(n)
538 538 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
539 539 condrepr=('<user %r>', n))
540 540
541 541 @predicate('bisect(string)', safe=True)
542 542 def bisect(repo, subset, x):
543 543 """Changesets marked in the specified bisect status:
544 544
545 545 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
546 546 - ``goods``, ``bads`` : csets topologically good/bad
547 547 - ``range`` : csets taking part in the bisection
548 548 - ``pruned`` : csets that are goods, bads or skipped
549 549 - ``untested`` : csets whose fate is yet unknown
550 550 - ``ignored`` : csets ignored due to DAG topology
551 551 - ``current`` : the cset currently being bisected
552 552 """
553 553 # i18n: "bisect" is a keyword
554 554 status = getstring(x, _("bisect requires a string")).lower()
555 555 state = set(hbisect.get(repo, status))
556 556 return subset & state
557 557
558 558 # Backward-compatibility
559 559 # - no help entry so that we do not advertise it any more
560 560 @predicate('bisected', safe=True)
561 561 def bisected(repo, subset, x):
562 562 return bisect(repo, subset, x)
563 563
564 564 @predicate('bookmark([name])', safe=True)
565 565 def bookmark(repo, subset, x):
566 566 """The named bookmark or all bookmarks.
567 567
568 568 If `name` starts with `re:`, the remainder of the name is treated as
569 569 a regular expression. To match a bookmark that actually starts with `re:`,
570 570 use the prefix `literal:`.
571 571 """
572 572 # i18n: "bookmark" is a keyword
573 573 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
574 574 if args:
575 575 bm = getstring(args[0],
576 576 # i18n: "bookmark" is a keyword
577 577 _('the argument to bookmark must be a string'))
578 578 kind, pattern, matcher = util.stringmatcher(bm)
579 579 bms = set()
580 580 if kind == 'literal':
581 581 bmrev = repo._bookmarks.get(pattern, None)
582 582 if not bmrev:
583 583 raise error.RepoLookupError(_("bookmark '%s' does not exist")
584 584 % pattern)
585 585 bms.add(repo[bmrev].rev())
586 586 else:
587 587 matchrevs = set()
588 588 for name, bmrev in repo._bookmarks.iteritems():
589 589 if matcher(name):
590 590 matchrevs.add(bmrev)
591 591 if not matchrevs:
592 592 raise error.RepoLookupError(_("no bookmarks exist"
593 593 " that match '%s'") % pattern)
594 594 for bmrev in matchrevs:
595 595 bms.add(repo[bmrev].rev())
596 596 else:
597 597 bms = set([repo[r].rev()
598 598 for r in repo._bookmarks.values()])
599 599 bms -= set([node.nullrev])
600 600 return subset & bms
601 601
602 602 @predicate('branch(string or set)', safe=True)
603 603 def branch(repo, subset, x):
604 604 """
605 605 All changesets belonging to the given branch or the branches of the given
606 606 changesets.
607 607
608 608 If `string` starts with `re:`, the remainder of the name is treated as
609 609 a regular expression. To match a branch that actually starts with `re:`,
610 610 use the prefix `literal:`.
611 611 """
612 612 getbi = repo.revbranchcache().branchinfo
613 613
614 614 try:
615 615 b = getstring(x, '')
616 616 except error.ParseError:
617 617 # not a string, but another revspec, e.g. tip()
618 618 pass
619 619 else:
620 620 kind, pattern, matcher = util.stringmatcher(b)
621 621 if kind == 'literal':
622 622 # note: falls through to the revspec case if no branch with
623 623 # this name exists and pattern kind is not specified explicitly
624 624 if pattern in repo.branchmap():
625 625 return subset.filter(lambda r: matcher(getbi(r)[0]),
626 626 condrepr=('<branch %r>', b))
627 627 if b.startswith('literal:'):
628 628 raise error.RepoLookupError(_("branch '%s' does not exist")
629 629 % pattern)
630 630 else:
631 631 return subset.filter(lambda r: matcher(getbi(r)[0]),
632 632 condrepr=('<branch %r>', b))
633 633
634 634 s = getset(repo, fullreposet(repo), x)
635 635 b = set()
636 636 for r in s:
637 637 b.add(getbi(r)[0])
638 638 c = s.__contains__
639 639 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
640 640 condrepr=lambda: '<branch %r>' % sorted(b))
641 641
642 642 @predicate('bumped()', safe=True)
643 643 def bumped(repo, subset, x):
644 644 """Mutable changesets marked as successors of public changesets.
645 645
646 646 Only non-public and non-obsolete changesets can be `bumped`.
647 647 """
648 648 # i18n: "bumped" is a keyword
649 649 getargs(x, 0, 0, _("bumped takes no arguments"))
650 650 bumped = obsmod.getrevs(repo, 'bumped')
651 651 return subset & bumped
652 652
653 653 @predicate('bundle()', safe=True)
654 654 def bundle(repo, subset, x):
655 655 """Changesets in the bundle.
656 656
657 657 Bundle must be specified by the -R option."""
658 658
659 659 try:
660 660 bundlerevs = repo.changelog.bundlerevs
661 661 except AttributeError:
662 662 raise error.Abort(_("no bundle provided - specify with -R"))
663 663 return subset & bundlerevs
664 664
665 665 def checkstatus(repo, subset, pat, field):
666 666 hasset = matchmod.patkind(pat) == 'set'
667 667
668 668 mcache = [None]
669 669 def matches(x):
670 670 c = repo[x]
671 671 if not mcache[0] or hasset:
672 672 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
673 673 m = mcache[0]
674 674 fname = None
675 675 if not m.anypats() and len(m.files()) == 1:
676 676 fname = m.files()[0]
677 677 if fname is not None:
678 678 if fname not in c.files():
679 679 return False
680 680 else:
681 681 for f in c.files():
682 682 if m(f):
683 683 break
684 684 else:
685 685 return False
686 686 files = repo.status(c.p1().node(), c.node())[field]
687 687 if fname is not None:
688 688 if fname in files:
689 689 return True
690 690 else:
691 691 for f in files:
692 692 if m(f):
693 693 return True
694 694
695 695 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
696 696
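# The "field" index above selects a slot of the repo.status() tuple: 0 is the
# list of modified files and 1 the list of added files, which is why adds()
# passes 1 and modifies() passes 0 elsewhere in this module.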
697 697 def _children(repo, narrow, parentset):
698 698 if not parentset:
699 699 return baseset()
700 700 cs = set()
701 701 pr = repo.changelog.parentrevs
702 702 minrev = parentset.min()
703 703 for r in narrow:
704 704 if r <= minrev:
705 705 continue
706 706 for p in pr(r):
707 707 if p in parentset:
708 708 cs.add(r)
709 709 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
710 710 # This does not break because of other fullreposet misbehavior.
711 711 return baseset(cs)
712 712
713 713 @predicate('children(set)', safe=True)
714 714 def children(repo, subset, x):
715 715 """Child changesets of changesets in set.
716 716 """
717 717 s = getset(repo, fullreposet(repo), x)
718 718 cs = _children(repo, subset, s)
719 719 return subset & cs
720 720
721 721 @predicate('closed()', safe=True)
722 722 def closed(repo, subset, x):
723 723 """Changeset is closed.
724 724 """
725 725 # i18n: "closed" is a keyword
726 726 getargs(x, 0, 0, _("closed takes no arguments"))
727 727 return subset.filter(lambda r: repo[r].closesbranch(),
728 728 condrepr='<branch closed>')
729 729
730 730 @predicate('contains(pattern)')
731 731 def contains(repo, subset, x):
732 732 """The revision's manifest contains a file matching pattern (but might not
733 733 modify it). See :hg:`help patterns` for information about file patterns.
734 734
735 735 The pattern without explicit kind like ``glob:`` is expected to be
736 736 relative to the current directory and match against a file exactly
737 737 for efficiency.
738 738 """
739 739 # i18n: "contains" is a keyword
740 740 pat = getstring(x, _("contains requires a pattern"))
741 741
742 742 def matches(x):
743 743 if not matchmod.patkind(pat):
744 744 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
745 745 if pats in repo[x]:
746 746 return True
747 747 else:
748 748 c = repo[x]
749 749 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
750 750 for f in c.manifest():
751 751 if m(f):
752 752 return True
753 753 return False
754 754
755 755 return subset.filter(matches, condrepr=('<contains %r>', pat))
756 756
757 757 @predicate('converted([id])', safe=True)
758 758 def converted(repo, subset, x):
759 759 """Changesets converted from the given identifier in the old repository if
760 760 present, or all converted changesets if no identifier is specified.
761 761 """
762 762
763 763 # There is exactly no chance of resolving the revision, so do a simple
764 764 # string compare and hope for the best
765 765
766 766 rev = None
767 767 # i18n: "converted" is a keyword
768 768 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
769 769 if l:
770 770 # i18n: "converted" is a keyword
771 771 rev = getstring(l[0], _('converted requires a revision'))
772 772
773 773 def _matchvalue(r):
774 774 source = repo[r].extra().get('convert_revision', None)
775 775 return source is not None and (rev is None or source.startswith(rev))
776 776
777 777 return subset.filter(lambda r: _matchvalue(r),
778 778 condrepr=('<converted %r>', rev))
779 779
780 780 @predicate('date(interval)', safe=True)
781 781 def date(repo, subset, x):
782 782 """Changesets within the interval, see :hg:`help dates`.
783 783 """
784 784 # i18n: "date" is a keyword
785 785 ds = getstring(x, _("date requires a string"))
786 786 dm = util.matchdate(ds)
787 787 return subset.filter(lambda x: dm(repo[x].date()[0]),
788 788 condrepr=('<date %r>', ds))
789 789
790 790 @predicate('desc(string)', safe=True)
791 791 def desc(repo, subset, x):
792 792 """Search commit message for string. The match is case-insensitive.
793 793 """
794 794 # i18n: "desc" is a keyword
795 795 ds = encoding.lower(getstring(x, _("desc requires a string")))
796 796
797 797 def matches(x):
798 798 c = repo[x]
799 799 return ds in encoding.lower(c.description())
800 800
801 801 return subset.filter(matches, condrepr=('<desc %r>', ds))
802 802
803 803 def _descendants(repo, subset, x, followfirst=False):
804 804 roots = getset(repo, fullreposet(repo), x)
805 805 if not roots:
806 806 return baseset()
807 807 s = _revdescendants(repo, roots, followfirst)
808 808
809 809 # Both sets need to be ascending in order to lazily return the union
810 810 # in the correct order.
811 811 base = subset & roots
812 812 desc = subset & s
813 813 result = base + desc
814 814 if subset.isascending():
815 815 result.sort()
816 816 elif subset.isdescending():
817 817 result.sort(reverse=True)
818 818 else:
819 819 result = subset & result
820 820 return result
821 821
822 822 @predicate('descendants(set)', safe=True)
823 823 def descendants(repo, subset, x):
824 824 """Changesets which are descendants of changesets in set.
825 825 """
826 826 return _descendants(repo, subset, x)
827 827
828 828 @predicate('_firstdescendants', safe=True)
829 829 def _firstdescendants(repo, subset, x):
830 830 # ``_firstdescendants(set)``
831 831 # Like ``descendants(set)`` but follows only the first parents.
832 832 return _descendants(repo, subset, x, followfirst=True)
833 833
834 834 @predicate('destination([set])', safe=True)
835 835 def destination(repo, subset, x):
836 836 """Changesets that were created by a graft, transplant or rebase operation,
837 837 with the given revisions specified as the source. Omitting the optional set
838 838 is the same as passing all().
839 839 """
840 840 if x is not None:
841 841 sources = getset(repo, fullreposet(repo), x)
842 842 else:
843 843 sources = fullreposet(repo)
844 844
845 845 dests = set()
846 846
847 847 # subset contains all of the possible destinations that can be returned, so
848 848 # iterate over them and see if their source(s) were provided in the arg set.
849 849 # Even if the immediate src of r is not in the arg set, src's source (or
850 850 # further back) may be. Scanning back further than the immediate src allows
851 851 # transitive transplants and rebases to yield the same results as transitive
852 852 # grafts.
853 853 for r in subset:
854 854 src = _getrevsource(repo, r)
855 855 lineage = None
856 856
857 857 while src is not None:
858 858 if lineage is None:
859 859 lineage = list()
860 860
861 861 lineage.append(r)
862 862
863 863 # The visited lineage is a match if the current source is in the arg
864 864 # set. Since every candidate dest is visited by way of iterating
865 865 # subset, any dests further back in the lineage will be tested by a
866 866 # different iteration over subset. Likewise, if the src was already
867 867 # selected, the current lineage can be selected without going back
868 868 # further.
869 869 if src in sources or src in dests:
870 870 dests.update(lineage)
871 871 break
872 872
873 873 r = src
874 874 src = _getrevsource(repo, r)
875 875
876 876 return subset.filter(dests.__contains__,
877 877 condrepr=lambda: '<destination %r>' % sorted(dests))
878 878
879 879 @predicate('divergent()', safe=True)
880 880 def divergent(repo, subset, x):
881 881 """
882 882 Final successors of changesets with an alternative set of final successors.
883 883 """
884 884 # i18n: "divergent" is a keyword
885 885 getargs(x, 0, 0, _("divergent takes no arguments"))
886 886 divergent = obsmod.getrevs(repo, 'divergent')
887 887 return subset & divergent
888 888
889 889 @predicate('extinct()', safe=True)
890 890 def extinct(repo, subset, x):
891 891 """Obsolete changesets with obsolete descendants only.
892 892 """
893 893 # i18n: "extinct" is a keyword
894 894 getargs(x, 0, 0, _("extinct takes no arguments"))
895 895 extincts = obsmod.getrevs(repo, 'extinct')
896 896 return subset & extincts
897 897
898 898 @predicate('extra(label, [value])', safe=True)
899 899 def extra(repo, subset, x):
900 900 """Changesets with the given label in the extra metadata, with the given
901 901 optional value.
902 902
903 903 If `value` starts with `re:`, the remainder of the value is treated as
904 904 a regular expression. To match a value that actually starts with `re:`,
905 905 use the prefix `literal:`.
906 906 """
907 907 args = getargsdict(x, 'extra', 'label value')
908 908 if 'label' not in args:
909 909 # i18n: "extra" is a keyword
910 910 raise error.ParseError(_('extra takes at least 1 argument'))
911 911 # i18n: "extra" is a keyword
912 912 label = getstring(args['label'], _('first argument to extra must be '
913 913 'a string'))
914 914 value = None
915 915
916 916 if 'value' in args:
917 917 # i18n: "extra" is a keyword
918 918 value = getstring(args['value'], _('second argument to extra must be '
919 919 'a string'))
920 920 kind, value, matcher = util.stringmatcher(value)
921 921
922 922 def _matchvalue(r):
923 923 extra = repo[r].extra()
924 924 return label in extra and (value is None or matcher(extra[label]))
925 925
926 926 return subset.filter(lambda r: _matchvalue(r),
927 927 condrepr=('<extra[%r] %r>', label, value))
928 928
929 929 @predicate('filelog(pattern)', safe=True)
930 930 def filelog(repo, subset, x):
931 931 """Changesets connected to the specified filelog.
932 932
933 933 For performance reasons, visits only revisions mentioned in the file-level
934 934 filelog, rather than filtering through all changesets (much faster, but
935 935 doesn't include deletes or duplicate changes). For a slower, more accurate
936 936 result, use ``file()``.
937 937
938 938 The pattern without explicit kind like ``glob:`` is expected to be
939 939 relative to the current directory and match against a file exactly
940 940 for efficiency.
941 941
942 942 If some linkrev points to revisions filtered by the current repoview, we'll
943 943 work around it to return a non-filtered value.
944 944 """
945 945
946 946 # i18n: "filelog" is a keyword
947 947 pat = getstring(x, _("filelog requires a pattern"))
948 948 s = set()
949 949 cl = repo.changelog
950 950
951 951 if not matchmod.patkind(pat):
952 952 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
953 953 files = [f]
954 954 else:
955 955 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
956 956 files = (f for f in repo[None] if m(f))
957 957
958 958 for f in files:
959 959 fl = repo.file(f)
960 960 known = {}
961 961 scanpos = 0
962 962 for fr in list(fl):
963 963 fn = fl.node(fr)
964 964 if fn in known:
965 965 s.add(known[fn])
966 966 continue
967 967
968 968 lr = fl.linkrev(fr)
969 969 if lr in cl:
970 970 s.add(lr)
971 971 elif scanpos is not None:
972 972 # lowest matching changeset is filtered, scan further
973 973 # ahead in changelog
974 974 start = max(lr, scanpos) + 1
975 975 scanpos = None
976 976 for r in cl.revs(start):
977 977 # minimize parsing of non-matching entries
978 978 if f in cl.revision(r) and f in cl.readfiles(r):
979 979 try:
980 980 # try to use manifest delta fastpath
981 981 n = repo[r].filenode(f)
982 982 if n not in known:
983 983 if n == fn:
984 984 s.add(r)
985 985 scanpos = r
986 986 break
987 987 else:
988 988 known[n] = r
989 989 except error.ManifestLookupError:
990 990 # deletion in changelog
991 991 continue
992 992
993 993 return subset & s
994 994
995 995 @predicate('first(set, [n])', safe=True)
996 996 def first(repo, subset, x):
997 997 """An alias for limit().
998 998 """
999 999 return limit(repo, subset, x)
1000 1000
1001 1001 def _follow(repo, subset, x, name, followfirst=False):
1002 1002 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1003 1003 c = repo['.']
1004 1004 if l:
1005 1005 x = getstring(l[0], _("%s expected a pattern") % name)
1006 1006 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1007 1007 ctx=repo[None], default='path')
1008 1008
1009 1009 files = c.manifest().walk(matcher)
1010 1010
1011 1011 s = set()
1012 1012 for fname in files:
1013 1013 fctx = c[fname]
1014 1014 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1015 1015 # include the revision responsible for the most recent version
1016 1016 s.add(fctx.introrev())
1017 1017 else:
1018 1018 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1019 1019
1020 1020 return subset & s
1021 1021
1022 1022 @predicate('follow([pattern])', safe=True)
1023 1023 def follow(repo, subset, x):
1024 1024 """
1025 1025 An alias for ``::.`` (ancestors of the working directory's first parent).
1026 1026 If pattern is specified, the histories of files matching the given
1027 1027 pattern are followed, including copies.
1028 1028 """
1029 1029 return _follow(repo, subset, x, 'follow')
1030 1030
1031 1031 @predicate('_followfirst', safe=True)
1032 1032 def _followfirst(repo, subset, x):
1033 1033 # ``followfirst([pattern])``
1034 1034 # Like ``follow([pattern])`` but follows only the first parent of
1035 1035 # every revision or file revision.
1036 1036 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1037 1037
1038 1038 @predicate('all()', safe=True)
1039 1039 def getall(repo, subset, x):
1040 1040 """All changesets, the same as ``0:tip``.
1041 1041 """
1042 1042 # i18n: "all" is a keyword
1043 1043 getargs(x, 0, 0, _("all takes no arguments"))
1044 1044 return subset & spanset(repo) # drop "null" if any
1045 1045
1046 1046 @predicate('grep(regex)')
1047 1047 def grep(repo, subset, x):
1048 1048 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1049 1049 to ensure special escape characters are handled correctly. Unlike
1050 1050 ``keyword(string)``, the match is case-sensitive.
1051 1051 """
1052 1052 try:
1053 1053 # i18n: "grep" is a keyword
1054 1054 gr = re.compile(getstring(x, _("grep requires a string")))
1055 1055 except re.error as e:
1056 1056 raise error.ParseError(_('invalid match pattern: %s') % e)
1057 1057
1058 1058 def matches(x):
1059 1059 c = repo[x]
1060 1060 for e in c.files() + [c.user(), c.description()]:
1061 1061 if gr.search(e):
1062 1062 return True
1063 1063 return False
1064 1064
1065 1065 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1066 1066
1067 1067 @predicate('_matchfiles', safe=True)
1068 1068 def _matchfiles(repo, subset, x):
1069 1069 # _matchfiles takes a revset list of prefixed arguments:
1070 1070 #
1071 1071 # [p:foo, i:bar, x:baz]
1072 1072 #
1073 1073 # builds a match object from them and filters subset. Allowed
1074 1074 # prefixes are 'p:' for regular patterns, 'i:' for include
1075 1075 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1076 1076 # a revision identifier, or the empty string to reference the
1077 1077 # working directory, from which the match object is
1078 1078 # initialized. Use 'd:' to set the default matching mode, default
1079 1079 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1080 1080
1081 1081 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1082 1082 pats, inc, exc = [], [], []
1083 1083 rev, default = None, None
1084 1084 for arg in l:
1085 1085 s = getstring(arg, "_matchfiles requires string arguments")
1086 1086 prefix, value = s[:2], s[2:]
1087 1087 if prefix == 'p:':
1088 1088 pats.append(value)
1089 1089 elif prefix == 'i:':
1090 1090 inc.append(value)
1091 1091 elif prefix == 'x:':
1092 1092 exc.append(value)
1093 1093 elif prefix == 'r:':
1094 1094 if rev is not None:
1095 1095 raise error.ParseError('_matchfiles expected at most one '
1096 1096 'revision')
1097 1097 if value != '': # empty means working directory; leave rev as None
1098 1098 rev = value
1099 1099 elif prefix == 'd:':
1100 1100 if default is not None:
1101 1101 raise error.ParseError('_matchfiles expected at most one '
1102 1102 'default mode')
1103 1103 default = value
1104 1104 else:
1105 1105 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1106 1106 if not default:
1107 1107 default = 'glob'
1108 1108
1109 1109 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1110 1110 exclude=exc, ctx=repo[rev], default=default)
1111 1111
1112 1112 # This directly read the changelog data as creating changectx for all
1113 1113 # revisions is quite expensive.
1114 1114 getfiles = repo.changelog.readfiles
1115 1115 wdirrev = node.wdirrev
1116 1116 def matches(x):
1117 1117 if x == wdirrev:
1118 1118 files = repo[x].files()
1119 1119 else:
1120 1120 files = getfiles(x)
1121 1121 for f in files:
1122 1122 if m(f):
1123 1123 return True
1124 1124 return False
1125 1125
1126 1126 return subset.filter(matches,
1127 1127 condrepr=('<matchfiles patterns=%r, include=%r '
1128 1128 'exclude=%r, default=%r, rev=%r>',
1129 1129 pats, inc, exc, default, rev))
1130 1130
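# Sketch of the prefixed-argument convention above: the "file" predicate below
# builds the call _matchfiles(repo, subset, ('string', 'p:' + pat)), and a
# hypothetical combined query could pass several prefixed strings, e.g.
# ('list', ('string', 'p:src/'), ('string', 'x:src/vendor'), ('string', 'd:glob')).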
1131 1131 @predicate('file(pattern)', safe=True)
1132 1132 def hasfile(repo, subset, x):
1133 1133 """Changesets affecting files matched by pattern.
1134 1134
1135 1135 For a faster but less accurate result, consider using ``filelog()``
1136 1136 instead.
1137 1137
1138 1138 This predicate uses ``glob:`` as the default kind of pattern.
1139 1139 """
1140 1140 # i18n: "file" is a keyword
1141 1141 pat = getstring(x, _("file requires a pattern"))
1142 1142 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1143 1143
1144 1144 @predicate('head()', safe=True)
1145 1145 def head(repo, subset, x):
1146 1146 """Changeset is a named branch head.
1147 1147 """
1148 1148 # i18n: "head" is a keyword
1149 1149 getargs(x, 0, 0, _("head takes no arguments"))
1150 1150 hs = set()
1151 1151 cl = repo.changelog
1152 1152 for b, ls in repo.branchmap().iteritems():
1153 1153 hs.update(cl.rev(h) for h in ls)
1154 1154 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1155 1155 # This does not break because of other fullreposet misbehavior.
1156 1156 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1157 1157 # necessary to ensure we preserve the order in subset.
1158 1158 return baseset(hs) & subset
1159 1159
1160 1160 @predicate('heads(set)', safe=True)
1161 1161 def heads(repo, subset, x):
1162 1162 """Members of set with no children in set.
1163 1163 """
1164 1164 s = getset(repo, subset, x)
1165 1165 ps = parents(repo, subset, x)
1166 1166 return s - ps
1167 1167
1168 1168 @predicate('hidden()', safe=True)
1169 1169 def hidden(repo, subset, x):
1170 1170 """Hidden changesets.
1171 1171 """
1172 1172 # i18n: "hidden" is a keyword
1173 1173 getargs(x, 0, 0, _("hidden takes no arguments"))
1174 1174 hiddenrevs = repoview.filterrevs(repo, 'visible')
1175 1175 return subset & hiddenrevs
1176 1176
1177 1177 @predicate('keyword(string)', safe=True)
1178 1178 def keyword(repo, subset, x):
1179 1179 """Search commit message, user name, and names of changed files for
1180 1180 string. The match is case-insensitive.
1181 1181 """
1182 1182 # i18n: "keyword" is a keyword
1183 1183 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1184 1184
1185 1185 def matches(r):
1186 1186 c = repo[r]
1187 1187 return any(kw in encoding.lower(t)
1188 1188 for t in c.files() + [c.user(), c.description()])
1189 1189
1190 1190 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1191 1191
1192 1192 @predicate('limit(set[, n[, offset]])', safe=True)
1193 1193 def limit(repo, subset, x):
1194 1194 """First n members of set, defaulting to 1, starting from offset.
1195 1195 """
1196 1196 args = getargsdict(x, 'limit', 'set n offset')
1197 1197 if 'set' not in args:
1198 1198 # i18n: "limit" is a keyword
1199 1199 raise error.ParseError(_("limit requires one to three arguments"))
1200 1200 try:
1201 1201 lim, ofs = 1, 0
1202 1202 if 'n' in args:
1203 1203 # i18n: "limit" is a keyword
1204 1204 lim = int(getstring(args['n'], _("limit requires a number")))
1205 1205 if 'offset' in args:
1206 1206 # i18n: "limit" is a keyword
1207 1207 ofs = int(getstring(args['offset'], _("limit requires a number")))
1208 1208 if ofs < 0:
1209 1209 raise error.ParseError(_("negative offset"))
1210 1210 except (TypeError, ValueError):
1211 1211 # i18n: "limit" is a keyword
1212 1212 raise error.ParseError(_("limit expects a number"))
1213 1213 os = getset(repo, fullreposet(repo), args['set'])
1214 1214 result = []
1215 1215 it = iter(os)
1216 1216 for x in xrange(ofs):
1217 1217 y = next(it, None)
1218 1218 if y is None:
1219 1219 break
1220 1220 for x in xrange(lim):
1221 1221 y = next(it, None)
1222 1222 if y is None:
1223 1223 break
1224 1224 elif y in subset:
1225 1225 result.append(y)
1226 1226 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1227 1227 lim, ofs, subset, os))
1228 1228
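# For example (hypothetical history), "limit(0:10, 2, 3)" skips the first
# three members of 0:10 and then returns the next two that are also in the
# subset, i.e. revisions 3 and 4 when nothing is filtered out.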
1229 1229 @predicate('last(set, [n])', safe=True)
1230 1230 def last(repo, subset, x):
1231 1231 """Last n members of set, defaulting to 1.
1232 1232 """
1233 1233 # i18n: "last" is a keyword
1234 1234 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1235 1235 try:
1236 1236 lim = 1
1237 1237 if len(l) == 2:
1238 1238 # i18n: "last" is a keyword
1239 1239 lim = int(getstring(l[1], _("last requires a number")))
1240 1240 except (TypeError, ValueError):
1241 1241 # i18n: "last" is a keyword
1242 1242 raise error.ParseError(_("last expects a number"))
1243 1243 os = getset(repo, fullreposet(repo), l[0])
1244 1244 os.reverse()
1245 1245 result = []
1246 1246 it = iter(os)
1247 1247 for x in xrange(lim):
1248 1248 y = next(it, None)
1249 1249 if y is None:
1250 1250 break
1251 1251 elif y in subset:
1252 1252 result.append(y)
1253 1253 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1254 1254
1255 1255 @predicate('max(set)', safe=True)
1256 1256 def maxrev(repo, subset, x):
1257 1257 """Changeset with highest revision number in set.
1258 1258 """
1259 1259 os = getset(repo, fullreposet(repo), x)
1260 1260 try:
1261 1261 m = os.max()
1262 1262 if m in subset:
1263 1263 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1264 1264 except ValueError:
1265 1265 # os.max() throws a ValueError when the collection is empty.
1266 1266 # Same as python's max().
1267 1267 pass
1268 1268 return baseset(datarepr=('<max %r, %r>', subset, os))
1269 1269
1270 1270 @predicate('merge()', safe=True)
1271 1271 def merge(repo, subset, x):
1272 1272 """Changeset is a merge changeset.
1273 1273 """
1274 1274 # i18n: "merge" is a keyword
1275 1275 getargs(x, 0, 0, _("merge takes no arguments"))
1276 1276 cl = repo.changelog
1277 1277 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1278 1278 condrepr='<merge>')
1279 1279
1280 1280 @predicate('branchpoint()', safe=True)
1281 1281 def branchpoint(repo, subset, x):
1282 1282 """Changesets with more than one child.
1283 1283 """
1284 1284 # i18n: "branchpoint" is a keyword
1285 1285 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1286 1286 cl = repo.changelog
1287 1287 if not subset:
1288 1288 return baseset()
1289 1289 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1290 1290 # (and if it is not, it should.)
1291 1291 baserev = min(subset)
1292 1292 parentscount = [0]*(len(repo) - baserev)
1293 1293 for r in cl.revs(start=baserev + 1):
1294 1294 for p in cl.parentrevs(r):
1295 1295 if p >= baserev:
1296 1296 parentscount[p - baserev] += 1
1297 1297 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1298 1298 condrepr='<branchpoint>')
1299 1299
1300 1300 @predicate('min(set)', safe=True)
1301 1301 def minrev(repo, subset, x):
1302 1302 """Changeset with lowest revision number in set.
1303 1303 """
1304 1304 os = getset(repo, fullreposet(repo), x)
1305 1305 try:
1306 1306 m = os.min()
1307 1307 if m in subset:
1308 1308 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1309 1309 except ValueError:
1310 1310 # os.min() throws a ValueError when the collection is empty.
1311 1311 # Same as python's min().
1312 1312 pass
1313 1313 return baseset(datarepr=('<min %r, %r>', subset, os))
1314 1314
1315 1315 @predicate('modifies(pattern)', safe=True)
1316 1316 def modifies(repo, subset, x):
1317 1317 """Changesets modifying files matched by pattern.
1318 1318
1319 1319 The pattern without explicit kind like ``glob:`` is expected to be
1320 1320 relative to the current directory and match against a file or a
1321 1321 directory.
1322 1322 """
1323 1323 # i18n: "modifies" is a keyword
1324 1324 pat = getstring(x, _("modifies requires a pattern"))
1325 1325 return checkstatus(repo, subset, pat, 0)
1326 1326
1327 1327 @predicate('named(namespace)')
1328 1328 def named(repo, subset, x):
1329 1329 """The changesets in a given namespace.
1330 1330
1331 1331 If `namespace` starts with `re:`, the remainder of the string is treated as
1332 1332 a regular expression. To match a namespace that actually starts with `re:`,
1333 1333 use the prefix `literal:`.
1334 1334 """
1335 1335 # i18n: "named" is a keyword
1336 1336 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1337 1337
1338 1338 ns = getstring(args[0],
1339 1339 # i18n: "named" is a keyword
1340 1340 _('the argument to named must be a string'))
1341 1341 kind, pattern, matcher = util.stringmatcher(ns)
1342 1342 namespaces = set()
1343 1343 if kind == 'literal':
1344 1344 if pattern not in repo.names:
1345 1345 raise error.RepoLookupError(_("namespace '%s' does not exist")
1346 1346 % ns)
1347 1347 namespaces.add(repo.names[pattern])
1348 1348 else:
1349 1349 for name, ns in repo.names.iteritems():
1350 1350 if matcher(name):
1351 1351 namespaces.add(ns)
1352 1352 if not namespaces:
1353 1353 raise error.RepoLookupError(_("no namespace exists"
1354 1354 " that match '%s'") % pattern)
1355 1355
1356 1356 names = set()
1357 1357 for ns in namespaces:
1358 1358 for name in ns.listnames(repo):
1359 1359 if name not in ns.deprecated:
1360 1360 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1361 1361
1362 1362 names -= set([node.nullrev])
1363 1363 return subset & names
1364 1364
1365 1365 @predicate('id(string)', safe=True)
1366 1366 def node_(repo, subset, x):
1367 1367 """Revision non-ambiguously specified by the given hex string prefix.
1368 1368 """
1369 1369 # i18n: "id" is a keyword
1370 1370 l = getargs(x, 1, 1, _("id requires one argument"))
1371 1371 # i18n: "id" is a keyword
1372 1372 n = getstring(l[0], _("id requires a string"))
1373 1373 if len(n) == 40:
1374 1374 try:
1375 1375 rn = repo.changelog.rev(node.bin(n))
1376 1376 except (LookupError, TypeError):
1377 1377 rn = None
1378 1378 else:
1379 1379 rn = None
1380 1380 pm = repo.changelog._partialmatch(n)
1381 1381 if pm is not None:
1382 1382 rn = repo.changelog.rev(pm)
1383 1383
1384 1384 if rn is None:
1385 1385 return baseset()
1386 1386 result = baseset([rn])
1387 1387 return result & subset
1388 1388
1389 1389 @predicate('obsolete()', safe=True)
1390 1390 def obsolete(repo, subset, x):
1391 1391 """Mutable changeset with a newer version."""
1392 1392 # i18n: "obsolete" is a keyword
1393 1393 getargs(x, 0, 0, _("obsolete takes no arguments"))
1394 1394 obsoletes = obsmod.getrevs(repo, 'obsolete')
1395 1395 return subset & obsoletes
1396 1396
1397 1397 @predicate('only(set, [set])', safe=True)
1398 1398 def only(repo, subset, x):
1399 1399 """Changesets that are ancestors of the first set that are not ancestors
1400 1400 of any other head in the repo. If a second set is specified, the result
1401 1401 is ancestors of the first set that are not ancestors of the second set
1402 1402 (i.e. ::<set1> - ::<set2>).
1403 1403 """
1404 1404 cl = repo.changelog
1405 1405 # i18n: "only" is a keyword
1406 1406 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1407 1407 include = getset(repo, fullreposet(repo), args[0])
1408 1408 if len(args) == 1:
1409 1409 if not include:
1410 1410 return baseset()
1411 1411
1412 1412 descendants = set(_revdescendants(repo, include, False))
1413 1413 exclude = [rev for rev in cl.headrevs()
1414 1414 if not rev in descendants and not rev in include]
1415 1415 else:
1416 1416 exclude = getset(repo, fullreposet(repo), args[1])
1417 1417
1418 1418 results = set(cl.findmissingrevs(common=exclude, heads=include))
1419 1419 # XXX we should turn this into a baseset instead of a set, smartset may do
1420 1420 # some optimisations from the fact this is a baseset.
1421 1421 return subset & results
1422 1422
1423 1423 @predicate('origin([set])', safe=True)
1424 1424 def origin(repo, subset, x):
1425 1425 """
1426 1426 Changesets that were specified as a source for the grafts, transplants or
1427 1427 rebases that created the given revisions. Omitting the optional set is the
1428 1428 same as passing all(). If a changeset created by these operations is itself
1429 1429 specified as a source for one of these operations, only the source changeset
1430 1430 for the first operation is selected.
1431 1431 """
1432 1432 if x is not None:
1433 1433 dests = getset(repo, fullreposet(repo), x)
1434 1434 else:
1435 1435 dests = fullreposet(repo)
1436 1436
1437 1437 def _firstsrc(rev):
1438 1438 src = _getrevsource(repo, rev)
1439 1439 if src is None:
1440 1440 return None
1441 1441
1442 1442 while True:
1443 1443 prev = _getrevsource(repo, src)
1444 1444
1445 1445 if prev is None:
1446 1446 return src
1447 1447 src = prev
1448 1448
1449 1449 o = set([_firstsrc(r) for r in dests])
1450 1450 o -= set([None])
1451 1451 # XXX we should turn this into a baseset instead of a set, smartset may do
1452 1452 # some optimisations from the fact this is a baseset.
1453 1453 return subset & o
1454 1454
1455 1455 @predicate('outgoing([path])', safe=True)
1456 1456 def outgoing(repo, subset, x):
1457 1457 """Changesets not found in the specified destination repository, or the
1458 1458 default push location.
1459 1459 """
1460 1460 # Avoid cycles.
1461 1461 from . import (
1462 1462 discovery,
1463 1463 hg,
1464 1464 )
1465 1465 # i18n: "outgoing" is a keyword
1466 1466 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1467 1467 # i18n: "outgoing" is a keyword
1468 1468 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1469 1469 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1470 1470 dest, branches = hg.parseurl(dest)
1471 1471 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1472 1472 if revs:
1473 1473 revs = [repo.lookup(rev) for rev in revs]
1474 1474 other = hg.peer(repo, {}, dest)
1475 1475 repo.ui.pushbuffer()
1476 1476 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1477 1477 repo.ui.popbuffer()
1478 1478 cl = repo.changelog
1479 1479 o = set([cl.rev(r) for r in outgoing.missing])
1480 1480 return subset & o
1481 1481
1482 1482 @predicate('p1([set])', safe=True)
1483 1483 def p1(repo, subset, x):
1484 1484 """First parent of changesets in set, or the working directory.
1485 1485 """
1486 1486 if x is None:
1487 1487 p = repo[x].p1().rev()
1488 1488 if p >= 0:
1489 1489 return subset & baseset([p])
1490 1490 return baseset()
1491 1491
1492 1492 ps = set()
1493 1493 cl = repo.changelog
1494 1494 for r in getset(repo, fullreposet(repo), x):
1495 1495 ps.add(cl.parentrevs(r)[0])
1496 1496 ps -= set([node.nullrev])
1497 1497 # XXX we should turn this into a baseset instead of a set, smartset may do
1498 1498 # some optimisations from the fact this is a baseset.
1499 1499 return subset & ps
1500 1500
1501 1501 @predicate('p2([set])', safe=True)
1502 1502 def p2(repo, subset, x):
1503 1503 """Second parent of changesets in set, or the working directory.
1504 1504 """
1505 1505 if x is None:
1506 1506 ps = repo[x].parents()
1507 1507 try:
1508 1508 p = ps[1].rev()
1509 1509 if p >= 0:
1510 1510 return subset & baseset([p])
1511 1511 return baseset()
1512 1512 except IndexError:
1513 1513 return baseset()
1514 1514
1515 1515 ps = set()
1516 1516 cl = repo.changelog
1517 1517 for r in getset(repo, fullreposet(repo), x):
1518 1518 ps.add(cl.parentrevs(r)[1])
1519 1519 ps -= set([node.nullrev])
1520 1520 # XXX we should turn this into a baseset instead of a set, smartset may do
1521 1521 # some optimisations from the fact this is a baseset.
1522 1522 return subset & ps
1523 1523
1524 1524 @predicate('parents([set])', safe=True)
1525 1525 def parents(repo, subset, x):
1526 1526 """
1527 1527 The set of all parents for all changesets in set, or the working directory.
1528 1528 """
1529 1529 if x is None:
1530 1530 ps = set(p.rev() for p in repo[x].parents())
1531 1531 else:
1532 1532 ps = set()
1533 1533 cl = repo.changelog
1534 1534 up = ps.update
1535 1535 parentrevs = cl.parentrevs
1536 1536 for r in getset(repo, fullreposet(repo), x):
1537 1537 if r == node.wdirrev:
1538 1538 up(p.rev() for p in repo[r].parents())
1539 1539 else:
1540 1540 up(parentrevs(r))
1541 1541 ps -= set([node.nullrev])
1542 1542 return subset & ps
1543 1543
1544 1544 def _phase(repo, subset, target):
1545 1545 """helper to select all rev in phase <target>"""
1546 1546 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1547 1547 if repo._phasecache._phasesets:
1548 1548 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1549 1549 s = baseset(s)
1550 1550 s.sort() # sets are unordered, so we enforce ascending order
1551 1551 return subset & s
1552 1552 else:
1553 1553 phase = repo._phasecache.phase
1554 1554 condition = lambda r: phase(repo, r) == target
1555 1555 return subset.filter(condition, condrepr=('<phase %r>', target),
1556 1556 cache=False)
1557 1557
1558 1558 @predicate('draft()', safe=True)
1559 1559 def draft(repo, subset, x):
1560 1560 """Changeset in draft phase."""
1561 1561 # i18n: "draft" is a keyword
1562 1562 getargs(x, 0, 0, _("draft takes no arguments"))
1563 1563 target = phases.draft
1564 1564 return _phase(repo, subset, target)
1565 1565
1566 1566 @predicate('secret()', safe=True)
1567 1567 def secret(repo, subset, x):
1568 1568 """Changeset in secret phase."""
1569 1569 # i18n: "secret" is a keyword
1570 1570 getargs(x, 0, 0, _("secret takes no arguments"))
1571 1571 target = phases.secret
1572 1572 return _phase(repo, subset, target)
1573 1573
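# Illustrative usage: 'hg log -r "draft()"' or 'hg log -r "secret()"' lists the
# changesets in the corresponding phase; both predicates delegate to the
# _phase() helper above, which prefers the precomputed phase sets when the
# phase cache has them and falls back to a per-revision filter otherwise.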
1574 1574 def parentspec(repo, subset, x, n):
1575 1575 """``set^0``
1576 1576 The set.
1577 1577 ``set^1`` (or ``set^``), ``set^2``
1578 1578 First or second parent, respectively, of all changesets in set.
1579 1579 """
1580 1580 try:
1581 1581 n = int(n[1])
1582 1582 if n not in (0, 1, 2):
1583 1583 raise ValueError
1584 1584 except (TypeError, ValueError):
1585 1585 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1586 1586 ps = set()
1587 1587 cl = repo.changelog
1588 1588 for r in getset(repo, fullreposet(repo), x):
1589 1589 if n == 0:
1590 1590 ps.add(r)
1591 1591 elif n == 1:
1592 1592 ps.add(cl.parentrevs(r)[0])
1593 1593 elif n == 2:
1594 1594 parents = cl.parentrevs(r)
1595 1595 if len(parents) > 1:
1596 1596 ps.add(parents[1])
1597 1597 return subset & ps
1598 1598
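# Illustrative example: the revset 'tip^2' selects the second parent of tip;
# for a non-merge changeset the second parent is null, so the result is empty.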
1599 1599 @predicate('present(set)', safe=True)
1600 1600 def present(repo, subset, x):
1601 1601 """An empty set, if any revision in set isn't found; otherwise,
1602 1602 all revisions in set.
1603 1603
1604 1604 If any of the specified revisions is not present in the local repository,
1605 1605 the query is normally aborted. But this predicate allows the query
1606 1606 to continue even in such cases.
1607 1607 """
1608 1608 try:
1609 1609 return getset(repo, subset, x)
1610 1610 except error.RepoLookupError:
1611 1611 return baseset()
1612 1612
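# Illustrative example: 'present(X)' evaluates to 'X' when every revision in X
# exists locally and to an empty set when the lookup fails, instead of
# aborting the whole query as a bare symbol would.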
1613 1613 # for internal use
1614 1614 @predicate('_notpublic', safe=True)
1615 1615 def _notpublic(repo, subset, x):
1616 1616 getargs(x, 0, 0, "_notpublic takes no arguments")
1617 1617 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1618 1618 if repo._phasecache._phasesets:
1619 1619 s = set()
1620 1620 for u in repo._phasecache._phasesets[1:]:
1621 1621 s.update(u)
1622 1622 s = baseset(s - repo.changelog.filteredrevs)
1623 1623 s.sort()
1624 1624 return subset & s
1625 1625 else:
1626 1626 phase = repo._phasecache.phase
1627 1627 target = phases.public
1628 1628 condition = lambda r: phase(repo, r) != target
1629 1629 return subset.filter(condition, condrepr=('<phase %r>', target),
1630 1630 cache=False)
1631 1631
1632 1632 @predicate('public()', safe=True)
1633 1633 def public(repo, subset, x):
1634 1634 """Changeset in public phase."""
1635 1635 # i18n: "public" is a keyword
1636 1636 getargs(x, 0, 0, _("public takes no arguments"))
1637 1637 phase = repo._phasecache.phase
1638 1638 target = phases.public
1639 1639 condition = lambda r: phase(repo, r) == target
1640 1640 return subset.filter(condition, condrepr=('<phase %r>', target),
1641 1641 cache=False)
1642 1642
1643 1643 @predicate('remote([id [,path]])', safe=True)
1644 1644 def remote(repo, subset, x):
1645 1645 """Local revision that corresponds to the given identifier in a
1646 1646 remote repository, if present. Here, the '.' identifier is a
1647 1647 synonym for the current local branch.
1648 1648 """
1649 1649
1650 1650 from . import hg # avoid start-up nasties
1651 1651 # i18n: "remote" is a keyword
1652 1652 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1653 1653
1654 1654 q = '.'
1655 1655 if len(l) > 0:
1656 1656 # i18n: "remote" is a keyword
1657 1657 q = getstring(l[0], _("remote requires a string id"))
1658 1658 if q == '.':
1659 1659 q = repo['.'].branch()
1660 1660
1661 1661 dest = ''
1662 1662 if len(l) > 1:
1663 1663 # i18n: "remote" is a keyword
1664 1664 dest = getstring(l[1], _("remote requires a repository path"))
1665 1665 dest = repo.ui.expandpath(dest or 'default')
1666 1666 dest, branches = hg.parseurl(dest)
1667 1667 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1668 1668 if revs:
1669 1669 revs = [repo.lookup(rev) for rev in revs]
1670 1670 other = hg.peer(repo, {}, dest)
1671 1671 n = other.lookup(q)
1672 1672 if n in repo:
1673 1673 r = repo[n].rev()
1674 1674 if r in subset:
1675 1675 return baseset([r])
1676 1676 return baseset()
1677 1677
1678 1678 @predicate('removes(pattern)', safe=True)
1679 1679 def removes(repo, subset, x):
1680 1680 """Changesets which remove files matching pattern.
1681 1681
1682 1682 The pattern without explicit kind like ``glob:`` is expected to be
1683 1683 relative to the current directory and match against a file or a
1684 1684 directory.
1685 1685 """
1686 1686 # i18n: "removes" is a keyword
1687 1687 pat = getstring(x, _("removes requires a pattern"))
1688 1688 return checkstatus(repo, subset, pat, 2)
1689 1689
1690 1690 @predicate('rev(number)', safe=True)
1691 1691 def rev(repo, subset, x):
1692 1692 """Revision with the given numeric identifier.
1693 1693 """
1694 1694 # i18n: "rev" is a keyword
1695 1695 l = getargs(x, 1, 1, _("rev requires one argument"))
1696 1696 try:
1697 1697 # i18n: "rev" is a keyword
1698 1698 l = int(getstring(l[0], _("rev requires a number")))
1699 1699 except (TypeError, ValueError):
1700 1700 # i18n: "rev" is a keyword
1701 1701 raise error.ParseError(_("rev expects a number"))
1702 1702 if l not in repo.changelog and l != node.nullrev:
1703 1703 return baseset()
1704 1704 return subset & baseset([l])
1705 1705
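# Illustrative example: 'rev(0)' selects the root revision; for a number that
# does not exist in the changelog, rev() quietly returns an empty set rather
# than raising a lookup error.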
1706 1706 @predicate('matching(revision [, field])', safe=True)
1707 1707 def matching(repo, subset, x):
1708 1708 """Changesets in which a given set of fields match the set of fields in the
1709 1709 selected revision or set.
1710 1710
1711 1711 To match more than one field pass the list of fields to match separated
1712 1712 by spaces (e.g. ``author description``).
1713 1713
1714 1714 Valid fields are most regular revision fields and some special fields.
1715 1715
1716 1716 Regular revision fields are ``description``, ``author``, ``branch``,
1717 1717 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1718 1718 and ``diff``.
1719 1719 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1720 1720 contents of the revision. Two revisions matching their ``diff`` will
1721 1721 also match their ``files``.
1722 1722
1723 1723 Special fields are ``summary`` and ``metadata``:
1724 1724 ``summary`` matches the first line of the description.
1725 1725 ``metadata`` is equivalent to matching ``description user date``
1726 1726 (i.e. it matches the main metadata fields).
1727 1727
1728 1728 ``metadata`` is the default field which is used when no fields are
1729 1729 specified. You can match more than one field at a time.
1730 1730 """
1731 1731 # i18n: "matching" is a keyword
1732 1732 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1733 1733
1734 1734 revs = getset(repo, fullreposet(repo), l[0])
1735 1735
1736 1736 fieldlist = ['metadata']
1737 1737 if len(l) > 1:
1738 1738 fieldlist = getstring(l[1],
1739 1739 # i18n: "matching" is a keyword
1740 1740 _("matching requires a string "
1741 1741 "as its second argument")).split()
1742 1742
1743 1743 # Make sure that there are no repeated fields,
1744 1744 # expand the 'special' 'metadata' field type
1745 1745 # and check the 'files' whenever we check the 'diff'
1746 1746 fields = []
1747 1747 for field in fieldlist:
1748 1748 if field == 'metadata':
1749 1749 fields += ['user', 'description', 'date']
1750 1750 elif field == 'diff':
1751 1751 # a revision matching the diff must also match the files
1752 1752 # since matching the diff is very costly, make sure to
1753 1753 # also match the files first
1754 1754 fields += ['files', 'diff']
1755 1755 else:
1756 1756 if field == 'author':
1757 1757 field = 'user'
1758 1758 fields.append(field)
1759 1759 fields = set(fields)
1760 1760 if 'summary' in fields and 'description' in fields:
1761 1761 # If a revision matches its description it also matches its summary
1762 1762 fields.discard('summary')
1763 1763
1764 1764 # We may want to match more than one field
1765 1765 # Not all fields take the same amount of time to be matched
1766 1766 # Sort the selected fields in order of increasing matching cost
1767 1767 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1768 1768 'files', 'description', 'substate', 'diff']
1769 1769 def fieldkeyfunc(f):
1770 1770 try:
1771 1771 return fieldorder.index(f)
1772 1772 except ValueError:
1773 1773 # assume an unknown field is very costly
1774 1774 return len(fieldorder)
1775 1775 fields = list(fields)
1776 1776 fields.sort(key=fieldkeyfunc)
1777 1777
1778 1778 # Each field will be matched with its own "getfield" function
1779 1779 # which will be added to the getfieldfuncs array of functions
1780 1780 getfieldfuncs = []
1781 1781 _funcs = {
1782 1782 'user': lambda r: repo[r].user(),
1783 1783 'branch': lambda r: repo[r].branch(),
1784 1784 'date': lambda r: repo[r].date(),
1785 1785 'description': lambda r: repo[r].description(),
1786 1786 'files': lambda r: repo[r].files(),
1787 1787 'parents': lambda r: repo[r].parents(),
1788 1788 'phase': lambda r: repo[r].phase(),
1789 1789 'substate': lambda r: repo[r].substate,
1790 1790 'summary': lambda r: repo[r].description().splitlines()[0],
1791 1791 'diff': lambda r: list(repo[r].diff(git=True),)
1792 1792 }
1793 1793 for info in fields:
1794 1794 getfield = _funcs.get(info, None)
1795 1795 if getfield is None:
1796 1796 raise error.ParseError(
1797 1797 # i18n: "matching" is a keyword
1798 1798 _("unexpected field name passed to matching: %s") % info)
1799 1799 getfieldfuncs.append(getfield)
1800 1800 # convert the getfield array of functions into a "getinfo" function
1801 1801 # which returns an array of field values (or a single value if there
1802 1802 # is only one field to match)
1803 1803 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1804 1804
1805 1805 def matches(x):
1806 1806 for rev in revs:
1807 1807 target = getinfo(rev)
1808 1808 match = True
1809 1809 for n, f in enumerate(getfieldfuncs):
1810 1810 if target[n] != f(x):
1811 1811 match = False
1812 1812 if match:
1813 1813 return True
1814 1814 return False
1815 1815
1816 1816 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1817 1817
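# Illustrative example: 'matching(tip, "author date")' selects changesets
# whose author and date both equal those of tip; the selected fields are
# compared cheapest-first according to the fieldorder list above.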
1818 1818 @predicate('reverse(set)', safe=True)
1819 1819 def reverse(repo, subset, x):
1820 1820 """Reverse order of set.
1821 1821 """
1822 1822 l = getset(repo, subset, x)
1823 1823 l.reverse()
1824 1824 return l
1825 1825
1826 1826 @predicate('roots(set)', safe=True)
1827 1827 def roots(repo, subset, x):
1828 1828 """Changesets in set with no parent changeset in set.
1829 1829 """
1830 1830 s = getset(repo, fullreposet(repo), x)
1831 1831 parents = repo.changelog.parentrevs
1832 1832 def filter(r):
1833 1833 for p in parents(r):
1834 1834 if 0 <= p and p in s:
1835 1835 return False
1836 1836 return True
1837 1837 return subset & s.filter(filter, condrepr='<roots>')
1838 1838
1839 1839 @predicate('sort(set[, [-]key...])', safe=True)
1840 1840 def sort(repo, subset, x):
1841 1841 """Sort set by keys. The default sort order is ascending, specify a key
1842 1842 as ``-key`` to sort in descending order.
1843 1843
1844 1844 The keys can be:
1845 1845
1846 1846 - ``rev`` for the revision number,
1847 1847 - ``branch`` for the branch name,
1848 1848 - ``desc`` for the commit message (description),
1849 1849 - ``user`` for user name (``author`` can be used as an alias),
1850 1850 - ``date`` for the commit date
1851 1851 """
1852 1852 # i18n: "sort" is a keyword
1853 1853 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1854 1854 keys = "rev"
1855 1855 if len(l) == 2:
1856 1856 # i18n: "sort" is a keyword
1857 1857 keys = getstring(l[1], _("sort spec must be a string"))
1858 1858
1859 1859 s = l[0]
1860 1860 keys = keys.split()
1861 1861 revs = getset(repo, subset, s)
1862 1862 if keys == ["rev"]:
1863 1863 revs.sort()
1864 1864 return revs
1865 1865 elif keys == ["-rev"]:
1866 1866 revs.sort(reverse=True)
1867 1867 return revs
1868 1868 # sort() is guaranteed to be stable
1869 1869 ctxs = [repo[r] for r in revs]
1870 1870 for k in reversed(keys):
1871 1871 if k == 'rev':
1872 1872 ctxs.sort(key=lambda c: c.rev())
1873 1873 elif k == '-rev':
1874 1874 ctxs.sort(key=lambda c: c.rev(), reverse=True)
1875 1875 elif k == 'branch':
1876 1876 ctxs.sort(key=lambda c: c.branch())
1877 1877 elif k == '-branch':
1878 1878 ctxs.sort(key=lambda c: c.branch(), reverse=True)
1879 1879 elif k == 'desc':
1880 1880 ctxs.sort(key=lambda c: c.description())
1881 1881 elif k == '-desc':
1882 1882 ctxs.sort(key=lambda c: c.description(), reverse=True)
1883 1883 elif k in 'user author':
1884 1884 ctxs.sort(key=lambda c: c.user())
1885 1885 elif k in '-user -author':
1886 1886 ctxs.sort(key=lambda c: c.user(), reverse=True)
1887 1887 elif k == 'date':
1888 1888 ctxs.sort(key=lambda c: c.date()[0])
1889 1889 elif k == '-date':
1890 1890 ctxs.sort(key=lambda c: c.date()[0], reverse=True)
1891 1891 else:
1892 1892 raise error.ParseError(_("unknown sort key %r") % k)
1893 1893 return baseset([c.rev() for c in ctxs])
1894 1894
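# Illustrative example: 'sort(branch(default), -date)' returns the changesets
# on the default branch ordered from newest to oldest commit date, while a
# plain 'sort(X)' is equivalent to 'sort(X, rev)'.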
1895 1895 @predicate('subrepo([pattern])')
1896 1896 def subrepo(repo, subset, x):
1897 1897 """Changesets that add, modify or remove the given subrepo. If no subrepo
1898 1898 pattern is named, any subrepo changes are returned.
1899 1899 """
1900 1900 # i18n: "subrepo" is a keyword
1901 1901 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1902 1902 pat = None
1903 1903 if len(args) != 0:
1904 1904 pat = getstring(args[0], _("subrepo requires a pattern"))
1905 1905
1906 1906 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1907 1907
1908 1908 def submatches(names):
1909 1909 k, p, m = util.stringmatcher(pat)
1910 1910 for name in names:
1911 1911 if m(name):
1912 1912 yield name
1913 1913
1914 1914 def matches(x):
1915 1915 c = repo[x]
1916 1916 s = repo.status(c.p1().node(), c.node(), match=m)
1917 1917
1918 1918 if pat is None:
1919 1919 return s.added or s.modified or s.removed
1920 1920
1921 1921 if s.added:
1922 1922 return any(submatches(c.substate.keys()))
1923 1923
1924 1924 if s.modified:
1925 1925 subs = set(c.p1().substate.keys())
1926 1926 subs.update(c.substate.keys())
1927 1927
1928 1928 for path in submatches(subs):
1929 1929 if c.p1().substate.get(path) != c.substate.get(path):
1930 1930 return True
1931 1931
1932 1932 if s.removed:
1933 1933 return any(submatches(c.p1().substate.keys()))
1934 1934
1935 1935 return False
1936 1936
1937 1937 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1938 1938
1939 1939 def _substringmatcher(pattern):
1940 1940 kind, pattern, matcher = util.stringmatcher(pattern)
1941 1941 if kind == 'literal':
1942 1942 matcher = lambda s: pattern in s
1943 1943 return kind, pattern, matcher
1944 1944
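# Illustrative example: _substringmatcher('fix') yields a matcher that tests
# substring containment ('fix' in s), whereas _substringmatcher('re:^Fix')
# keeps the regular-expression matcher returned by util.stringmatcher().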
1945 1945 @predicate('tag([name])', safe=True)
1946 1946 def tag(repo, subset, x):
1947 1947 """The specified tag by name, or all tagged revisions if no name is given.
1948 1948
1949 1949 If `name` starts with `re:`, the remainder of the name is treated as
1950 1950 a regular expression. To match a tag that actually starts with `re:`,
1951 1951 use the prefix `literal:`.
1952 1952 """
1953 1953 # i18n: "tag" is a keyword
1954 1954 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1955 1955 cl = repo.changelog
1956 1956 if args:
1957 1957 pattern = getstring(args[0],
1958 1958 # i18n: "tag" is a keyword
1959 1959 _('the argument to tag must be a string'))
1960 1960 kind, pattern, matcher = util.stringmatcher(pattern)
1961 1961 if kind == 'literal':
1962 1962 # avoid resolving all tags
1963 1963 tn = repo._tagscache.tags.get(pattern, None)
1964 1964 if tn is None:
1965 1965 raise error.RepoLookupError(_("tag '%s' does not exist")
1966 1966 % pattern)
1967 1967 s = set([repo[tn].rev()])
1968 1968 else:
1969 1969 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1970 1970 else:
1971 1971 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1972 1972 return subset & s
1973 1973
1974 1974 @predicate('tagged', safe=True)
1975 1975 def tagged(repo, subset, x):
1976 1976 return tag(repo, subset, x)
1977 1977
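# Illustrative example: 'tag()' selects every revision carrying a real tag
# (the implicit 'tip' tag is ignored), 'tag("1.0")' resolves that single tag,
# and 'tag("re:^v1\.")' matches all tags starting with "v1.".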
1978 1978 @predicate('unstable()', safe=True)
1979 1979 def unstable(repo, subset, x):
1980 1980 """Non-obsolete changesets with obsolete ancestors.
1981 1981 """
1982 1982 # i18n: "unstable" is a keyword
1983 1983 getargs(x, 0, 0, _("unstable takes no arguments"))
1984 1984 unstables = obsmod.getrevs(repo, 'unstable')
1985 1985 return subset & unstables
1986 1986
1987 1987
1988 1988 @predicate('user(string)', safe=True)
1989 1989 def user(repo, subset, x):
1990 1990 """User name contains string. The match is case-insensitive.
1991 1991
1992 1992 If `string` starts with `re:`, the remainder of the string is treated as
1993 1993 a regular expression. To match a user that actually contains `re:`, use
1994 1994 the prefix `literal:`.
1995 1995 """
1996 1996 return author(repo, subset, x)
1997 1997
1998 1998 # experimental
1999 1999 @predicate('wdir', safe=True)
2000 2000 def wdir(repo, subset, x):
2001 2001 # i18n: "wdir" is a keyword
2002 2002 getargs(x, 0, 0, _("wdir takes no arguments"))
2003 2003 if node.wdirrev in subset or isinstance(subset, fullreposet):
2004 2004 return baseset([node.wdirrev])
2005 2005 return baseset()
2006 2006
2007 2007 # for internal use
2008 2008 @predicate('_list', safe=True)
2009 2009 def _list(repo, subset, x):
2010 2010 s = getstring(x, "internal error")
2011 2011 if not s:
2012 2012 return baseset()
2013 2013 # remove duplicates here. it's difficult for the caller to deduplicate sets
2014 2014 # because different symbols can point to the same rev.
2015 2015 cl = repo.changelog
2016 2016 ls = []
2017 2017 seen = set()
2018 2018 for t in s.split('\0'):
2019 2019 try:
2020 2020 # fast path for integer revision
2021 2021 r = int(t)
2022 2022 if str(r) != t or r not in cl:
2023 2023 raise ValueError
2024 2024 revs = [r]
2025 2025 except ValueError:
2026 2026 revs = stringset(repo, subset, t)
2027 2027
2028 2028 for r in revs:
2029 2029 if r in seen:
2030 2030 continue
2031 2031 if (r in subset
2032 2032 or r == node.nullrev and isinstance(subset, fullreposet)):
2033 2033 ls.append(r)
2034 2034 seen.add(r)
2035 2035 return baseset(ls)
2036 2036
2037 2037 # for internal use
2038 2038 @predicate('_intlist', safe=True)
2039 2039 def _intlist(repo, subset, x):
2040 2040 s = getstring(x, "internal error")
2041 2041 if not s:
2042 2042 return baseset()
2043 2043 ls = [int(r) for r in s.split('\0')]
2044 2044 s = subset
2045 2045 return baseset([r for r in ls if r in s])
2046 2046
2047 2047 # for internal use
2048 2048 @predicate('_hexlist', safe=True)
2049 2049 def _hexlist(repo, subset, x):
2050 2050 s = getstring(x, "internal error")
2051 2051 if not s:
2052 2052 return baseset()
2053 2053 cl = repo.changelog
2054 2054 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2055 2055 s = subset
2056 2056 return baseset([r for r in ls if r in s])
2057 2057
2058 2058 methods = {
2059 2059 "range": rangeset,
2060 2060 "dagrange": dagrange,
2061 2061 "string": stringset,
2062 2062 "symbol": stringset,
2063 2063 "and": andset,
2064 2064 "or": orset,
2065 2065 "not": notset,
2066 2066 "difference": differenceset,
2067 2067 "list": listset,
2068 2068 "keyvalue": keyvaluepair,
2069 2069 "func": func,
2070 2070 "ancestor": ancestorspec,
2071 2071 "parent": parentspec,
2072 2072 "parentpost": p1,
2073 2073 }
2074 2074
2075 def _isonly(revs, bases):
2076 return (
2077 revs is not None
2078 and revs[0] == 'func'
2079 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2080 and bases is not None
2081 and bases[0] == 'not'
2082 and bases[1][0] == 'func'
2083 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2084
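# Illustrative note: _isonly() recognizes parsed trees of the shape
#   ('func', ('symbol', 'ancestors'), X)  paired with
#   ('not', ('func', ('symbol', 'ancestors'), Y))
# which is what '::x and not ::y' is rewritten to, allowing optimize() below
# to replace the pair with the faster 'only(x, y)'.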
2075 2085 def optimize(x, small):
2076 2086 if x is None:
2077 2087 return 0, x
2078 2088
2079 2089 smallbonus = 1
2080 2090 if small:
2081 2091 smallbonus = .5
2082 2092
2083 2093 op = x[0]
2084 2094 if op == 'minus':
2085 2095 return optimize(('and', x[1], ('not', x[2])), small)
2086 2096 elif op == 'only':
2087 2097 return optimize(('func', ('symbol', 'only'),
2088 2098 ('list', x[1], x[2])), small)
2089 2099 elif op == 'onlypost':
2090 2100 return optimize(('func', ('symbol', 'only'), x[1]), small)
2091 2101 elif op == 'dagrangepre':
2092 2102 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2093 2103 elif op == 'dagrangepost':
2094 2104 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2095 2105 elif op == 'rangeall':
2096 2106 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2097 2107 elif op == 'rangepre':
2098 2108 return optimize(('range', ('string', '0'), x[1]), small)
2099 2109 elif op == 'rangepost':
2100 2110 return optimize(('range', x[1], ('string', 'tip')), small)
2101 2111 elif op == 'negate':
2102 2112 return optimize(('string',
2103 2113 '-' + getstring(x[1], _("can't negate that"))), small)
2104 2114 elif op in 'string symbol negate':
2105 2115 return smallbonus, x # single revisions are small
2106 2116 elif op == 'and':
2107 2117 wa, ta = optimize(x[1], True)
2108 2118 wb, tb = optimize(x[2], True)
2119 w = min(wa, wb)
2109 2120
2110 2121 # (::x and not ::y)/(not ::y and ::x) have a fast path
2111 def isonly(revs, bases):
2112 return (
2113 revs is not None
2114 and revs[0] == 'func'
2115 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2116 and bases is not None
2117 and bases[0] == 'not'
2118 and bases[1][0] == 'func'
2119 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2120
2121 w = min(wa, wb)
2122 if isonly(ta, tb):
2122 if _isonly(ta, tb):
2123 2123 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2124 if isonly(tb, ta):
2124 if _isonly(tb, ta):
2125 2125 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2126 2126
2127 2127 if tb is not None and tb[0] == 'not':
2128 2128 return wa, ('difference', ta, tb[1])
2129 2129
2130 2130 if wa > wb:
2131 2131 return w, (op, tb, ta)
2132 2132 return w, (op, ta, tb)
2133 2133 elif op == 'or':
2134 2134 # fast path for machine-generated expressions, which are likely to have
2135 2135 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2136 2136 ws, ts, ss = [], [], []
2137 2137 def flushss():
2138 2138 if not ss:
2139 2139 return
2140 2140 if len(ss) == 1:
2141 2141 w, t = ss[0]
2142 2142 else:
2143 2143 s = '\0'.join(t[1] for w, t in ss)
2144 2144 y = ('func', ('symbol', '_list'), ('string', s))
2145 2145 w, t = optimize(y, False)
2146 2146 ws.append(w)
2147 2147 ts.append(t)
2148 2148 del ss[:]
2149 2149 for y in x[1:]:
2150 2150 w, t = optimize(y, False)
2151 2151 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2152 2152 ss.append((w, t))
2153 2153 continue
2154 2154 flushss()
2155 2155 ws.append(w)
2156 2156 ts.append(t)
2157 2157 flushss()
2158 2158 if len(ts) == 1:
2159 2159 return ws[0], ts[0] # 'or' operation is fully optimized out
2160 2160 # we can't reorder trees by weight because it would change the order.
2161 2161 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2162 2162 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2163 2163 return max(ws), (op,) + tuple(ts)
2164 2164 elif op == 'not':
2165 2165 # Optimize not public() to _notpublic() because we have a fast version
2166 2166 if x[1] == ('func', ('symbol', 'public'), None):
2167 2167 newsym = ('func', ('symbol', '_notpublic'), None)
2168 2168 o = optimize(newsym, not small)
2169 2169 return o[0], o[1]
2170 2170 else:
2171 2171 o = optimize(x[1], not small)
2172 2172 return o[0], (op, o[1])
2173 2173 elif op == 'parentpost':
2174 2174 o = optimize(x[1], small)
2175 2175 return o[0], (op, o[1])
2176 2176 elif op == 'group':
2177 2177 return optimize(x[1], small)
2178 2178 elif op in 'dagrange range parent ancestorspec':
2179 2179 if op == 'parent':
2180 2180 # x^:y means (x^) : y, not x ^ (:y)
2181 2181 post = ('parentpost', x[1])
2182 2182 if x[2][0] == 'dagrangepre':
2183 2183 return optimize(('dagrange', post, x[2][1]), small)
2184 2184 elif x[2][0] == 'rangepre':
2185 2185 return optimize(('range', post, x[2][1]), small)
2186 2186
2187 2187 wa, ta = optimize(x[1], small)
2188 2188 wb, tb = optimize(x[2], small)
2189 2189 return wa + wb, (op, ta, tb)
2190 2190 elif op == 'list':
2191 2191 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2192 2192 return sum(ws), (op,) + ts
2193 2193 elif op == 'func':
2194 2194 f = getstring(x[1], _("not a symbol"))
2195 2195 wa, ta = optimize(x[2], small)
2196 2196 if f in ("author branch closed date desc file grep keyword "
2197 2197 "outgoing user"):
2198 2198 w = 10 # slow
2199 2199 elif f in "modifies adds removes":
2200 2200 w = 30 # slower
2201 2201 elif f == "contains":
2202 2202 w = 100 # very slow
2203 2203 elif f == "ancestor":
2204 2204 w = 1 * smallbonus
2205 2205 elif f in "reverse limit first _intlist":
2206 2206 w = 0
2207 2207 elif f in "sort":
2208 2208 w = 10 # assume most sorts look at changelog
2209 2209 else:
2210 2210 w = 1
2211 2211 return w + wa, (op, x[1], ta)
2212 2212 return 1, x
2213 2213
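# Illustrative sketch (not part of the module): the 'and' branch of optimize()
# turns '::a and not ::b' into the equivalent but faster 'only(a, b)' form:
#
#   weight, tree = optimize(parse('::a and not ::b'), False)
#   # tree is ('func', ('symbol', 'only'),
#   #          ('list', ('symbol', 'a'), ('symbol', 'b')))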
2214 2214 # the set of valid characters for the initial letter of symbols in
2215 2215 # alias declarations and definitions
2216 2216 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2217 2217 if c.isalnum() or c in '._@$' or ord(c) > 127)
2218 2218
2219 2219 def _parsewith(spec, lookup=None, syminitletters=None):
2220 2220 """Generate a parse tree of given spec with given tokenizing options
2221 2221
2222 2222 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2223 2223 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2224 2224 >>> _parsewith('$1')
2225 2225 Traceback (most recent call last):
2226 2226 ...
2227 2227 ParseError: ("syntax error in revset '$1'", 0)
2228 2228 >>> _parsewith('foo bar')
2229 2229 Traceback (most recent call last):
2230 2230 ...
2231 2231 ParseError: ('invalid token', 4)
2232 2232 """
2233 2233 p = parser.parser(elements)
2234 2234 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2235 2235 syminitletters=syminitletters))
2236 2236 if pos != len(spec):
2237 2237 raise error.ParseError(_('invalid token'), pos)
2238 2238 return parser.simplifyinfixops(tree, ('list', 'or'))
2239 2239
2240 2240 class _aliasrules(parser.basealiasrules):
2241 2241 """Parsing and expansion rule set of revset aliases"""
2242 2242 _section = _('revset alias')
2243 2243
2244 2244 @staticmethod
2245 2245 def _parse(spec):
2246 2246 """Parse alias declaration/definition ``spec``
2247 2247
2248 2248 This also allows symbol names to use ``$`` as an initial letter
2249 2249 (for backward compatibility); callers of this function should
2250 2250 examine whether ``$`` is also used for unexpected symbols.
2251 2251 """
2252 2252 return _parsewith(spec, syminitletters=_aliassyminitletters)
2253 2253
2254 2254 @staticmethod
2255 2255 def _trygetfunc(tree):
2256 2256 if tree[0] == 'func' and tree[1][0] == 'symbol':
2257 2257 return tree[1][1], getlist(tree[2])
2258 2258
2259 2259 def expandaliases(ui, tree, showwarning=None):
2260 2260 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2261 2261 tree = _aliasrules.expand(aliases, tree)
2262 2262 if showwarning:
2263 2263 # warn about problematic (but not referred) aliases
2264 2264 for name, alias in sorted(aliases.iteritems()):
2265 2265 if alias.error and not alias.warned:
2266 2266 showwarning(_('warning: %s\n') % (alias.error))
2267 2267 alias.warned = True
2268 2268 return tree
2269 2269
2270 2270 def foldconcat(tree):
2271 2271 """Fold elements to be concatenated by `##`
2272 2272 """
2273 2273 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2274 2274 return tree
2275 2275 if tree[0] == '_concat':
2276 2276 pending = [tree]
2277 2277 l = []
2278 2278 while pending:
2279 2279 e = pending.pop()
2280 2280 if e[0] == '_concat':
2281 2281 pending.extend(reversed(e[1:]))
2282 2282 elif e[0] in ('string', 'symbol'):
2283 2283 l.append(e[1])
2284 2284 else:
2285 2285 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2286 2286 raise error.ParseError(msg)
2287 2287 return ('string', ''.join(l))
2288 2288 else:
2289 2289 return tuple(foldconcat(t) for t in tree)
2290 2290
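# Illustrative example: for the expression "'a' ## 'b' ## 'c'", foldconcat()
# collapses the nested ('_concat', ...) nodes into a single ('string', 'abc')
# node before the tree is evaluated.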
2291 2291 def parse(spec, lookup=None):
2292 2292 return _parsewith(spec, lookup=lookup)
2293 2293
2294 2294 def posttreebuilthook(tree, repo):
2295 2295 # hook for extensions to execute code on the optimized tree
2296 2296 pass
2297 2297
2298 2298 def match(ui, spec, repo=None):
2299 2299 if not spec:
2300 2300 raise error.ParseError(_("empty query"))
2301 2301 lookup = None
2302 2302 if repo:
2303 2303 lookup = repo.__contains__
2304 2304 tree = parse(spec, lookup)
2305 2305 return _makematcher(ui, tree, repo)
2306 2306
2307 2307 def matchany(ui, specs, repo=None):
2308 2308 """Create a matcher that will include any revisions matching one of the
2309 2309 given specs"""
2310 2310 if not specs:
2311 2311 def mfunc(repo, subset=None):
2312 2312 return baseset()
2313 2313 return mfunc
2314 2314 if not all(specs):
2315 2315 raise error.ParseError(_("empty query"))
2316 2316 lookup = None
2317 2317 if repo:
2318 2318 lookup = repo.__contains__
2319 2319 if len(specs) == 1:
2320 2320 tree = parse(specs[0], lookup)
2321 2321 else:
2322 2322 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2323 2323 return _makematcher(ui, tree, repo)
2324 2324
2325 2325 def _makematcher(ui, tree, repo):
2326 2326 if ui:
2327 2327 tree = expandaliases(ui, tree, showwarning=ui.warn)
2328 2328 tree = foldconcat(tree)
2329 2329 weight, tree = optimize(tree, True)
2330 2330 posttreebuilthook(tree, repo)
2331 2331 def mfunc(repo, subset=None):
2332 2332 if subset is None:
2333 2333 subset = fullreposet(repo)
2334 2334 if util.safehasattr(subset, 'isascending'):
2335 2335 result = getset(repo, subset, tree)
2336 2336 else:
2337 2337 result = getset(repo, baseset(subset), tree)
2338 2338 return result
2339 2339 return mfunc
2340 2340
2341 2341 def formatspec(expr, *args):
2342 2342 '''
2343 2343 This is a convenience function for using revsets internally, and
2344 2344 escapes arguments appropriately. Aliases are intentionally ignored
2345 2345 so that intended expression behavior isn't accidentally subverted.
2346 2346
2347 2347 Supported arguments:
2348 2348
2349 2349 %r = revset expression, parenthesized
2350 2350 %d = int(arg), no quoting
2351 2351 %s = string(arg), escaped and single-quoted
2352 2352 %b = arg.branch(), escaped and single-quoted
2353 2353 %n = hex(arg), single-quoted
2354 2354 %% = a literal '%'
2355 2355
2356 2356 Prefixing the type with 'l' specifies a parenthesized list of that type.
2357 2357
2358 2358 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2359 2359 '(10 or 11):: and ((this()) or (that()))'
2360 2360 >>> formatspec('%d:: and not %d::', 10, 20)
2361 2361 '10:: and not 20::'
2362 2362 >>> formatspec('%ld or %ld', [], [1])
2363 2363 "_list('') or 1"
2364 2364 >>> formatspec('keyword(%s)', 'foo\\xe9')
2365 2365 "keyword('foo\\\\xe9')"
2366 2366 >>> b = lambda: 'default'
2367 2367 >>> b.branch = b
2368 2368 >>> formatspec('branch(%b)', b)
2369 2369 "branch('default')"
2370 2370 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2371 2371 "root(_list('a\\x00b\\x00c\\x00d'))"
2372 2372 '''
2373 2373
2374 2374 def quote(s):
2375 2375 return repr(str(s))
2376 2376
2377 2377 def argtype(c, arg):
2378 2378 if c == 'd':
2379 2379 return str(int(arg))
2380 2380 elif c == 's':
2381 2381 return quote(arg)
2382 2382 elif c == 'r':
2383 2383 parse(arg) # make sure syntax errors are confined
2384 2384 return '(%s)' % arg
2385 2385 elif c == 'n':
2386 2386 return quote(node.hex(arg))
2387 2387 elif c == 'b':
2388 2388 return quote(arg.branch())
2389 2389
2390 2390 def listexp(s, t):
2391 2391 l = len(s)
2392 2392 if l == 0:
2393 2393 return "_list('')"
2394 2394 elif l == 1:
2395 2395 return argtype(t, s[0])
2396 2396 elif t == 'd':
2397 2397 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2398 2398 elif t == 's':
2399 2399 return "_list('%s')" % "\0".join(s)
2400 2400 elif t == 'n':
2401 2401 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2402 2402 elif t == 'b':
2403 2403 return "_list('%s')" % "\0".join(a.branch() for a in s)
2404 2404
2405 2405 m = l // 2
2406 2406 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2407 2407
2408 2408 ret = ''
2409 2409 pos = 0
2410 2410 arg = 0
2411 2411 while pos < len(expr):
2412 2412 c = expr[pos]
2413 2413 if c == '%':
2414 2414 pos += 1
2415 2415 d = expr[pos]
2416 2416 if d == '%':
2417 2417 ret += d
2418 2418 elif d in 'dsnbr':
2419 2419 ret += argtype(d, args[arg])
2420 2420 arg += 1
2421 2421 elif d == 'l':
2422 2422 # a list of some type
2423 2423 pos += 1
2424 2424 d = expr[pos]
2425 2425 ret += listexp(list(args[arg]), d)
2426 2426 arg += 1
2427 2427 else:
2428 2428 raise error.Abort('unexpected revspec format character %s' % d)
2429 2429 else:
2430 2430 ret += c
2431 2431 pos += 1
2432 2432
2433 2433 return ret
2434 2434
2435 2435 def prettyformat(tree):
2436 2436 return parser.prettyformat(tree, ('string', 'symbol'))
2437 2437
2438 2438 def depth(tree):
2439 2439 if isinstance(tree, tuple):
2440 2440 return max(map(depth, tree)) + 1
2441 2441 else:
2442 2442 return 0
2443 2443
2444 2444 def funcsused(tree):
2445 2445 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2446 2446 return set()
2447 2447 else:
2448 2448 funcs = set()
2449 2449 for s in tree[1:]:
2450 2450 funcs |= funcsused(s)
2451 2451 if tree[0] == 'func':
2452 2452 funcs.add(tree[1][1])
2453 2453 return funcs
2454 2454
2455 2455 def _formatsetrepr(r):
2456 2456 """Format an optional printable representation of a set
2457 2457
2458 2458 ======== =================================
2459 2459 type(r) example
2460 2460 ======== =================================
2461 2461 tuple ('<not %r>', other)
2462 2462 str '<branch closed>'
2463 2463 callable lambda: '<branch %r>' % sorted(b)
2464 2464 object other
2465 2465 ======== =================================
2466 2466 """
2467 2467 if r is None:
2468 2468 return ''
2469 2469 elif isinstance(r, tuple):
2470 2470 return r[0] % r[1:]
2471 2471 elif isinstance(r, str):
2472 2472 return r
2473 2473 elif callable(r):
2474 2474 return r()
2475 2475 else:
2476 2476 return repr(r)
2477 2477
2478 2478 class abstractsmartset(object):
2479 2479
2480 2480 def __nonzero__(self):
2481 2481 """True if the smartset is not empty"""
2482 2482 raise NotImplementedError()
2483 2483
2484 2484 def __contains__(self, rev):
2485 2485 """provide fast membership testing"""
2486 2486 raise NotImplementedError()
2487 2487
2488 2488 def __iter__(self):
2489 2489 """iterate the set in the order it is supposed to be iterated"""
2490 2490 raise NotImplementedError()
2491 2491
2492 2492 # Attributes containing a function to perform a fast iteration in a given
2493 2493 # direction. A smartset can have none, one, or both defined.
2494 2494 #
2495 2495 # Default value is None instead of a function returning None to avoid
2496 2496 # initializing an iterator just for testing if a fast method exists.
2497 2497 fastasc = None
2498 2498 fastdesc = None
2499 2499
2500 2500 def isascending(self):
2501 2501 """True if the set will iterate in ascending order"""
2502 2502 raise NotImplementedError()
2503 2503
2504 2504 def isdescending(self):
2505 2505 """True if the set will iterate in descending order"""
2506 2506 raise NotImplementedError()
2507 2507
2508 2508 @util.cachefunc
2509 2509 def min(self):
2510 2510 """return the minimum element in the set"""
2511 2511 if self.fastasc is not None:
2512 2512 for r in self.fastasc():
2513 2513 return r
2514 2514 raise ValueError('arg is an empty sequence')
2515 2515 return min(self)
2516 2516
2517 2517 @util.cachefunc
2518 2518 def max(self):
2519 2519 """return the maximum element in the set"""
2520 2520 if self.fastdesc is not None:
2521 2521 for r in self.fastdesc():
2522 2522 return r
2523 2523 raise ValueError('arg is an empty sequence')
2524 2524 return max(self)
2525 2525
2526 2526 def first(self):
2527 2527 """return the first element in the set (user iteration perspective)
2528 2528
2529 2529 Return None if the set is empty"""
2530 2530 raise NotImplementedError()
2531 2531
2532 2532 def last(self):
2533 2533 """return the last element in the set (user iteration perspective)
2534 2534
2535 2535 Return None if the set is empty"""
2536 2536 raise NotImplementedError()
2537 2537
2538 2538 def __len__(self):
2539 2539 """return the length of the smartsets
2540 2540
2541 2541 This can be expensive on smartset that could be lazy otherwise."""
2542 2542 raise NotImplementedError()
2543 2543
2544 2544 def reverse(self):
2545 2545 """reverse the expected iteration order"""
2546 2546 raise NotImplementedError()
2547 2547
2548 2548 def sort(self, reverse=True):
2549 2549 """get the set to iterate in an ascending or descending order"""
2550 2550 raise NotImplementedError()
2551 2551
2552 2552 def __and__(self, other):
2553 2553 """Returns a new object with the intersection of the two collections.
2554 2554
2555 2555 This is part of the mandatory API for smartset."""
2556 2556 if isinstance(other, fullreposet):
2557 2557 return self
2558 2558 return self.filter(other.__contains__, condrepr=other, cache=False)
2559 2559
2560 2560 def __add__(self, other):
2561 2561 """Returns a new object with the union of the two collections.
2562 2562
2563 2563 This is part of the mandatory API for smartset."""
2564 2564 return addset(self, other)
2565 2565
2566 2566 def __sub__(self, other):
2567 2567 """Returns a new object with the substraction of the two collections.
2568 2568
2569 2569 This is part of the mandatory API for smartset."""
2570 2570 c = other.__contains__
2571 2571 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2572 2572 cache=False)
2573 2573
2574 2574 def filter(self, condition, condrepr=None, cache=True):
2575 2575 """Returns this smartset filtered by condition as a new smartset.
2576 2576
2577 2577 `condition` is a callable which takes a revision number and returns a
2578 2578 boolean. Optional `condrepr` provides a printable representation of
2579 2579 the given `condition`.
2580 2580
2581 2581 This is part of the mandatory API for smartset."""
2582 2582 # builtins cannot be cached, but they do not need to be
2583 2583 if cache and util.safehasattr(condition, 'func_code'):
2584 2584 condition = util.cachefunc(condition)
2585 2585 return filteredset(self, condition, condrepr)
2586 2586
2587 2587 class baseset(abstractsmartset):
2588 2588 """Basic data structure that represents a revset and contains the basic
2589 2589 operation that it should be able to perform.
2590 2590
2591 2591 Every method in this class should be implemented by any smartset class.
2592 2592 """
2593 2593 def __init__(self, data=(), datarepr=None):
2594 2594 """
2595 2595 datarepr: a tuple of (format, obj, ...), a function or an object that
2596 2596 provides a printable representation of the given data.
2597 2597 """
2598 2598 self._ascending = None
2599 2599 if not isinstance(data, list):
2600 2600 if isinstance(data, set):
2601 2601 self._set = data
2602 2602 # a set has no order; we pick one for stability purposes
2603 2603 self._ascending = True
2604 2604 data = list(data)
2605 2605 self._list = data
2606 2606 self._datarepr = datarepr
2607 2607
2608 2608 @util.propertycache
2609 2609 def _set(self):
2610 2610 return set(self._list)
2611 2611
2612 2612 @util.propertycache
2613 2613 def _asclist(self):
2614 2614 asclist = self._list[:]
2615 2615 asclist.sort()
2616 2616 return asclist
2617 2617
2618 2618 def __iter__(self):
2619 2619 if self._ascending is None:
2620 2620 return iter(self._list)
2621 2621 elif self._ascending:
2622 2622 return iter(self._asclist)
2623 2623 else:
2624 2624 return reversed(self._asclist)
2625 2625
2626 2626 def fastasc(self):
2627 2627 return iter(self._asclist)
2628 2628
2629 2629 def fastdesc(self):
2630 2630 return reversed(self._asclist)
2631 2631
2632 2632 @util.propertycache
2633 2633 def __contains__(self):
2634 2634 return self._set.__contains__
2635 2635
2636 2636 def __nonzero__(self):
2637 2637 return bool(self._list)
2638 2638
2639 2639 def sort(self, reverse=False):
2640 2640 self._ascending = not bool(reverse)
2641 2641
2642 2642 def reverse(self):
2643 2643 if self._ascending is None:
2644 2644 self._list.reverse()
2645 2645 else:
2646 2646 self._ascending = not self._ascending
2647 2647
2648 2648 def __len__(self):
2649 2649 return len(self._list)
2650 2650
2651 2651 def isascending(self):
2652 2652 """Returns True if the collection is ascending order, False if not.
2653 2653
2654 2654 This is part of the mandatory API for smartset."""
2655 2655 if len(self) <= 1:
2656 2656 return True
2657 2657 return self._ascending is not None and self._ascending
2658 2658
2659 2659 def isdescending(self):
2660 2660 """Returns True if the collection is descending order, False if not.
2661 2661
2662 2662 This is part of the mandatory API for smartset."""
2663 2663 if len(self) <= 1:
2664 2664 return True
2665 2665 return self._ascending is not None and not self._ascending
2666 2666
2667 2667 def first(self):
2668 2668 if self:
2669 2669 if self._ascending is None:
2670 2670 return self._list[0]
2671 2671 elif self._ascending:
2672 2672 return self._asclist[0]
2673 2673 else:
2674 2674 return self._asclist[-1]
2675 2675 return None
2676 2676
2677 2677 def last(self):
2678 2678 if self:
2679 2679 if self._ascending is None:
2680 2680 return self._list[-1]
2681 2681 elif self._ascending:
2682 2682 return self._asclist[-1]
2683 2683 else:
2684 2684 return self._asclist[0]
2685 2685 return None
2686 2686
2687 2687 def __repr__(self):
2688 2688 d = {None: '', False: '-', True: '+'}[self._ascending]
2689 2689 s = _formatsetrepr(self._datarepr)
2690 2690 if not s:
2691 2691 l = self._list
2692 2692 # if _list has been built from a set, it might have a different
2693 2693 # order from one python implementation to another.
2694 2694 # We fall back to the sorted version for stable output.
2695 2695 if self._ascending is not None:
2696 2696 l = self._asclist
2697 2697 s = repr(l)
2698 2698 return '<%s%s %s>' % (type(self).__name__, d, s)
2699 2699
2700 2700 class filteredset(abstractsmartset):
2701 2701 """Duck type for baseset class which iterates lazily over the revisions in
2702 2702 the subset and contains a function which tests for membership in the
2703 2703 revset
2704 2704 """
2705 2705 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2706 2706 """
2707 2707 condition: a function that decides whether a revision in the subset
2708 2708 belongs to the revset or not.
2709 2709 condrepr: a tuple of (format, obj, ...), a function or an object that
2710 2710 provides a printable representation of the given condition.
2711 2711 """
2712 2712 self._subset = subset
2713 2713 self._condition = condition
2714 2714 self._condrepr = condrepr
2715 2715
2716 2716 def __contains__(self, x):
2717 2717 return x in self._subset and self._condition(x)
2718 2718
2719 2719 def __iter__(self):
2720 2720 return self._iterfilter(self._subset)
2721 2721
2722 2722 def _iterfilter(self, it):
2723 2723 cond = self._condition
2724 2724 for x in it:
2725 2725 if cond(x):
2726 2726 yield x
2727 2727
2728 2728 @property
2729 2729 def fastasc(self):
2730 2730 it = self._subset.fastasc
2731 2731 if it is None:
2732 2732 return None
2733 2733 return lambda: self._iterfilter(it())
2734 2734
2735 2735 @property
2736 2736 def fastdesc(self):
2737 2737 it = self._subset.fastdesc
2738 2738 if it is None:
2739 2739 return None
2740 2740 return lambda: self._iterfilter(it())
2741 2741
2742 2742 def __nonzero__(self):
2743 2743 fast = self.fastasc
2744 2744 if fast is None:
2745 2745 fast = self.fastdesc
2746 2746 if fast is not None:
2747 2747 it = fast()
2748 2748 else:
2749 2749 it = self
2750 2750
2751 2751 for r in it:
2752 2752 return True
2753 2753 return False
2754 2754
2755 2755 def __len__(self):
2756 2756 # Basic implementation to be changed in future patches.
2757 2757 # Until this gets improved, we use a generator expression
2758 2758 # here, since a list comprehension is free to call __len__ again,
2759 2759 # causing infinite recursion.
2760 2760 l = baseset(r for r in self)
2761 2761 return len(l)
2762 2762
2763 2763 def sort(self, reverse=False):
2764 2764 self._subset.sort(reverse=reverse)
2765 2765
2766 2766 def reverse(self):
2767 2767 self._subset.reverse()
2768 2768
2769 2769 def isascending(self):
2770 2770 return self._subset.isascending()
2771 2771
2772 2772 def isdescending(self):
2773 2773 return self._subset.isdescending()
2774 2774
2775 2775 def first(self):
2776 2776 for x in self:
2777 2777 return x
2778 2778 return None
2779 2779
2780 2780 def last(self):
2781 2781 it = None
2782 2782 if self.isascending():
2783 2783 it = self.fastdesc
2784 2784 elif self.isdescending():
2785 2785 it = self.fastasc
2786 2786 if it is not None:
2787 2787 for x in it():
2788 2788 return x
2789 2789 return None #empty case
2790 2790 else:
2791 2791 x = None
2792 2792 for x in self:
2793 2793 pass
2794 2794 return x
2795 2795
2796 2796 def __repr__(self):
2797 2797 xs = [repr(self._subset)]
2798 2798 s = _formatsetrepr(self._condrepr)
2799 2799 if s:
2800 2800 xs.append(s)
2801 2801 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
2802 2802
2803 2803 def _iterordered(ascending, iter1, iter2):
2804 2804 """produce an ordered iteration from two iterators with the same order
2805 2805
2806 2806 The ascending parameter indicates the iteration direction.
2807 2807 """
2808 2808 choice = max
2809 2809 if ascending:
2810 2810 choice = min
2811 2811
2812 2812 val1 = None
2813 2813 val2 = None
2814 2814 try:
2815 2815 # Consume both iterators in an ordered way until one is empty
2816 2816 while True:
2817 2817 if val1 is None:
2818 2818 val1 = iter1.next()
2819 2819 if val2 is None:
2820 2820 val2 = iter2.next()
2821 2821 next = choice(val1, val2)
2822 2822 yield next
2823 2823 if val1 == next:
2824 2824 val1 = None
2825 2825 if val2 == next:
2826 2826 val2 = None
2827 2827 except StopIteration:
2828 2828 # Flush any remaining values and consume the other one
2829 2829 it = iter2
2830 2830 if val1 is not None:
2831 2831 yield val1
2832 2832 it = iter1
2833 2833 elif val2 is not None:
2834 2834 # might have been equality and both are empty
2835 2835 yield val2
2836 2836 for val in it:
2837 2837 yield val
2838 2838
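# Illustrative example: _iterordered(True, iter([1, 3, 5]), iter([2, 3, 6]))
# yields 1, 2, 3, 5, 6, i.e. the ordered union of the two inputs with the
# duplicate value collapsed.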
2839 2839 class addset(abstractsmartset):
2840 2840 """Represent the addition of two sets
2841 2841
2842 2842 Wrapper structure for lazily adding two structures without losing much
2843 2843 performance on the __contains__ method
2844 2844
2845 2845 If the ascending attribute is set, that means the two structures are
2846 2846 ordered in either an ascending or descending way. Therefore, we can add
2847 2847 them maintaining the order by iterating over both at the same time
2848 2848
2849 2849 >>> xs = baseset([0, 3, 2])
2850 2850 >>> ys = baseset([5, 2, 4])
2851 2851
2852 2852 >>> rs = addset(xs, ys)
2853 2853 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
2854 2854 (True, True, False, True, 0, 4)
2855 2855 >>> rs = addset(xs, baseset([]))
2856 2856 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
2857 2857 (True, True, False, 0, 2)
2858 2858 >>> rs = addset(baseset([]), baseset([]))
2859 2859 >>> bool(rs), 0 in rs, rs.first(), rs.last()
2860 2860 (False, False, None, None)
2861 2861
2862 2862 iterate unsorted:
2863 2863 >>> rs = addset(xs, ys)
2864 2864 >>> # (use generator because pypy could call len())
2865 2865 >>> list(x for x in rs) # without _genlist
2866 2866 [0, 3, 2, 5, 4]
2867 2867 >>> assert not rs._genlist
2868 2868 >>> len(rs)
2869 2869 5
2870 2870 >>> [x for x in rs] # with _genlist
2871 2871 [0, 3, 2, 5, 4]
2872 2872 >>> assert rs._genlist
2873 2873
2874 2874 iterate ascending:
2875 2875 >>> rs = addset(xs, ys, ascending=True)
2876 2876 >>> # (use generator because pypy could call len())
2877 2877 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
2878 2878 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2879 2879 >>> assert not rs._asclist
2880 2880 >>> len(rs)
2881 2881 5
2882 2882 >>> [x for x in rs], [x for x in rs.fastasc()]
2883 2883 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2884 2884 >>> assert rs._asclist
2885 2885
2886 2886 iterate descending:
2887 2887 >>> rs = addset(xs, ys, ascending=False)
2888 2888 >>> # (use generator because pypy could call len())
2889 2889 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
2890 2890 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2891 2891 >>> assert not rs._asclist
2892 2892 >>> len(rs)
2893 2893 5
2894 2894 >>> [x for x in rs], [x for x in rs.fastdesc()]
2895 2895 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2896 2896 >>> assert rs._asclist
2897 2897
2898 2898 iterate ascending without fastasc:
2899 2899 >>> rs = addset(xs, generatorset(ys), ascending=True)
2900 2900 >>> assert rs.fastasc is None
2901 2901 >>> [x for x in rs]
2902 2902 [0, 2, 3, 4, 5]
2903 2903
2904 2904 iterate descending without fastdesc:
2905 2905 >>> rs = addset(generatorset(xs), ys, ascending=False)
2906 2906 >>> assert rs.fastdesc is None
2907 2907 >>> [x for x in rs]
2908 2908 [5, 4, 3, 2, 0]
2909 2909 """
2910 2910 def __init__(self, revs1, revs2, ascending=None):
2911 2911 self._r1 = revs1
2912 2912 self._r2 = revs2
2913 2913 self._iter = None
2914 2914 self._ascending = ascending
2915 2915 self._genlist = None
2916 2916 self._asclist = None
2917 2917
2918 2918 def __len__(self):
2919 2919 return len(self._list)
2920 2920
2921 2921 def __nonzero__(self):
2922 2922 return bool(self._r1) or bool(self._r2)
2923 2923
2924 2924 @util.propertycache
2925 2925 def _list(self):
2926 2926 if not self._genlist:
2927 2927 self._genlist = baseset(iter(self))
2928 2928 return self._genlist
2929 2929
2930 2930 def __iter__(self):
2931 2931 """Iterate over both collections without repeating elements
2932 2932
2933 2933 If the ascending attribute is not set, iterate over the first one and
2934 2934 then over the second one, checking for membership in the first one so we
2935 2935 don't yield any duplicates.
2936 2936
2937 2937 If the ascending attribute is set, iterate over both collections at the
2938 2938 same time, yielding only one value at a time in the given order.
2939 2939 """
2940 2940 if self._ascending is None:
2941 2941 if self._genlist:
2942 2942 return iter(self._genlist)
2943 2943 def arbitraryordergen():
2944 2944 for r in self._r1:
2945 2945 yield r
2946 2946 inr1 = self._r1.__contains__
2947 2947 for r in self._r2:
2948 2948 if not inr1(r):
2949 2949 yield r
2950 2950 return arbitraryordergen()
2951 2951 # try to use our own fast iterator if it exists
2952 2952 self._trysetasclist()
2953 2953 if self._ascending:
2954 2954 attr = 'fastasc'
2955 2955 else:
2956 2956 attr = 'fastdesc'
2957 2957 it = getattr(self, attr)
2958 2958 if it is not None:
2959 2959 return it()
2960 2960 # maybe half of the components support fast iteration
2961 2961 # get iterator for _r1
2962 2962 iter1 = getattr(self._r1, attr)
2963 2963 if iter1 is None:
2964 2964 # let's avoid side effect (not sure it matters)
2965 2965 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
2966 2966 else:
2967 2967 iter1 = iter1()
2968 2968 # get iterator for _r2
2969 2969 iter2 = getattr(self._r2, attr)
2970 2970 if iter2 is None:
2971 2971 # let's avoid side effect (not sure it matters)
2972 2972 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
2973 2973 else:
2974 2974 iter2 = iter2()
2975 2975 return _iterordered(self._ascending, iter1, iter2)
2976 2976
2977 2977 def _trysetasclist(self):
2978 2978 """populate the _asclist attribute if possible and necessary"""
2979 2979 if self._genlist is not None and self._asclist is None:
2980 2980 self._asclist = sorted(self._genlist)
2981 2981
2982 2982 @property
2983 2983 def fastasc(self):
2984 2984 self._trysetasclist()
2985 2985 if self._asclist is not None:
2986 2986 return self._asclist.__iter__
2987 2987 iter1 = self._r1.fastasc
2988 2988 iter2 = self._r2.fastasc
2989 2989 if None in (iter1, iter2):
2990 2990 return None
2991 2991 return lambda: _iterordered(True, iter1(), iter2())
2992 2992
2993 2993 @property
2994 2994 def fastdesc(self):
2995 2995 self._trysetasclist()
2996 2996 if self._asclist is not None:
2997 2997 return self._asclist.__reversed__
2998 2998 iter1 = self._r1.fastdesc
2999 2999 iter2 = self._r2.fastdesc
3000 3000 if None in (iter1, iter2):
3001 3001 return None
3002 3002 return lambda: _iterordered(False, iter1(), iter2())
3003 3003
3004 3004 def __contains__(self, x):
3005 3005 return x in self._r1 or x in self._r2
3006 3006
3007 3007 def sort(self, reverse=False):
3008 3008 """Sort the added set
3009 3009
3010 3010 For this we use the cached list with all the generated values and if we
3011 3011 know they are ascending or descending we can sort them in a smart way.
3012 3012 """
3013 3013 self._ascending = not reverse
3014 3014
3015 3015 def isascending(self):
3016 3016 return self._ascending is not None and self._ascending
3017 3017
3018 3018 def isdescending(self):
3019 3019 return self._ascending is not None and not self._ascending
3020 3020
3021 3021 def reverse(self):
3022 3022 if self._ascending is None:
3023 3023 self._list.reverse()
3024 3024 else:
3025 3025 self._ascending = not self._ascending
3026 3026
3027 3027 def first(self):
3028 3028 for x in self:
3029 3029 return x
3030 3030 return None
3031 3031
3032 3032 def last(self):
3033 3033 self.reverse()
3034 3034 val = self.first()
3035 3035 self.reverse()
3036 3036 return val
3037 3037
3038 3038 def __repr__(self):
3039 3039 d = {None: '', False: '-', True: '+'}[self._ascending]
3040 3040 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3041 3041
3042 3042 class generatorset(abstractsmartset):
3043 3043 """Wrap a generator for lazy iteration
3044 3044
3045 3045 Wrapper structure for generators that provides lazy membership and can
3046 3046 be iterated more than once.
3047 3047 When asked for membership it generates values until either it finds the
3048 3048 requested one or has gone through all the elements in the generator
3049 3049 """
3050 3050 def __init__(self, gen, iterasc=None):
3051 3051 """
3052 3052 gen: a generator producing the values for the generatorset.
3053 3053 """
3054 3054 self._gen = gen
3055 3055 self._asclist = None
3056 3056 self._cache = {}
3057 3057 self._genlist = []
3058 3058 self._finished = False
3059 3059 self._ascending = True
3060 3060 if iterasc is not None:
3061 3061 if iterasc:
3062 3062 self.fastasc = self._iterator
3063 3063 self.__contains__ = self._asccontains
3064 3064 else:
3065 3065 self.fastdesc = self._iterator
3066 3066 self.__contains__ = self._desccontains
3067 3067
3068 3068 def __nonzero__(self):
3069 3069 # Do not use 'for r in self' because it will enforce the iteration
3070 3070 # order (default ascending), possibly unrolling a whole descending
3071 3071 # iterator.
3072 3072 if self._genlist:
3073 3073 return True
3074 3074 for r in self._consumegen():
3075 3075 return True
3076 3076 return False
3077 3077
3078 3078 def __contains__(self, x):
3079 3079 if x in self._cache:
3080 3080 return self._cache[x]
3081 3081
3082 3082 # Use new values only, as existing values would be cached.
3083 3083 for l in self._consumegen():
3084 3084 if l == x:
3085 3085 return True
3086 3086
3087 3087 self._cache[x] = False
3088 3088 return False
3089 3089
3090 3090 def _asccontains(self, x):
3091 3091 """version of contains optimised for ascending generator"""
3092 3092 if x in self._cache:
3093 3093 return self._cache[x]
3094 3094
3095 3095 # Use new values only, as existing values would be cached.
3096 3096 for l in self._consumegen():
3097 3097 if l == x:
3098 3098 return True
3099 3099 if l > x:
3100 3100 break
3101 3101
3102 3102 self._cache[x] = False
3103 3103 return False
3104 3104
3105 3105 def _desccontains(self, x):
3106 3106 """version of contains optimised for descending generator"""
3107 3107 if x in self._cache:
3108 3108 return self._cache[x]
3109 3109
3110 3110 # Use new values only, as existing values would be cached.
3111 3111 for l in self._consumegen():
3112 3112 if l == x:
3113 3113 return True
3114 3114 if l < x:
3115 3115 break
3116 3116
3117 3117 self._cache[x] = False
3118 3118 return False
3119 3119
3120 3120 def __iter__(self):
3121 3121 if self._ascending:
3122 3122 it = self.fastasc
3123 3123 else:
3124 3124 it = self.fastdesc
3125 3125 if it is not None:
3126 3126 return it()
3127 3127 # we need to consume the iterator
3128 3128 for x in self._consumegen():
3129 3129 pass
3130 3130 # recall the same code, now that fastasc/fastdesc are available
3131 3131 return iter(self)
3132 3132
3133 3133 def _iterator(self):
3134 3134 if self._finished:
3135 3135 return iter(self._genlist)
3136 3136
3137 3137 # We have to use this complex iteration strategy to allow multiple
3138 3138 # iterations at the same time. We need to be able to pick up revisions
3139 3139 # that another iterator has already pulled from _consumegen into genlist.
3140 3140 #
3141 3141 # Getting rid of it would provide about a 15% speedup on this
3142 3142 # iteration.
3143 3143 genlist = self._genlist
3144 3144 nextrev = self._consumegen().next
3145 3145 _len = len # cache global lookup
3146 3146 def gen():
3147 3147 i = 0
3148 3148 while True:
3149 3149 if i < _len(genlist):
3150 3150 yield genlist[i]
3151 3151 else:
3152 3152 yield nextrev()
3153 3153 i += 1
3154 3154 return gen()
3155 3155
3156 3156 def _consumegen(self):
3157 3157 cache = self._cache
3158 3158 genlist = self._genlist.append
3159 3159 for item in self._gen:
3160 3160 cache[item] = True
3161 3161 genlist(item)
3162 3162 yield item
3163 3163 if not self._finished:
3164 3164 self._finished = True
3165 3165 asc = self._genlist[:]
3166 3166 asc.sort()
3167 3167 self._asclist = asc
3168 3168 self.fastasc = asc.__iter__
3169 3169 self.fastdesc = asc.__reversed__
3170 3170
3171 3171 def __len__(self):
3172 3172 for x in self._consumegen():
3173 3173 pass
3174 3174 return len(self._genlist)
3175 3175
3176 3176 def sort(self, reverse=False):
3177 3177 self._ascending = not reverse
3178 3178
3179 3179 def reverse(self):
3180 3180 self._ascending = not self._ascending
3181 3181
3182 3182 def isascending(self):
3183 3183 return self._ascending
3184 3184
3185 3185 def isdescending(self):
3186 3186 return not self._ascending
3187 3187
3188 3188 def first(self):
3189 3189 if self._ascending:
3190 3190 it = self.fastasc
3191 3191 else:
3192 3192 it = self.fastdesc
3193 3193 if it is None:
3194 3194 # we need to consume all and try again
3195 3195 for x in self._consumegen():
3196 3196 pass
3197 3197 return self.first()
3198 3198 return next(it(), None)
3199 3199
3200 3200 def last(self):
3201 3201 if self._ascending:
3202 3202 it = self.fastdesc
3203 3203 else:
3204 3204 it = self.fastasc
3205 3205 if it is None:
3206 3206 # we need to consume all and try again
3207 3207 for x in self._consumegen():
3208 3208 pass
3209 3209 return self.last()
3210 3210 return next(it(), None)
3211 3211
3212 3212 def __repr__(self):
3213 3213 d = {False: '-', True: '+'}[self._ascending]
3214 3214 return '<%s%s>' % (type(self).__name__, d)
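A short sketch of how the laziness described in the class docstring plays out, assuming mercurial.revset is importable; the values fed to the generator are arbitrary:

from mercurial import revset

gs = revset.generatorset(iter([0, 2, 5, 9]), iterasc=True)
2 in gs         # True: the generator is consumed only until 2 is produced
list(gs)        # [0, 2, 5, 9] -- cached values are replayed, the rest is pulled lazily
len(gs)         # 4 -- forces full consumption via _consumegen()
gs.reverse()
gs.first()      # 9 -- fastdesc is available once the generator is exhausted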
3215 3215
3216 3216 class spanset(abstractsmartset):
3217 3217 """Duck type for the baseset class which represents a range of revisions and
3218 3218 can work lazily, without having the whole range in memory
3219 3219
3220 3220 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3221 3221 notable points:
3222 3222 - when x > y it will be automatically descending,
3223 3223 - revisions filtered by this repoview will be skipped.
3224 3224
3225 3225 """
3226 3226 def __init__(self, repo, start=0, end=None):
3227 3227 """
3228 3228 start: first revision included in the set
3229 3229 (defaults to 0)
3230 3230 end: first revision excluded (last + 1)
3231 3231 (defaults to len(repo))
3232 3232
3233 3233 Spanset will be descending if `end` < `start`.
3234 3234 """
3235 3235 if end is None:
3236 3236 end = len(repo)
3237 3237 self._ascending = start <= end
3238 3238 if not self._ascending:
3239 3239 start, end = end + 1, start + 1
3240 3240 self._start = start
3241 3241 self._end = end
3242 3242 self._hiddenrevs = repo.changelog.filteredrevs
3243 3243
3244 3244 def sort(self, reverse=False):
3245 3245 self._ascending = not reverse
3246 3246
3247 3247 def reverse(self):
3248 3248 self._ascending = not self._ascending
3249 3249
3250 3250 def _iterfilter(self, iterrange):
3251 3251 s = self._hiddenrevs
3252 3252 for r in iterrange:
3253 3253 if r not in s:
3254 3254 yield r
3255 3255
3256 3256 def __iter__(self):
3257 3257 if self._ascending:
3258 3258 return self.fastasc()
3259 3259 else:
3260 3260 return self.fastdesc()
3261 3261
3262 3262 def fastasc(self):
3263 3263 iterrange = xrange(self._start, self._end)
3264 3264 if self._hiddenrevs:
3265 3265 return self._iterfilter(iterrange)
3266 3266 return iter(iterrange)
3267 3267
3268 3268 def fastdesc(self):
3269 3269 iterrange = xrange(self._end - 1, self._start - 1, -1)
3270 3270 if self._hiddenrevs:
3271 3271 return self._iterfilter(iterrange)
3272 3272 return iter(iterrange)
3273 3273
3274 3274 def __contains__(self, rev):
3275 3275 hidden = self._hiddenrevs
3276 3276 return ((self._start <= rev < self._end)
3277 3277 and not (hidden and rev in hidden))
3278 3278
3279 3279 def __nonzero__(self):
3280 3280 for r in self:
3281 3281 return True
3282 3282 return False
3283 3283
3284 3284 def __len__(self):
3285 3285 if not self._hiddenrevs:
3286 3286 return abs(self._end - self._start)
3287 3287 else:
3288 3288 count = 0
3289 3289 start = self._start
3290 3290 end = self._end
3291 3291 for rev in self._hiddenrevs:
3292 3292 if (end < rev <= start) or (start <= rev < end):
3293 3293 count += 1
3294 3294 return abs(self._end - self._start) - count
3295 3295
3296 3296 def isascending(self):
3297 3297 return self._ascending
3298 3298
3299 3299 def isdescending(self):
3300 3300 return not self._ascending
3301 3301
3302 3302 def first(self):
3303 3303 if self._ascending:
3304 3304 it = self.fastasc
3305 3305 else:
3306 3306 it = self.fastdesc
3307 3307 for x in it():
3308 3308 return x
3309 3309 return None
3310 3310
3311 3311 def last(self):
3312 3312 if self._ascending:
3313 3313 it = self.fastdesc
3314 3314 else:
3315 3315 it = self.fastasc
3316 3316 for x in it():
3317 3317 return x
3318 3318 return None
3319 3319
3320 3320 def __repr__(self):
3321 3321 d = {False: '-', True: '+'}[self._ascending]
3322 3322 return '<%s%s %d:%d>' % (type(self).__name__, d,
3323 3323 self._start, self._end - 1)
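A rough sketch of the xrange-like behaviour documented above; `repo` stands for an assumed existing repository object with at least eight revisions and no hidden revisions:

from mercurial import revset

s = revset.spanset(repo, 2, 7)   # like xrange(2, 7): revisions 2..6
list(s)                          # [2, 3, 4, 5, 6]
d = revset.spanset(repo, 7, 2)   # start > end, so automatically descending
list(d)                          # [7, 6, 5, 4, 3]
2 in d                           # False -- the smaller bound is excluded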
3324 3324
3325 3325 class fullreposet(spanset):
3326 3326 """a set containing all revisions in the repo
3327 3327
3328 3328 This class exists to host special optimizations and magic to handle virtual
3329 3329 revisions such as "null".
3330 3330 """
3331 3331
3332 3332 def __init__(self, repo):
3333 3333 super(fullreposet, self).__init__(repo)
3334 3334
3335 3335 def __and__(self, other):
3336 3336 """As self contains the whole repo, everything in the other set should also
3337 3337 be in self. Therefore `self & other = other`.
3338 3338
3339 3339 This boldly assumes the other contains valid revs only.
3340 3340 """
3341 3341 # other is not a smartset, make it so
3342 3342 if not util.safehasattr(other, 'isascending'):
3343 3343 # filter out hidden revisions
3344 3344 # (this boldly assumes all smartsets are pure)
3345 3345 #
3346 3346 # `other` was used with "&", let's assume this is a set-like
3347 3347 # object.
3348 3348 other = baseset(other - self._hiddenrevs)
3349 3349
3350 3350 # XXX As fullreposet is also used as bootstrap, this is wrong.
3351 3351 #
3352 3352 # With a giveme312() revset returning [3,1,2], this makes
3353 3353 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3354 3354 # We cannot just drop it because other usages still need to sort it:
3355 3355 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3356 3356 #
3357 3357 # There are also some faulty revset implementations that rely on it
3358 3358 # (e.g. children as of its state in e8075329c5fb)
3359 3359 #
3360 3360 # When we fix the two points above we can move this into the if clause
3361 3361 other.sort(reverse=self.isdescending())
3362 3362 return other
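A sketch of the intersection behaviour implemented above; `repo` is again an assumed repository object, and the right-hand operand has to be set-like because it is subtracted against the hidden revisions before being wrapped in a baseset:

from mercurial import revset

everything = revset.fullreposet(repo)
result = everything & set([3, 1, 2])   # wrapped into a baseset, hidden revs dropped
list(result)                           # [1, 2, 3] -- forced into this set's order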
3363 3363
3364 3364 def prettyformatset(revs):
3365 3365 lines = []
3366 3366 rs = repr(revs)
3367 3367 p = 0
3368 3368 while p < len(rs):
3369 3369 q = rs.find('<', p + 1)
3370 3370 if q < 0:
3371 3371 q = len(rs)
3372 3372 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3373 3373 assert l >= 0
3374 3374 lines.append((l, rs[p:q].rstrip()))
3375 3375 p = q
3376 3376 return '\n'.join(' ' * l + s for l, s in lines)
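An illustration of the indentation prettyformatset() produces; the object below only fakes the nested repr a real smartset combination would give:

from mercurial import revset

class _fakeset(object):
    def __repr__(self):
        return '<addset+ <baseset [0, 1]>, <spanset+ 2:5>>'

print(revset.prettyformatset(_fakeset()))
# <addset+
#  <baseset [0, 1]>,
#  <spanset+ 2:5>>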
3377 3377
3378 3378 def loadpredicate(ui, extname, registrarobj):
3379 3379 """Load revset predicates from specified registrarobj
3380 3380 """
3381 3381 for name, func in registrarobj._table.iteritems():
3382 3382 symbols[name] = func
3383 3383 if func._safe:
3384 3384 safesymbols.add(name)
3385 3385
3386 3386 # load built-in predicates explicitly to setup safesymbols
3387 3387 loadpredicate(None, None, predicate)
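For comparison, an extension would typically build its own table with registrar.revsetpredicate() and have that table fed to loadpredicate(); the predicate name and body below are hypothetical:

from mercurial import registrar

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('examplerevs(n)', safe=True)
def examplerevs(repo, subset, x):
    """Hypothetical predicate; a real one would filter `subset`."""
    return subset

# loadpredicate(ui, extname, revsetpredicate) copies 'examplerevs' into
# symbols and, because safe=True, adds it to safesymbols as well.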
3388 3388
3389 3389 # tell hggettext to extract docstrings from these functions:
3390 3390 i18nfunctions = symbols.values()