revset: inline isvalidfunc(), getfuncname() and getfuncargs()...
Yuya Nishihara
r28707:af5f90f2 default
@@ -1,3605 +1,3586 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
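# A minimal reading sketch for the table above (assuming it is consumed by
# the generic parser in mercurial/parser.py): each entry is
# (binding strength, primary, prefix, infix, suffix). For instance
#
#   "~": (18, None, None, ("ancestor", 18), None)
#
# declares ``~`` as an infix operator of strength 18 that builds an
# 'ancestor' node, so an input such as ``foo~2`` should parse roughly as
#
#   ('ancestor', ('symbol', 'foo'), ('symbol', '2'))
#
# which ancestorspec() below evaluates via ``int(n[1])``.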
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
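# A small doctest-style sketch in the spirit of the example in the docstring
# above (token positions follow the scanning loop in tokenize()):
#
#   >>> list(tokenize("1::2"))
#   [('symbol', '1', 0), ('::', None, 1), ('symbol', '2', 3), ('end', None, 4)]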
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 def isvalidfunc(tree):
336 """Examine whether specified ``tree`` is valid ``func`` or not
337 """
338 return tree[0] == 'func' and tree[1][0] == 'symbol'
339
340 def getfuncname(tree):
341 """Get function name from valid ``func`` in ``tree``
342
343 This assumes that ``tree`` is already examined by ``isvalidfunc``.
344 """
345 return tree[1][1]
346
347 def getfuncargs(tree):
348 """Get list of function arguments from valid ``func`` in ``tree``
349
350 This assumes that ``tree`` is already examined by ``isvalidfunc``.
351 """
352 return getlist(tree[2])
353
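# With the three helpers above removed, callers are expected to open-code
# the same checks; a minimal sketch of the equivalences, taken directly from
# the deleted bodies:
#
#   isvalidfunc(tree)  <=>  tree[0] == 'func' and tree[1][0] == 'symbol'
#   getfuncname(tree)  <=>  tree[1][1]
#   getfuncargs(tree)  <=>  getlist(tree[2])
#
# (getset() below already performs the first check inline.)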
354 335 def getset(repo, subset, x):
355 336 if not x:
356 337 raise error.ParseError(_("missing argument"))
357 338 s = methods[x[0]](repo, subset, *x[1:])
358 339 if util.safehasattr(s, 'isascending'):
359 340 return s
360 341 if (repo.ui.configbool('devel', 'all-warnings')
361 342 or repo.ui.configbool('devel', 'old-revset')):
362 343 # else case should not happen, because all non-func are internal,
363 344 # ignoring for now.
364 345 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
365 346 repo.ui.develwarn('revset "%s" use list instead of smartset, '
366 347 '(upgrade your code)' % x[1][1])
367 348 return baseset(s)
368 349
369 350 def _getrevsource(repo, r):
370 351 extra = repo[r].extra()
371 352 for label in ('source', 'transplant_source', 'rebase_source'):
372 353 if label in extra:
373 354 try:
374 355 return repo[extra[label]].rev()
375 356 except error.RepoLookupError:
376 357 pass
377 358 return None
378 359
379 360 # operator methods
380 361
381 362 def stringset(repo, subset, x):
382 363 x = repo[x].rev()
383 364 if (x in subset
384 365 or x == node.nullrev and isinstance(subset, fullreposet)):
385 366 return baseset([x])
386 367 return baseset()
387 368
388 369 def rangeset(repo, subset, x, y):
389 370 m = getset(repo, fullreposet(repo), x)
390 371 n = getset(repo, fullreposet(repo), y)
391 372
392 373 if not m or not n:
393 374 return baseset()
394 375 m, n = m.first(), n.last()
395 376
396 377 if m == n:
397 378 r = baseset([m])
398 379 elif n == node.wdirrev:
399 380 r = spanset(repo, m, len(repo)) + baseset([n])
400 381 elif m == node.wdirrev:
401 382 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
402 383 elif m < n:
403 384 r = spanset(repo, m, n + 1)
404 385 else:
405 386 r = spanset(repo, m, n - 1)
406 387 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
407 388 # necessary to ensure we preserve the order in subset.
408 389 #
409 390 # This has performance implications; carrying the sorting over when
410 391 # possible would be more efficient.
411 392 return r & subset
412 393
413 394 def dagrange(repo, subset, x, y):
414 395 r = fullreposet(repo)
415 396 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
416 397 includepath=True)
417 398 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
418 399 # necessary to ensure we preserve the order in subset.
419 400 return xs & subset
420 401
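# A small usage sketch for the ``x::y`` form evaluated by dagrange() above
# (standard revset syntax; '1.0' is only a placeholder tag name):
#
#   hg log -r '1.0::tip'
#
# selects changesets that are both descendants of 1.0 and ancestors of tip,
# computed here via reachableroots(..., includepath=True).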
421 402 def andset(repo, subset, x, y):
422 403 return getset(repo, getset(repo, subset, x), y)
423 404
424 405 def differenceset(repo, subset, x, y):
425 406 return getset(repo, subset, x) - getset(repo, subset, y)
426 407
427 408 def orset(repo, subset, *xs):
428 409 assert xs
429 410 if len(xs) == 1:
430 411 return getset(repo, subset, xs[0])
431 412 p = len(xs) // 2
432 413 a = orset(repo, subset, *xs[:p])
433 414 b = orset(repo, subset, *xs[p:])
434 415 return a + b
435 416
436 417 def notset(repo, subset, x):
437 418 return subset - getset(repo, subset, x)
438 419
439 420 def listset(repo, subset, *xs):
440 421 raise error.ParseError(_("can't use a list in this context"),
441 422 hint=_('see hg help "revsets.x or y"'))
442 423
443 424 def keyvaluepair(repo, subset, k, v):
444 425 raise error.ParseError(_("can't use a key-value pair in this context"))
445 426
446 427 def func(repo, subset, a, b):
447 428 if a[0] == 'symbol' and a[1] in symbols:
448 429 return symbols[a[1]](repo, subset, b)
449 430
450 431 keep = lambda fn: getattr(fn, '__doc__', None) is not None
451 432
452 433 syms = [s for (s, fn) in symbols.items() if keep(fn)]
453 434 raise error.UnknownIdentifier(a[1], syms)
454 435
455 436 # functions
456 437
457 438 # symbols are callables like:
458 439 # fn(repo, subset, x)
459 440 # with:
460 441 # repo - current repository instance
461 442 # subset - of revisions to be examined
462 443 # x - argument in tree form
463 444 symbols = {}
464 445
465 446 # symbols which can't be used for a DoS attack for any given input
466 447 # (e.g. those which accept regexes as plain strings shouldn't be included)
467 448 # functions that just return a lot of changesets (like all) don't count here
468 449 safesymbols = set()
469 450
470 451 predicate = registrar.revsetpredicate()
471 452
472 453 @predicate('_destupdate')
473 454 def _destupdate(repo, subset, x):
474 455 # experimental revset for update destination
475 456 args = getargsdict(x, 'limit', 'clean check')
476 457 return subset & baseset([destutil.destupdate(repo, **args)[0]])
477 458
478 459 @predicate('_destmerge')
479 460 def _destmerge(repo, subset, x):
480 461 # experimental revset for merge destination
481 462 sourceset = None
482 463 if x is not None:
483 464 sourceset = getset(repo, fullreposet(repo), x)
484 465 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
485 466
486 467 @predicate('adds(pattern)', safe=True)
487 468 def adds(repo, subset, x):
488 469 """Changesets that add a file matching pattern.
489 470
490 471 The pattern without explicit kind like ``glob:`` is expected to be
491 472 relative to the current directory and match against a file or a
492 473 directory.
493 474 """
494 475 # i18n: "adds" is a keyword
495 476 pat = getstring(x, _("adds requires a pattern"))
496 477 return checkstatus(repo, subset, pat, 1)
497 478
498 479 @predicate('ancestor(*changeset)', safe=True)
499 480 def ancestor(repo, subset, x):
500 481 """A greatest common ancestor of the changesets.
501 482
502 483 Accepts 0 or more changesets.
503 484 Will return empty list when passed no args.
504 485 Greatest common ancestor of a single changeset is that changeset.
505 486 """
506 487 # i18n: "ancestor" is a keyword
507 488 l = getlist(x)
508 489 rl = fullreposet(repo)
509 490 anc = None
510 491
511 492 # (getset(repo, rl, i) for i in l) generates a list of lists
512 493 for revs in (getset(repo, rl, i) for i in l):
513 494 for r in revs:
514 495 if anc is None:
515 496 anc = repo[r]
516 497 else:
517 498 anc = anc.ancestor(repo[r])
518 499
519 500 if anc is not None and anc.rev() in subset:
520 501 return baseset([anc.rev()])
521 502 return baseset()
522 503
523 504 def _ancestors(repo, subset, x, followfirst=False):
524 505 heads = getset(repo, fullreposet(repo), x)
525 506 if not heads:
526 507 return baseset()
527 508 s = _revancestors(repo, heads, followfirst)
528 509 return subset & s
529 510
530 511 @predicate('ancestors(set)', safe=True)
531 512 def ancestors(repo, subset, x):
532 513 """Changesets that are ancestors of a changeset in set.
533 514 """
534 515 return _ancestors(repo, subset, x)
535 516
536 517 @predicate('_firstancestors', safe=True)
537 518 def _firstancestors(repo, subset, x):
538 519 # ``_firstancestors(set)``
539 520 # Like ``ancestors(set)`` but follows only the first parents.
540 521 return _ancestors(repo, subset, x, followfirst=True)
541 522
542 523 def ancestorspec(repo, subset, x, n):
543 524 """``set~n``
544 525 Changesets that are the Nth ancestor (first parents only) of a changeset
545 526 in set.
546 527 """
547 528 try:
548 529 n = int(n[1])
549 530 except (TypeError, ValueError):
550 531 raise error.ParseError(_("~ expects a number"))
551 532 ps = set()
552 533 cl = repo.changelog
553 534 for r in getset(repo, fullreposet(repo), x):
554 535 for i in range(n):
555 536 r = cl.parentrevs(r)[0]
556 537 ps.add(r)
557 538 return subset & ps
558 539
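# A small usage sketch for the ``set~n`` form handled by ancestorspec()
# above (standard revset syntax):
#
#   hg log -r 'tip~2'
#
# selects the second ancestor of tip following first parents only, i.e. the
# same revision as 'p1(p1(tip))'.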
559 540 @predicate('author(string)', safe=True)
560 541 def author(repo, subset, x):
561 542 """Alias for ``user(string)``.
562 543 """
563 544 # i18n: "author" is a keyword
564 545 n = encoding.lower(getstring(x, _("author requires a string")))
565 546 kind, pattern, matcher = _substringmatcher(n)
566 547 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
567 548 condrepr=('<user %r>', n))
568 549
569 550 @predicate('bisect(string)', safe=True)
570 551 def bisect(repo, subset, x):
571 552 """Changesets marked in the specified bisect status:
572 553
573 554 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
574 555 - ``goods``, ``bads`` : csets topologically good/bad
575 556 - ``range`` : csets taking part in the bisection
576 557 - ``pruned`` : csets that are goods, bads or skipped
577 558 - ``untested`` : csets whose fate is yet unknown
578 559 - ``ignored`` : csets ignored due to DAG topology
579 560 - ``current`` : the cset currently being bisected
580 561 """
581 562 # i18n: "bisect" is a keyword
582 563 status = getstring(x, _("bisect requires a string")).lower()
583 564 state = set(hbisect.get(repo, status))
584 565 return subset & state
585 566
586 567 # Backward-compatibility
587 568 # - no help entry so that we do not advertise it any more
588 569 @predicate('bisected', safe=True)
589 570 def bisected(repo, subset, x):
590 571 return bisect(repo, subset, x)
591 572
592 573 @predicate('bookmark([name])', safe=True)
593 574 def bookmark(repo, subset, x):
594 575 """The named bookmark or all bookmarks.
595 576
596 577 If `name` starts with `re:`, the remainder of the name is treated as
597 578 a regular expression. To match a bookmark that actually starts with `re:`,
598 579 use the prefix `literal:`.
599 580 """
600 581 # i18n: "bookmark" is a keyword
601 582 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
602 583 if args:
603 584 bm = getstring(args[0],
604 585 # i18n: "bookmark" is a keyword
605 586 _('the argument to bookmark must be a string'))
606 587 kind, pattern, matcher = util.stringmatcher(bm)
607 588 bms = set()
608 589 if kind == 'literal':
609 590 bmrev = repo._bookmarks.get(pattern, None)
610 591 if not bmrev:
611 592 raise error.RepoLookupError(_("bookmark '%s' does not exist")
612 593 % pattern)
613 594 bms.add(repo[bmrev].rev())
614 595 else:
615 596 matchrevs = set()
616 597 for name, bmrev in repo._bookmarks.iteritems():
617 598 if matcher(name):
618 599 matchrevs.add(bmrev)
619 600 if not matchrevs:
620 601 raise error.RepoLookupError(_("no bookmarks exist"
621 602 " that match '%s'") % pattern)
622 603 for bmrev in matchrevs:
623 604 bms.add(repo[bmrev].rev())
624 605 else:
625 606 bms = set([repo[r].rev()
626 607 for r in repo._bookmarks.values()])
627 608 bms -= set([node.nullrev])
628 609 return subset & bms
629 610
630 611 @predicate('branch(string or set)', safe=True)
631 612 def branch(repo, subset, x):
632 613 """
633 614 All changesets belonging to the given branch or the branches of the given
634 615 changesets.
635 616
636 617 If `string` starts with `re:`, the remainder of the name is treated as
637 618 a regular expression. To match a branch that actually starts with `re:`,
638 619 use the prefix `literal:`.
639 620 """
640 621 getbi = repo.revbranchcache().branchinfo
641 622
642 623 try:
643 624 b = getstring(x, '')
644 625 except error.ParseError:
645 626 # not a string, but another revspec, e.g. tip()
646 627 pass
647 628 else:
648 629 kind, pattern, matcher = util.stringmatcher(b)
649 630 if kind == 'literal':
650 631 # note: falls through to the revspec case if no branch with
651 632 # this name exists and pattern kind is not specified explicitly
652 633 if pattern in repo.branchmap():
653 634 return subset.filter(lambda r: matcher(getbi(r)[0]),
654 635 condrepr=('<branch %r>', b))
655 636 if b.startswith('literal:'):
656 637 raise error.RepoLookupError(_("branch '%s' does not exist")
657 638 % pattern)
658 639 else:
659 640 return subset.filter(lambda r: matcher(getbi(r)[0]),
660 641 condrepr=('<branch %r>', b))
661 642
662 643 s = getset(repo, fullreposet(repo), x)
663 644 b = set()
664 645 for r in s:
665 646 b.add(getbi(r)[0])
666 647 c = s.__contains__
667 648 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
668 649 condrepr=lambda: '<branch %r>' % sorted(b))
669 650
670 651 @predicate('bumped()', safe=True)
671 652 def bumped(repo, subset, x):
672 653 """Mutable changesets marked as successors of public changesets.
673 654
674 655 Only non-public and non-obsolete changesets can be `bumped`.
675 656 """
676 657 # i18n: "bumped" is a keyword
677 658 getargs(x, 0, 0, _("bumped takes no arguments"))
678 659 bumped = obsmod.getrevs(repo, 'bumped')
679 660 return subset & bumped
680 661
681 662 @predicate('bundle()', safe=True)
682 663 def bundle(repo, subset, x):
683 664 """Changesets in the bundle.
684 665
685 666 Bundle must be specified by the -R option."""
686 667
687 668 try:
688 669 bundlerevs = repo.changelog.bundlerevs
689 670 except AttributeError:
690 671 raise error.Abort(_("no bundle provided - specify with -R"))
691 672 return subset & bundlerevs
692 673
693 674 def checkstatus(repo, subset, pat, field):
694 675 hasset = matchmod.patkind(pat) == 'set'
695 676
696 677 mcache = [None]
697 678 def matches(x):
698 679 c = repo[x]
699 680 if not mcache[0] or hasset:
700 681 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
701 682 m = mcache[0]
702 683 fname = None
703 684 if not m.anypats() and len(m.files()) == 1:
704 685 fname = m.files()[0]
705 686 if fname is not None:
706 687 if fname not in c.files():
707 688 return False
708 689 else:
709 690 for f in c.files():
710 691 if m(f):
711 692 break
712 693 else:
713 694 return False
714 695 files = repo.status(c.p1().node(), c.node())[field]
715 696 if fname is not None:
716 697 if fname in files:
717 698 return True
718 699 else:
719 700 for f in files:
720 701 if m(f):
721 702 return True
722 703
723 704 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
724 705
725 706 def _children(repo, narrow, parentset):
726 707 if not parentset:
727 708 return baseset()
728 709 cs = set()
729 710 pr = repo.changelog.parentrevs
730 711 minrev = parentset.min()
731 712 for r in narrow:
732 713 if r <= minrev:
733 714 continue
734 715 for p in pr(r):
735 716 if p in parentset:
736 717 cs.add(r)
737 718 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
738 719 # This does not break because of other fullreposet misbehavior.
739 720 return baseset(cs)
740 721
741 722 @predicate('children(set)', safe=True)
742 723 def children(repo, subset, x):
743 724 """Child changesets of changesets in set.
744 725 """
745 726 s = getset(repo, fullreposet(repo), x)
746 727 cs = _children(repo, subset, s)
747 728 return subset & cs
748 729
749 730 @predicate('closed()', safe=True)
750 731 def closed(repo, subset, x):
751 732 """Changeset is closed.
752 733 """
753 734 # i18n: "closed" is a keyword
754 735 getargs(x, 0, 0, _("closed takes no arguments"))
755 736 return subset.filter(lambda r: repo[r].closesbranch(),
756 737 condrepr='<branch closed>')
757 738
758 739 @predicate('contains(pattern)')
759 740 def contains(repo, subset, x):
760 741 """The revision's manifest contains a file matching pattern (but might not
761 742 modify it). See :hg:`help patterns` for information about file patterns.
762 743
763 744 The pattern without explicit kind like ``glob:`` is expected to be
764 745 relative to the current directory and match against a file exactly
765 746 for efficiency.
766 747 """
767 748 # i18n: "contains" is a keyword
768 749 pat = getstring(x, _("contains requires a pattern"))
769 750
770 751 def matches(x):
771 752 if not matchmod.patkind(pat):
772 753 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
773 754 if pats in repo[x]:
774 755 return True
775 756 else:
776 757 c = repo[x]
777 758 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
778 759 for f in c.manifest():
779 760 if m(f):
780 761 return True
781 762 return False
782 763
783 764 return subset.filter(matches, condrepr=('<contains %r>', pat))
784 765
785 766 @predicate('converted([id])', safe=True)
786 767 def converted(repo, subset, x):
787 768 """Changesets converted from the given identifier in the old repository if
788 769 present, or all converted changesets if no identifier is specified.
789 770 """
790 771
791 772 # There is exactly no chance of resolving the revision, so do a simple
792 773 # string compare and hope for the best
793 774
794 775 rev = None
795 776 # i18n: "converted" is a keyword
796 777 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
797 778 if l:
798 779 # i18n: "converted" is a keyword
799 780 rev = getstring(l[0], _('converted requires a revision'))
800 781
801 782 def _matchvalue(r):
802 783 source = repo[r].extra().get('convert_revision', None)
803 784 return source is not None and (rev is None or source.startswith(rev))
804 785
805 786 return subset.filter(lambda r: _matchvalue(r),
806 787 condrepr=('<converted %r>', rev))
807 788
808 789 @predicate('date(interval)', safe=True)
809 790 def date(repo, subset, x):
810 791 """Changesets within the interval, see :hg:`help dates`.
811 792 """
812 793 # i18n: "date" is a keyword
813 794 ds = getstring(x, _("date requires a string"))
814 795 dm = util.matchdate(ds)
815 796 return subset.filter(lambda x: dm(repo[x].date()[0]),
816 797 condrepr=('<date %r>', ds))
817 798
818 799 @predicate('desc(string)', safe=True)
819 800 def desc(repo, subset, x):
820 801 """Search commit message for string. The match is case-insensitive.
821 802 """
822 803 # i18n: "desc" is a keyword
823 804 ds = encoding.lower(getstring(x, _("desc requires a string")))
824 805
825 806 def matches(x):
826 807 c = repo[x]
827 808 return ds in encoding.lower(c.description())
828 809
829 810 return subset.filter(matches, condrepr=('<desc %r>', ds))
830 811
831 812 def _descendants(repo, subset, x, followfirst=False):
832 813 roots = getset(repo, fullreposet(repo), x)
833 814 if not roots:
834 815 return baseset()
835 816 s = _revdescendants(repo, roots, followfirst)
836 817
837 818 # Both sets need to be ascending in order to lazily return the union
838 819 # in the correct order.
839 820 base = subset & roots
840 821 desc = subset & s
841 822 result = base + desc
842 823 if subset.isascending():
843 824 result.sort()
844 825 elif subset.isdescending():
845 826 result.sort(reverse=True)
846 827 else:
847 828 result = subset & result
848 829 return result
849 830
850 831 @predicate('descendants(set)', safe=True)
851 832 def descendants(repo, subset, x):
852 833 """Changesets which are descendants of changesets in set.
853 834 """
854 835 return _descendants(repo, subset, x)
855 836
856 837 @predicate('_firstdescendants', safe=True)
857 838 def _firstdescendants(repo, subset, x):
858 839 # ``_firstdescendants(set)``
859 840 # Like ``descendants(set)`` but follows only the first parents.
860 841 return _descendants(repo, subset, x, followfirst=True)
861 842
862 843 @predicate('destination([set])', safe=True)
863 844 def destination(repo, subset, x):
864 845 """Changesets that were created by a graft, transplant or rebase operation,
865 846 with the given revisions specified as the source. Omitting the optional set
866 847 is the same as passing all().
867 848 """
868 849 if x is not None:
869 850 sources = getset(repo, fullreposet(repo), x)
870 851 else:
871 852 sources = fullreposet(repo)
872 853
873 854 dests = set()
874 855
875 856 # subset contains all of the possible destinations that can be returned, so
876 857 # iterate over them and see if their source(s) were provided in the arg set.
877 858 # Even if the immediate src of r is not in the arg set, src's source (or
878 859 # further back) may be. Scanning back further than the immediate src allows
879 860 # transitive transplants and rebases to yield the same results as transitive
880 861 # grafts.
881 862 for r in subset:
882 863 src = _getrevsource(repo, r)
883 864 lineage = None
884 865
885 866 while src is not None:
886 867 if lineage is None:
887 868 lineage = list()
888 869
889 870 lineage.append(r)
890 871
891 872 # The visited lineage is a match if the current source is in the arg
892 873 # set. Since every candidate dest is visited by way of iterating
893 874 # subset, any dests further back in the lineage will be tested by a
894 875 # different iteration over subset. Likewise, if the src was already
895 876 # selected, the current lineage can be selected without going back
896 877 # further.
897 878 if src in sources or src in dests:
898 879 dests.update(lineage)
899 880 break
900 881
901 882 r = src
902 883 src = _getrevsource(repo, r)
903 884
904 885 return subset.filter(dests.__contains__,
905 886 condrepr=lambda: '<destination %r>' % sorted(dests))
906 887
907 888 @predicate('divergent()', safe=True)
908 889 def divergent(repo, subset, x):
909 890 """
910 891 Final successors of changesets with an alternative set of final successors.
911 892 """
912 893 # i18n: "divergent" is a keyword
913 894 getargs(x, 0, 0, _("divergent takes no arguments"))
914 895 divergent = obsmod.getrevs(repo, 'divergent')
915 896 return subset & divergent
916 897
917 898 @predicate('extinct()', safe=True)
918 899 def extinct(repo, subset, x):
919 900 """Obsolete changesets with obsolete descendants only.
920 901 """
921 902 # i18n: "extinct" is a keyword
922 903 getargs(x, 0, 0, _("extinct takes no arguments"))
923 904 extincts = obsmod.getrevs(repo, 'extinct')
924 905 return subset & extincts
925 906
926 907 @predicate('extra(label, [value])', safe=True)
927 908 def extra(repo, subset, x):
928 909 """Changesets with the given label in the extra metadata, with the given
929 910 optional value.
930 911
931 912 If `value` starts with `re:`, the remainder of the value is treated as
932 913 a regular expression. To match a value that actually starts with `re:`,
933 914 use the prefix `literal:`.
934 915 """
935 916 args = getargsdict(x, 'extra', 'label value')
936 917 if 'label' not in args:
937 918 # i18n: "extra" is a keyword
938 919 raise error.ParseError(_('extra takes at least 1 argument'))
939 920 # i18n: "extra" is a keyword
940 921 label = getstring(args['label'], _('first argument to extra must be '
941 922 'a string'))
942 923 value = None
943 924
944 925 if 'value' in args:
945 926 # i18n: "extra" is a keyword
946 927 value = getstring(args['value'], _('second argument to extra must be '
947 928 'a string'))
948 929 kind, value, matcher = util.stringmatcher(value)
949 930
950 931 def _matchvalue(r):
951 932 extra = repo[r].extra()
952 933 return label in extra and (value is None or matcher(extra[label]))
953 934
954 935 return subset.filter(lambda r: _matchvalue(r),
955 936 condrepr=('<extra[%r] %r>', label, value))
956 937
957 938 @predicate('filelog(pattern)', safe=True)
958 939 def filelog(repo, subset, x):
959 940 """Changesets connected to the specified filelog.
960 941
961 942 For performance reasons, visits only revisions mentioned in the file-level
962 943 filelog, rather than filtering through all changesets (much faster, but
963 944 doesn't include deletes or duplicate changes). For a slower, more accurate
964 945 result, use ``file()``.
965 946
966 947 The pattern without explicit kind like ``glob:`` is expected to be
967 948 relative to the current directory and match against a file exactly
968 949 for efficiency.
969 950
970 951 If some linkrev points to revisions filtered by the current repoview, we'll
971 952 work around it to return a non-filtered value.
972 953 """
973 954
974 955 # i18n: "filelog" is a keyword
975 956 pat = getstring(x, _("filelog requires a pattern"))
976 957 s = set()
977 958 cl = repo.changelog
978 959
979 960 if not matchmod.patkind(pat):
980 961 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
981 962 files = [f]
982 963 else:
983 964 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
984 965 files = (f for f in repo[None] if m(f))
985 966
986 967 for f in files:
987 968 fl = repo.file(f)
988 969 known = {}
989 970 scanpos = 0
990 971 for fr in list(fl):
991 972 fn = fl.node(fr)
992 973 if fn in known:
993 974 s.add(known[fn])
994 975 continue
995 976
996 977 lr = fl.linkrev(fr)
997 978 if lr in cl:
998 979 s.add(lr)
999 980 elif scanpos is not None:
1000 981 # lowest matching changeset is filtered, scan further
1001 982 # ahead in changelog
1002 983 start = max(lr, scanpos) + 1
1003 984 scanpos = None
1004 985 for r in cl.revs(start):
1005 986 # minimize parsing of non-matching entries
1006 987 if f in cl.revision(r) and f in cl.readfiles(r):
1007 988 try:
1008 989 # try to use manifest delta fastpath
1009 990 n = repo[r].filenode(f)
1010 991 if n not in known:
1011 992 if n == fn:
1012 993 s.add(r)
1013 994 scanpos = r
1014 995 break
1015 996 else:
1016 997 known[n] = r
1017 998 except error.ManifestLookupError:
1018 999 # deletion in changelog
1019 1000 continue
1020 1001
1021 1002 return subset & s
1022 1003
1023 1004 @predicate('first(set, [n])', safe=True)
1024 1005 def first(repo, subset, x):
1025 1006 """An alias for limit().
1026 1007 """
1027 1008 return limit(repo, subset, x)
1028 1009
1029 1010 def _follow(repo, subset, x, name, followfirst=False):
1030 1011 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1031 1012 c = repo['.']
1032 1013 if l:
1033 1014 x = getstring(l[0], _("%s expected a pattern") % name)
1034 1015 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1035 1016 ctx=repo[None], default='path')
1036 1017
1037 1018 files = c.manifest().walk(matcher)
1038 1019
1039 1020 s = set()
1040 1021 for fname in files:
1041 1022 fctx = c[fname]
1042 1023 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1043 1024 # include the revision responsible for the most recent version
1044 1025 s.add(fctx.introrev())
1045 1026 else:
1046 1027 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1047 1028
1048 1029 return subset & s
1049 1030
1050 1031 @predicate('follow([pattern])', safe=True)
1051 1032 def follow(repo, subset, x):
1052 1033 """
1053 1034 An alias for ``::.`` (ancestors of the working directory's first parent).
1054 1035 If pattern is specified, the histories of files matching given
1055 1036 pattern is followed, including copies.
1056 1037 """
1057 1038 return _follow(repo, subset, x, 'follow')
1058 1039
1059 1040 @predicate('_followfirst', safe=True)
1060 1041 def _followfirst(repo, subset, x):
1061 1042 # ``followfirst([pattern])``
1062 1043 # Like ``follow([pattern])`` but follows only the first parent of
1063 1044 # every revision or file revision.
1064 1045 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1065 1046
1066 1047 @predicate('all()', safe=True)
1067 1048 def getall(repo, subset, x):
1068 1049 """All changesets, the same as ``0:tip``.
1069 1050 """
1070 1051 # i18n: "all" is a keyword
1071 1052 getargs(x, 0, 0, _("all takes no arguments"))
1072 1053 return subset & spanset(repo) # drop "null" if any
1073 1054
1074 1055 @predicate('grep(regex)')
1075 1056 def grep(repo, subset, x):
1076 1057 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1077 1058 to ensure special escape characters are handled correctly. Unlike
1078 1059 ``keyword(string)``, the match is case-sensitive.
1079 1060 """
1080 1061 try:
1081 1062 # i18n: "grep" is a keyword
1082 1063 gr = re.compile(getstring(x, _("grep requires a string")))
1083 1064 except re.error as e:
1084 1065 raise error.ParseError(_('invalid match pattern: %s') % e)
1085 1066
1086 1067 def matches(x):
1087 1068 c = repo[x]
1088 1069 for e in c.files() + [c.user(), c.description()]:
1089 1070 if gr.search(e):
1090 1071 return True
1091 1072 return False
1092 1073
1093 1074 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1094 1075
1095 1076 @predicate('_matchfiles', safe=True)
1096 1077 def _matchfiles(repo, subset, x):
1097 1078 # _matchfiles takes a revset list of prefixed arguments:
1098 1079 #
1099 1080 # [p:foo, i:bar, x:baz]
1100 1081 #
1101 1082 # builds a match object from them and filters subset. Allowed
1102 1083 # prefixes are 'p:' for regular patterns, 'i:' for include
1103 1084 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1104 1085 # a revision identifier, or the empty string to reference the
1105 1086 # working directory, from which the match object is
1106 1087 # initialized. Use 'd:' to set the default matching mode, default
1107 1088 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1108 1089
1109 1090 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1110 1091 pats, inc, exc = [], [], []
1111 1092 rev, default = None, None
1112 1093 for arg in l:
1113 1094 s = getstring(arg, "_matchfiles requires string arguments")
1114 1095 prefix, value = s[:2], s[2:]
1115 1096 if prefix == 'p:':
1116 1097 pats.append(value)
1117 1098 elif prefix == 'i:':
1118 1099 inc.append(value)
1119 1100 elif prefix == 'x:':
1120 1101 exc.append(value)
1121 1102 elif prefix == 'r:':
1122 1103 if rev is not None:
1123 1104 raise error.ParseError('_matchfiles expected at most one '
1124 1105 'revision')
1125 1106 if value != '': # empty means working directory; leave rev as None
1126 1107 rev = value
1127 1108 elif prefix == 'd:':
1128 1109 if default is not None:
1129 1110 raise error.ParseError('_matchfiles expected at most one '
1130 1111 'default mode')
1131 1112 default = value
1132 1113 else:
1133 1114 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1134 1115 if not default:
1135 1116 default = 'glob'
1136 1117
1137 1118 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1138 1119 exclude=exc, ctx=repo[rev], default=default)
1139 1120
1140 1121 # This directly reads the changelog data, as creating a changectx for
1141 1122 # all revisions is quite expensive.
1142 1123 getfiles = repo.changelog.readfiles
1143 1124 wdirrev = node.wdirrev
1144 1125 def matches(x):
1145 1126 if x == wdirrev:
1146 1127 files = repo[x].files()
1147 1128 else:
1148 1129 files = getfiles(x)
1149 1130 for f in files:
1150 1131 if m(f):
1151 1132 return True
1152 1133 return False
1153 1134
1154 1135 return subset.filter(matches,
1155 1136 condrepr=('<matchfiles patterns=%r, include=%r '
1156 1137 'exclude=%r, default=%r, rev=%r>',
1157 1138 pats, inc, exc, default, rev))
1158 1139
1159 1140 @predicate('file(pattern)', safe=True)
1160 1141 def hasfile(repo, subset, x):
1161 1142 """Changesets affecting files matched by pattern.
1162 1143
1163 1144 For a faster but less accurate result, consider using ``filelog()``
1164 1145 instead.
1165 1146
1166 1147 This predicate uses ``glob:`` as the default kind of pattern.
1167 1148 """
1168 1149 # i18n: "file" is a keyword
1169 1150 pat = getstring(x, _("file requires a pattern"))
1170 1151 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1171 1152
1172 1153 @predicate('head()', safe=True)
1173 1154 def head(repo, subset, x):
1174 1155 """Changeset is a named branch head.
1175 1156 """
1176 1157 # i18n: "head" is a keyword
1177 1158 getargs(x, 0, 0, _("head takes no arguments"))
1178 1159 hs = set()
1179 1160 cl = repo.changelog
1180 1161 for b, ls in repo.branchmap().iteritems():
1181 1162 hs.update(cl.rev(h) for h in ls)
1182 1163 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1183 1164 # This does not break because of other fullreposet misbehavior.
1184 1165 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1185 1166 # necessary to ensure we preserve the order in subset.
1186 1167 return baseset(hs) & subset
1187 1168
1188 1169 @predicate('heads(set)', safe=True)
1189 1170 def heads(repo, subset, x):
1190 1171 """Members of set with no children in set.
1191 1172 """
1192 1173 s = getset(repo, subset, x)
1193 1174 ps = parents(repo, subset, x)
1194 1175 return s - ps
1195 1176
1196 1177 @predicate('hidden()', safe=True)
1197 1178 def hidden(repo, subset, x):
1198 1179 """Hidden changesets.
1199 1180 """
1200 1181 # i18n: "hidden" is a keyword
1201 1182 getargs(x, 0, 0, _("hidden takes no arguments"))
1202 1183 hiddenrevs = repoview.filterrevs(repo, 'visible')
1203 1184 return subset & hiddenrevs
1204 1185
1205 1186 @predicate('keyword(string)', safe=True)
1206 1187 def keyword(repo, subset, x):
1207 1188 """Search commit message, user name, and names of changed files for
1208 1189 string. The match is case-insensitive.
1209 1190 """
1210 1191 # i18n: "keyword" is a keyword
1211 1192 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1212 1193
1213 1194 def matches(r):
1214 1195 c = repo[r]
1215 1196 return any(kw in encoding.lower(t)
1216 1197 for t in c.files() + [c.user(), c.description()])
1217 1198
1218 1199 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1219 1200
1220 1201 @predicate('limit(set[, n[, offset]])', safe=True)
1221 1202 def limit(repo, subset, x):
1222 1203 """First n members of set, defaulting to 1, starting from offset.
1223 1204 """
1224 1205 args = getargsdict(x, 'limit', 'set n offset')
1225 1206 if 'set' not in args:
1226 1207 # i18n: "limit" is a keyword
1227 1208 raise error.ParseError(_("limit requires one to three arguments"))
1228 1209 try:
1229 1210 lim, ofs = 1, 0
1230 1211 if 'n' in args:
1231 1212 # i18n: "limit" is a keyword
1232 1213 lim = int(getstring(args['n'], _("limit requires a number")))
1233 1214 if 'offset' in args:
1234 1215 # i18n: "limit" is a keyword
1235 1216 ofs = int(getstring(args['offset'], _("limit requires a number")))
1236 1217 if ofs < 0:
1237 1218 raise error.ParseError(_("negative offset"))
1238 1219 except (TypeError, ValueError):
1239 1220 # i18n: "limit" is a keyword
1240 1221 raise error.ParseError(_("limit expects a number"))
1241 1222 os = getset(repo, fullreposet(repo), args['set'])
1242 1223 result = []
1243 1224 it = iter(os)
1244 1225 for x in xrange(ofs):
1245 1226 y = next(it, None)
1246 1227 if y is None:
1247 1228 break
1248 1229 for x in xrange(lim):
1249 1230 y = next(it, None)
1250 1231 if y is None:
1251 1232 break
1252 1233 elif y in subset:
1253 1234 result.append(y)
1254 1235 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1255 1236 lim, ofs, subset, os))
1256 1237
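# A small usage sketch for limit() (standard revset syntax; 'default' is the
# usual branch name and only serves as an example set):
#
#   hg log -r 'limit(branch(default), 2, 4)'
#
# returns the first 2 members of branch(default) after skipping an offset of
# 4, mirroring the lim/ofs handling above.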
1257 1238 @predicate('last(set, [n])', safe=True)
1258 1239 def last(repo, subset, x):
1259 1240 """Last n members of set, defaulting to 1.
1260 1241 """
1261 1242 # i18n: "last" is a keyword
1262 1243 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1263 1244 try:
1264 1245 lim = 1
1265 1246 if len(l) == 2:
1266 1247 # i18n: "last" is a keyword
1267 1248 lim = int(getstring(l[1], _("last requires a number")))
1268 1249 except (TypeError, ValueError):
1269 1250 # i18n: "last" is a keyword
1270 1251 raise error.ParseError(_("last expects a number"))
1271 1252 os = getset(repo, fullreposet(repo), l[0])
1272 1253 os.reverse()
1273 1254 result = []
1274 1255 it = iter(os)
1275 1256 for x in xrange(lim):
1276 1257 y = next(it, None)
1277 1258 if y is None:
1278 1259 break
1279 1260 elif y in subset:
1280 1261 result.append(y)
1281 1262 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1282 1263
1283 1264 @predicate('max(set)', safe=True)
1284 1265 def maxrev(repo, subset, x):
1285 1266 """Changeset with highest revision number in set.
1286 1267 """
1287 1268 os = getset(repo, fullreposet(repo), x)
1288 1269 try:
1289 1270 m = os.max()
1290 1271 if m in subset:
1291 1272 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1292 1273 except ValueError:
1293 1274 # os.max() throws a ValueError when the collection is empty.
1294 1275 # Same as python's max().
1295 1276 pass
1296 1277 return baseset(datarepr=('<max %r, %r>', subset, os))
1297 1278
1298 1279 @predicate('merge()', safe=True)
1299 1280 def merge(repo, subset, x):
1300 1281 """Changeset is a merge changeset.
1301 1282 """
1302 1283 # i18n: "merge" is a keyword
1303 1284 getargs(x, 0, 0, _("merge takes no arguments"))
1304 1285 cl = repo.changelog
1305 1286 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1306 1287 condrepr='<merge>')
1307 1288
1308 1289 @predicate('branchpoint()', safe=True)
1309 1290 def branchpoint(repo, subset, x):
1310 1291 """Changesets with more than one child.
1311 1292 """
1312 1293 # i18n: "branchpoint" is a keyword
1313 1294 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1314 1295 cl = repo.changelog
1315 1296 if not subset:
1316 1297 return baseset()
1317 1298 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1318 1299 # (and if it is not, it should.)
1319 1300 baserev = min(subset)
1320 1301 parentscount = [0]*(len(repo) - baserev)
1321 1302 for r in cl.revs(start=baserev + 1):
1322 1303 for p in cl.parentrevs(r):
1323 1304 if p >= baserev:
1324 1305 parentscount[p - baserev] += 1
1325 1306 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1326 1307 condrepr='<branchpoint>')
1327 1308
1328 1309 @predicate('min(set)', safe=True)
1329 1310 def minrev(repo, subset, x):
1330 1311 """Changeset with lowest revision number in set.
1331 1312 """
1332 1313 os = getset(repo, fullreposet(repo), x)
1333 1314 try:
1334 1315 m = os.min()
1335 1316 if m in subset:
1336 1317 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1337 1318 except ValueError:
1338 1319 # os.min() throws a ValueError when the collection is empty.
1339 1320 # Same as python's min().
1340 1321 pass
1341 1322 return baseset(datarepr=('<min %r, %r>', subset, os))
1342 1323
1343 1324 @predicate('modifies(pattern)', safe=True)
1344 1325 def modifies(repo, subset, x):
1345 1326 """Changesets modifying files matched by pattern.
1346 1327
1347 1328 The pattern without explicit kind like ``glob:`` is expected to be
1348 1329 relative to the current directory and match against a file or a
1349 1330 directory.
1350 1331 """
1351 1332 # i18n: "modifies" is a keyword
1352 1333 pat = getstring(x, _("modifies requires a pattern"))
1353 1334 return checkstatus(repo, subset, pat, 0)
1354 1335
1355 1336 @predicate('named(namespace)')
1356 1337 def named(repo, subset, x):
1357 1338 """The changesets in a given namespace.
1358 1339
1359 1340 If `namespace` starts with `re:`, the remainder of the string is treated as
1360 1341 a regular expression. To match a namespace that actually starts with `re:`,
1361 1342 use the prefix `literal:`.
1362 1343 """
1363 1344 # i18n: "named" is a keyword
1364 1345 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1365 1346
1366 1347 ns = getstring(args[0],
1367 1348 # i18n: "named" is a keyword
1368 1349 _('the argument to named must be a string'))
1369 1350 kind, pattern, matcher = util.stringmatcher(ns)
1370 1351 namespaces = set()
1371 1352 if kind == 'literal':
1372 1353 if pattern not in repo.names:
1373 1354 raise error.RepoLookupError(_("namespace '%s' does not exist")
1374 1355 % ns)
1375 1356 namespaces.add(repo.names[pattern])
1376 1357 else:
1377 1358 for name, ns in repo.names.iteritems():
1378 1359 if matcher(name):
1379 1360 namespaces.add(ns)
1380 1361 if not namespaces:
1381 1362 raise error.RepoLookupError(_("no namespace exists"
1382 1363 " that match '%s'") % pattern)
1383 1364
1384 1365 names = set()
1385 1366 for ns in namespaces:
1386 1367 for name in ns.listnames(repo):
1387 1368 if name not in ns.deprecated:
1388 1369 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1389 1370
1390 1371 names -= set([node.nullrev])
1391 1372 return subset & names
1392 1373
1393 1374 @predicate('id(string)', safe=True)
1394 1375 def node_(repo, subset, x):
1395 1376 """Revision non-ambiguously specified by the given hex string prefix.
1396 1377 """
1397 1378 # i18n: "id" is a keyword
1398 1379 l = getargs(x, 1, 1, _("id requires one argument"))
1399 1380 # i18n: "id" is a keyword
1400 1381 n = getstring(l[0], _("id requires a string"))
1401 1382 if len(n) == 40:
1402 1383 try:
1403 1384 rn = repo.changelog.rev(node.bin(n))
1404 1385 except (LookupError, TypeError):
1405 1386 rn = None
1406 1387 else:
1407 1388 rn = None
1408 1389 pm = repo.changelog._partialmatch(n)
1409 1390 if pm is not None:
1410 1391 rn = repo.changelog.rev(pm)
1411 1392
1412 1393 if rn is None:
1413 1394 return baseset()
1414 1395 result = baseset([rn])
1415 1396 return result & subset
1416 1397
1417 1398 @predicate('obsolete()', safe=True)
1418 1399 def obsolete(repo, subset, x):
1419 1400 """Mutable changeset with a newer version."""
1420 1401 # i18n: "obsolete" is a keyword
1421 1402 getargs(x, 0, 0, _("obsolete takes no arguments"))
1422 1403 obsoletes = obsmod.getrevs(repo, 'obsolete')
1423 1404 return subset & obsoletes
1424 1405
1425 1406 @predicate('only(set, [set])', safe=True)
1426 1407 def only(repo, subset, x):
1427 1408 """Changesets that are ancestors of the first set that are not ancestors
1428 1409 of any other head in the repo. If a second set is specified, the result
1429 1410 is ancestors of the first set that are not ancestors of the second set
1430 1411 (i.e. ::<set1> - ::<set2>).
1431 1412 """
1432 1413 cl = repo.changelog
1433 1414 # i18n: "only" is a keyword
1434 1415 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1435 1416 include = getset(repo, fullreposet(repo), args[0])
1436 1417 if len(args) == 1:
1437 1418 if not include:
1438 1419 return baseset()
1439 1420
1440 1421 descendants = set(_revdescendants(repo, include, False))
1441 1422 exclude = [rev for rev in cl.headrevs()
1442 1423 if not rev in descendants and not rev in include]
1443 1424 else:
1444 1425 exclude = getset(repo, fullreposet(repo), args[1])
1445 1426
1446 1427 results = set(cl.findmissingrevs(common=exclude, heads=include))
1447 1428 # XXX we should turn this into a baseset instead of a set, smartset may do
1448 1429 # some optimisations from the fact this is a baseset.
1449 1430 return subset & results
1450 1431
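# A small usage sketch for only() (standard revset syntax; 'featurebranch'
# is only a placeholder revision name):
#
#   hg log -r 'only(featurebranch, default)'
#
# is equivalent to '::featurebranch - ::default', per the docstring above.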
1451 1432 @predicate('origin([set])', safe=True)
1452 1433 def origin(repo, subset, x):
1453 1434 """
1454 1435 Changesets that were specified as a source for the grafts, transplants or
1455 1436 rebases that created the given revisions. Omitting the optional set is the
1456 1437 same as passing all(). If a changeset created by these operations is itself
1457 1438 specified as a source for one of these operations, only the source changeset
1458 1439 for the first operation is selected.
1459 1440 """
1460 1441 if x is not None:
1461 1442 dests = getset(repo, fullreposet(repo), x)
1462 1443 else:
1463 1444 dests = fullreposet(repo)
1464 1445
1465 1446 def _firstsrc(rev):
1466 1447 src = _getrevsource(repo, rev)
1467 1448 if src is None:
1468 1449 return None
1469 1450
1470 1451 while True:
1471 1452 prev = _getrevsource(repo, src)
1472 1453
1473 1454 if prev is None:
1474 1455 return src
1475 1456 src = prev
1476 1457
1477 1458 o = set([_firstsrc(r) for r in dests])
1478 1459 o -= set([None])
1479 1460 # XXX we should turn this into a baseset instead of a set, smartset may do
1480 1461 # some optimisations from the fact this is a baseset.
1481 1462 return subset & o
1482 1463
1483 1464 @predicate('outgoing([path])', safe=True)
1484 1465 def outgoing(repo, subset, x):
1485 1466 """Changesets not found in the specified destination repository, or the
1486 1467 default push location.
1487 1468 """
1488 1469 # Avoid cycles.
1489 1470 from . import (
1490 1471 discovery,
1491 1472 hg,
1492 1473 )
1493 1474 # i18n: "outgoing" is a keyword
1494 1475 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1495 1476 # i18n: "outgoing" is a keyword
1496 1477 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1497 1478 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1498 1479 dest, branches = hg.parseurl(dest)
1499 1480 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1500 1481 if revs:
1501 1482 revs = [repo.lookup(rev) for rev in revs]
1502 1483 other = hg.peer(repo, {}, dest)
1503 1484 repo.ui.pushbuffer()
1504 1485 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1505 1486 repo.ui.popbuffer()
1506 1487 cl = repo.changelog
1507 1488 o = set([cl.rev(r) for r in outgoing.missing])
1508 1489 return subset & o
1509 1490
1510 1491 @predicate('p1([set])', safe=True)
1511 1492 def p1(repo, subset, x):
1512 1493 """First parent of changesets in set, or the working directory.
1513 1494 """
1514 1495 if x is None:
1515 1496 p = repo[x].p1().rev()
1516 1497 if p >= 0:
1517 1498 return subset & baseset([p])
1518 1499 return baseset()
1519 1500
1520 1501 ps = set()
1521 1502 cl = repo.changelog
1522 1503 for r in getset(repo, fullreposet(repo), x):
1523 1504 ps.add(cl.parentrevs(r)[0])
1524 1505 ps -= set([node.nullrev])
1525 1506 # XXX we should turn this into a baseset instead of a set, smartset may do
1526 1507 # some optimisations from the fact this is a baseset.
1527 1508 return subset & ps
1528 1509
1529 1510 @predicate('p2([set])', safe=True)
1530 1511 def p2(repo, subset, x):
1531 1512 """Second parent of changesets in set, or the working directory.
1532 1513 """
1533 1514 if x is None:
1534 1515 ps = repo[x].parents()
1535 1516 try:
1536 1517 p = ps[1].rev()
1537 1518 if p >= 0:
1538 1519 return subset & baseset([p])
1539 1520 return baseset()
1540 1521 except IndexError:
1541 1522 return baseset()
1542 1523
1543 1524 ps = set()
1544 1525 cl = repo.changelog
1545 1526 for r in getset(repo, fullreposet(repo), x):
1546 1527 ps.add(cl.parentrevs(r)[1])
1547 1528 ps -= set([node.nullrev])
1548 1529 # XXX we should turn this into a baseset instead of a set, smartset may do
1549 1530 # some optimisations from the fact this is a baseset.
1550 1531 return subset & ps
1551 1532
1552 1533 @predicate('parents([set])', safe=True)
1553 1534 def parents(repo, subset, x):
1554 1535 """
1555 1536 The set of all parents for all changesets in set, or the working directory.
1556 1537 """
1557 1538 if x is None:
1558 1539 ps = set(p.rev() for p in repo[x].parents())
1559 1540 else:
1560 1541 ps = set()
1561 1542 cl = repo.changelog
1562 1543 up = ps.update
1563 1544 parentrevs = cl.parentrevs
1564 1545 for r in getset(repo, fullreposet(repo), x):
1565 1546 if r == node.wdirrev:
1566 1547 up(p.rev() for p in repo[r].parents())
1567 1548 else:
1568 1549 up(parentrevs(r))
1569 1550 ps -= set([node.nullrev])
1570 1551 return subset & ps
1571 1552
1572 1553 def _phase(repo, subset, target):
1573 1554 """helper to select all revs in phase <target>"""
1574 1555 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1575 1556 if repo._phasecache._phasesets:
1576 1557 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1577 1558 s = baseset(s)
1578 1559 s.sort() # sets are unordered, so we enforce ascending order
1579 1560 return subset & s
1580 1561 else:
1581 1562 phase = repo._phasecache.phase
1582 1563 condition = lambda r: phase(repo, r) == target
1583 1564 return subset.filter(condition, condrepr=('<phase %r>', target),
1584 1565 cache=False)
1585 1566
1586 1567 @predicate('draft()', safe=True)
1587 1568 def draft(repo, subset, x):
1588 1569 """Changeset in draft phase."""
1589 1570 # i18n: "draft" is a keyword
1590 1571 getargs(x, 0, 0, _("draft takes no arguments"))
1591 1572 target = phases.draft
1592 1573 return _phase(repo, subset, target)
1593 1574
1594 1575 @predicate('secret()', safe=True)
1595 1576 def secret(repo, subset, x):
1596 1577 """Changeset in secret phase."""
1597 1578 # i18n: "secret" is a keyword
1598 1579 getargs(x, 0, 0, _("secret takes no arguments"))
1599 1580 target = phases.secret
1600 1581 return _phase(repo, subset, target)
1601 1582
1602 1583 def parentspec(repo, subset, x, n):
1603 1584 """``set^0``
1604 1585 The set.
1605 1586 ``set^1`` (or ``set^``), ``set^2``
1606 1587 First or second parent, respectively, of all changesets in set.
1607 1588 """
1608 1589 try:
1609 1590 n = int(n[1])
1610 1591 if n not in (0, 1, 2):
1611 1592 raise ValueError
1612 1593 except (TypeError, ValueError):
1613 1594 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1614 1595 ps = set()
1615 1596 cl = repo.changelog
1616 1597 for r in getset(repo, fullreposet(repo), x):
1617 1598 if n == 0:
1618 1599 ps.add(r)
1619 1600 elif n == 1:
1620 1601 ps.add(cl.parentrevs(r)[0])
1621 1602 elif n == 2:
1622 1603 parents = cl.parentrevs(r)
1623 1604 if len(parents) > 1:
1624 1605 ps.add(parents[1])
1625 1606 return subset & ps
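# Example: a minimal usage sketch of the "rev^n" operator handled above,
# assuming a repository object ``repo`` and its revs() helper:
#
#     repo.revs('tip^')     # first parent of tip (same as 'tip^1')
#     repo.revs('tip^2')    # second parent; empty unless tip is a merge
#     repo.revs('tip^0')    # tip itself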
1626 1607
1627 1608 @predicate('present(set)', safe=True)
1628 1609 def present(repo, subset, x):
1629 1610 """An empty set, if any revision in set isn't found; otherwise,
1630 1611 all revisions in set.
1631 1612
1632 1613 If any of the specified revisions is not present in the local repository,
1633 1614 the query is normally aborted. But this predicate allows the query
1634 1615 to continue even in such cases.
1635 1616 """
1636 1617 try:
1637 1618 return getset(repo, subset, x)
1638 1619 except error.RepoLookupError:
1639 1620 return baseset()
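# Example: a minimal sketch of why present() is useful, assuming ``repo`` and
# a hypothetical identifier ``somehash`` that may not exist locally:
#
#     repo.revs('present(%s) or tip', somehash)
#
# The lookup error for an unknown ``somehash`` is swallowed, so the query
# still returns tip instead of aborting.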
1640 1621
1641 1622 # for internal use
1642 1623 @predicate('_notpublic', safe=True)
1643 1624 def _notpublic(repo, subset, x):
1644 1625 getargs(x, 0, 0, "_notpublic takes no arguments")
1645 1626 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1646 1627 if repo._phasecache._phasesets:
1647 1628 s = set()
1648 1629 for u in repo._phasecache._phasesets[1:]:
1649 1630 s.update(u)
1650 1631 s = baseset(s - repo.changelog.filteredrevs)
1651 1632 s.sort()
1652 1633 return subset & s
1653 1634 else:
1654 1635 phase = repo._phasecache.phase
1655 1636 target = phases.public
1656 1637 condition = lambda r: phase(repo, r) != target
1657 1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1658 1639 cache=False)
1659 1640
1660 1641 @predicate('public()', safe=True)
1661 1642 def public(repo, subset, x):
1662 1643 """Changeset in public phase."""
1663 1644 # i18n: "public" is a keyword
1664 1645 getargs(x, 0, 0, _("public takes no arguments"))
1665 1646 phase = repo._phasecache.phase
1666 1647 target = phases.public
1667 1648 condition = lambda r: phase(repo, r) == target
1668 1649 return subset.filter(condition, condrepr=('<phase %r>', target),
1669 1650 cache=False)
1670 1651
1671 1652 @predicate('remote([id [,path]])', safe=True)
1672 1653 def remote(repo, subset, x):
1673 1654 """Local revision that corresponds to the given identifier in a
1674 1655 remote repository, if present. Here, the '.' identifier is a
1675 1656 synonym for the current local branch.
1676 1657 """
1677 1658
1678 1659 from . import hg # avoid start-up nasties
1679 1660 # i18n: "remote" is a keyword
1680 1661 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1681 1662
1682 1663 q = '.'
1683 1664 if len(l) > 0:
1684 1665 # i18n: "remote" is a keyword
1685 1666 q = getstring(l[0], _("remote requires a string id"))
1686 1667 if q == '.':
1687 1668 q = repo['.'].branch()
1688 1669
1689 1670 dest = ''
1690 1671 if len(l) > 1:
1691 1672 # i18n: "remote" is a keyword
1692 1673 dest = getstring(l[1], _("remote requires a repository path"))
1693 1674 dest = repo.ui.expandpath(dest or 'default')
1694 1675 dest, branches = hg.parseurl(dest)
1695 1676 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1696 1677 if revs:
1697 1678 revs = [repo.lookup(rev) for rev in revs]
1698 1679 other = hg.peer(repo, {}, dest)
1699 1680 n = other.lookup(q)
1700 1681 if n in repo:
1701 1682 r = repo[n].rev()
1702 1683 if r in subset:
1703 1684 return baseset([r])
1704 1685 return baseset()
1705 1686
1706 1687 @predicate('removes(pattern)', safe=True)
1707 1688 def removes(repo, subset, x):
1708 1689 """Changesets which remove files matching pattern.
1709 1690
1710 1691 The pattern without explicit kind like ``glob:`` is expected to be
1711 1692 relative to the current directory and match against a file or a
1712 1693 directory.
1713 1694 """
1714 1695 # i18n: "removes" is a keyword
1715 1696 pat = getstring(x, _("removes requires a pattern"))
1716 1697 return checkstatus(repo, subset, pat, 2)
1717 1698
1718 1699 @predicate('rev(number)', safe=True)
1719 1700 def rev(repo, subset, x):
1720 1701 """Revision with the given numeric identifier.
1721 1702 """
1722 1703 # i18n: "rev" is a keyword
1723 1704 l = getargs(x, 1, 1, _("rev requires one argument"))
1724 1705 try:
1725 1706 # i18n: "rev" is a keyword
1726 1707 l = int(getstring(l[0], _("rev requires a number")))
1727 1708 except (TypeError, ValueError):
1728 1709 # i18n: "rev" is a keyword
1729 1710 raise error.ParseError(_("rev expects a number"))
1730 1711 if l not in repo.changelog and l != node.nullrev:
1731 1712 return baseset()
1732 1713 return subset & baseset([l])
1733 1714
1734 1715 @predicate('matching(revision [, field])', safe=True)
1735 1716 def matching(repo, subset, x):
1736 1717 """Changesets in which a given set of fields match the set of fields in the
1737 1718 selected revision or set.
1738 1719
1739 1720 To match more than one field, pass the list of fields to match separated
1740 1721 by spaces (e.g. ``author description``).
1741 1722
1742 1723 Valid fields are most regular revision fields and some special fields.
1743 1724
1744 1725 Regular revision fields are ``description``, ``author``, ``branch``,
1745 1726 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1746 1727 and ``diff``.
1747 1728 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1748 1729 contents of the revision. Two revisions matching their ``diff`` will
1749 1730 also match their ``files``.
1750 1731
1751 1732 Special fields are ``summary`` and ``metadata``:
1752 1733 ``summary`` matches the first line of the description.
1753 1734 ``metadata`` is equivalent to matching ``description user date``
1754 1735 (i.e. it matches the main metadata fields).
1755 1736
1756 1737 ``metadata`` is the default field which is used when no fields are
1757 1738 specified. You can match more than one field at a time.
1758 1739 """
1759 1740 # i18n: "matching" is a keyword
1760 1741 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1761 1742
1762 1743 revs = getset(repo, fullreposet(repo), l[0])
1763 1744
1764 1745 fieldlist = ['metadata']
1765 1746 if len(l) > 1:
1766 1747 fieldlist = getstring(l[1],
1767 1748 # i18n: "matching" is a keyword
1768 1749 _("matching requires a string "
1769 1750 "as its second argument")).split()
1770 1751
1771 1752 # Make sure that there are no repeated fields,
1772 1753 # expand the 'special' 'metadata' field type
1773 1754 # and check the 'files' whenever we check the 'diff'
1774 1755 fields = []
1775 1756 for field in fieldlist:
1776 1757 if field == 'metadata':
1777 1758 fields += ['user', 'description', 'date']
1778 1759 elif field == 'diff':
1779 1760 # a revision matching the diff must also match the files
1780 1761 # since matching the diff is very costly, make sure to
1781 1762 # also match the files first
1782 1763 fields += ['files', 'diff']
1783 1764 else:
1784 1765 if field == 'author':
1785 1766 field = 'user'
1786 1767 fields.append(field)
1787 1768 fields = set(fields)
1788 1769 if 'summary' in fields and 'description' in fields:
1789 1770 # If a revision matches its description it also matches its summary
1790 1771 fields.discard('summary')
1791 1772
1792 1773 # We may want to match more than one field
1793 1774 # Not all fields take the same amount of time to be matched
1794 1775 # Sort the selected fields in order of increasing matching cost
1795 1776 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1796 1777 'files', 'description', 'substate', 'diff']
1797 1778 def fieldkeyfunc(f):
1798 1779 try:
1799 1780 return fieldorder.index(f)
1800 1781 except ValueError:
1801 1782 # assume an unknown field is very costly
1802 1783 return len(fieldorder)
1803 1784 fields = list(fields)
1804 1785 fields.sort(key=fieldkeyfunc)
1805 1786
1806 1787 # Each field will be matched with its own "getfield" function
1807 1788 # which will be added to the getfieldfuncs array of functions
1808 1789 getfieldfuncs = []
1809 1790 _funcs = {
1810 1791 'user': lambda r: repo[r].user(),
1811 1792 'branch': lambda r: repo[r].branch(),
1812 1793 'date': lambda r: repo[r].date(),
1813 1794 'description': lambda r: repo[r].description(),
1814 1795 'files': lambda r: repo[r].files(),
1815 1796 'parents': lambda r: repo[r].parents(),
1816 1797 'phase': lambda r: repo[r].phase(),
1817 1798 'substate': lambda r: repo[r].substate,
1818 1799 'summary': lambda r: repo[r].description().splitlines()[0],
1819 1800 'diff': lambda r: list(repo[r].diff(git=True),)
1820 1801 }
1821 1802 for info in fields:
1822 1803 getfield = _funcs.get(info, None)
1823 1804 if getfield is None:
1824 1805 raise error.ParseError(
1825 1806 # i18n: "matching" is a keyword
1826 1807 _("unexpected field name passed to matching: %s") % info)
1827 1808 getfieldfuncs.append(getfield)
1828 1809 # convert the getfield array of functions into a "getinfo" function
1829 1810 # which returns an array of field values (or a single value if there
1830 1811 # is only one field to match)
1831 1812 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1832 1813
1833 1814 def matches(x):
1834 1815 for rev in revs:
1835 1816 target = getinfo(rev)
1836 1817 match = True
1837 1818 for n, f in enumerate(getfieldfuncs):
1838 1819 if target[n] != f(x):
1839 1820 match = False
1840 1821 if match:
1841 1822 return True
1842 1823 return False
1843 1824
1844 1825 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
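# Example: a minimal sketch of the matching() predicate above, assuming
# ``repo``:
#
#     repo.revs('matching(tip)')                  # same user, description, date
#     repo.revs('matching(tip, "author date")')   # compare only author and date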
1845 1826
1846 1827 @predicate('reverse(set)', safe=True)
1847 1828 def reverse(repo, subset, x):
1848 1829 """Reverse order of set.
1849 1830 """
1850 1831 l = getset(repo, subset, x)
1851 1832 l.reverse()
1852 1833 return l
1853 1834
1854 1835 @predicate('roots(set)', safe=True)
1855 1836 def roots(repo, subset, x):
1856 1837 """Changesets in set with no parent changeset in set.
1857 1838 """
1858 1839 s = getset(repo, fullreposet(repo), x)
1859 1840 parents = repo.changelog.parentrevs
1860 1841 def filter(r):
1861 1842 for p in parents(r):
1862 1843 if 0 <= p and p in s:
1863 1844 return False
1864 1845 return True
1865 1846 return subset & s.filter(filter, condrepr='<roots>')
1866 1847
1867 1848 @predicate('sort(set[, [-]key...])', safe=True)
1868 1849 def sort(repo, subset, x):
1869 1850 """Sort set by keys. The default sort order is ascending, specify a key
1870 1851 as ``-key`` to sort in descending order.
1871 1852
1872 1853 The keys can be:
1873 1854
1874 1855 - ``rev`` for the revision number,
1875 1856 - ``branch`` for the branch name,
1876 1857 - ``desc`` for the commit message (description),
1877 1858 - ``user`` for user name (``author`` can be used as an alias),
1878 1859 - ``date`` for the commit date
1879 1860 """
1880 1861 # i18n: "sort" is a keyword
1881 1862 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1882 1863 keys = "rev"
1883 1864 if len(l) == 2:
1884 1865 # i18n: "sort" is a keyword
1885 1866 keys = getstring(l[1], _("sort spec must be a string"))
1886 1867
1887 1868 s = l[0]
1888 1869 keys = keys.split()
1889 1870 l = []
1890 1871 def invert(s):
1891 1872 return "".join(chr(255 - ord(c)) for c in s)
1892 1873 revs = getset(repo, subset, s)
1893 1874 if keys == ["rev"]:
1894 1875 revs.sort()
1895 1876 return revs
1896 1877 elif keys == ["-rev"]:
1897 1878 revs.sort(reverse=True)
1898 1879 return revs
1899 1880 for r in revs:
1900 1881 c = repo[r]
1901 1882 e = []
1902 1883 for k in keys:
1903 1884 if k == 'rev':
1904 1885 e.append(r)
1905 1886 elif k == '-rev':
1906 1887 e.append(-r)
1907 1888 elif k == 'branch':
1908 1889 e.append(c.branch())
1909 1890 elif k == '-branch':
1910 1891 e.append(invert(c.branch()))
1911 1892 elif k == 'desc':
1912 1893 e.append(c.description())
1913 1894 elif k == '-desc':
1914 1895 e.append(invert(c.description()))
1915 1896 elif k in 'user author':
1916 1897 e.append(c.user())
1917 1898 elif k in '-user -author':
1918 1899 e.append(invert(c.user()))
1919 1900 elif k == 'date':
1920 1901 e.append(c.date()[0])
1921 1902 elif k == '-date':
1922 1903 e.append(-c.date()[0])
1923 1904 else:
1924 1905 raise error.ParseError(_("unknown sort key %r") % k)
1925 1906 e.append(r)
1926 1907 l.append(e)
1927 1908 l.sort()
1928 1909 return baseset([e[-1] for e in l])
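# Example: a minimal sketch of the sort() predicate above, assuming ``repo``.
# Keys are compared left to right and a leading '-' inverts only that key:
#
#     repo.revs('sort(all(), "branch -date")')   # by branch, newest first inside
#                                                # each branch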
1929 1910
1930 1911 @predicate('subrepo([pattern])')
1931 1912 def subrepo(repo, subset, x):
1932 1913 """Changesets that add, modify or remove the given subrepo. If no subrepo
1933 1914 pattern is named, any subrepo changes are returned.
1934 1915 """
1935 1916 # i18n: "subrepo" is a keyword
1936 1917 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1937 1918 pat = None
1938 1919 if len(args) != 0:
1939 1920 pat = getstring(args[0], _("subrepo requires a pattern"))
1940 1921
1941 1922 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1942 1923
1943 1924 def submatches(names):
1944 1925 k, p, m = util.stringmatcher(pat)
1945 1926 for name in names:
1946 1927 if m(name):
1947 1928 yield name
1948 1929
1949 1930 def matches(x):
1950 1931 c = repo[x]
1951 1932 s = repo.status(c.p1().node(), c.node(), match=m)
1952 1933
1953 1934 if pat is None:
1954 1935 return s.added or s.modified or s.removed
1955 1936
1956 1937 if s.added:
1957 1938 return any(submatches(c.substate.keys()))
1958 1939
1959 1940 if s.modified:
1960 1941 subs = set(c.p1().substate.keys())
1961 1942 subs.update(c.substate.keys())
1962 1943
1963 1944 for path in submatches(subs):
1964 1945 if c.p1().substate.get(path) != c.substate.get(path):
1965 1946 return True
1966 1947
1967 1948 if s.removed:
1968 1949 return any(submatches(c.p1().substate.keys()))
1969 1950
1970 1951 return False
1971 1952
1972 1953 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1973 1954
1974 1955 def _substringmatcher(pattern):
1975 1956 kind, pattern, matcher = util.stringmatcher(pattern)
1976 1957 if kind == 'literal':
1977 1958 matcher = lambda s: pattern in s
1978 1959 return kind, pattern, matcher
1979 1960
1980 1961 @predicate('tag([name])', safe=True)
1981 1962 def tag(repo, subset, x):
1982 1963 """The specified tag by name, or all tagged revisions if no name is given.
1983 1964
1984 1965 If `name` starts with `re:`, the remainder of the name is treated as
1985 1966 a regular expression. To match a tag that actually starts with `re:`,
1986 1967 use the prefix `literal:`.
1987 1968 """
1988 1969 # i18n: "tag" is a keyword
1989 1970 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1990 1971 cl = repo.changelog
1991 1972 if args:
1992 1973 pattern = getstring(args[0],
1993 1974 # i18n: "tag" is a keyword
1994 1975 _('the argument to tag must be a string'))
1995 1976 kind, pattern, matcher = util.stringmatcher(pattern)
1996 1977 if kind == 'literal':
1997 1978 # avoid resolving all tags
1998 1979 tn = repo._tagscache.tags.get(pattern, None)
1999 1980 if tn is None:
2000 1981 raise error.RepoLookupError(_("tag '%s' does not exist")
2001 1982 % pattern)
2002 1983 s = set([repo[tn].rev()])
2003 1984 else:
2004 1985 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2005 1986 else:
2006 1987 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2007 1988 return subset & s
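# Example: a minimal sketch of tag() and its pattern prefixes, assuming
# ``repo``:
#
#     repo.revs('tag()')                  # all tagged revisions except 'tip'
#     repo.revs('tag("re:^v1\.")')        # tag names matching a regular expression
#     repo.revs('tag("literal:re:x")')    # a tag literally named "re:x"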
2008 1989
2009 1990 @predicate('tagged', safe=True)
2010 1991 def tagged(repo, subset, x):
2011 1992 return tag(repo, subset, x)
2012 1993
2013 1994 @predicate('unstable()', safe=True)
2014 1995 def unstable(repo, subset, x):
2015 1996 """Non-obsolete changesets with obsolete ancestors.
2016 1997 """
2017 1998 # i18n: "unstable" is a keyword
2018 1999 getargs(x, 0, 0, _("unstable takes no arguments"))
2019 2000 unstables = obsmod.getrevs(repo, 'unstable')
2020 2001 return subset & unstables
2021 2002
2022 2003
2023 2004 @predicate('user(string)', safe=True)
2024 2005 def user(repo, subset, x):
2025 2006 """User name contains string. The match is case-insensitive.
2026 2007
2027 2008 If `string` starts with `re:`, the remainder of the string is treated as
2028 2009 a regular expression. To match a user that actually contains `re:`, use
2029 2010 the prefix `literal:`.
2030 2011 """
2031 2012 return author(repo, subset, x)
2032 2013
2033 2014 # experimental
2034 2015 @predicate('wdir', safe=True)
2035 2016 def wdir(repo, subset, x):
2036 2017 # i18n: "wdir" is a keyword
2037 2018 getargs(x, 0, 0, _("wdir takes no arguments"))
2038 2019 if node.wdirrev in subset or isinstance(subset, fullreposet):
2039 2020 return baseset([node.wdirrev])
2040 2021 return baseset()
2041 2022
2042 2023 # for internal use
2043 2024 @predicate('_list', safe=True)
2044 2025 def _list(repo, subset, x):
2045 2026 s = getstring(x, "internal error")
2046 2027 if not s:
2047 2028 return baseset()
2048 2029 # remove duplicates here. it's difficult for the caller to deduplicate sets
2049 2030 # because different symbols can point to the same rev.
2050 2031 cl = repo.changelog
2051 2032 ls = []
2052 2033 seen = set()
2053 2034 for t in s.split('\0'):
2054 2035 try:
2055 2036 # fast path for integer revision
2056 2037 r = int(t)
2057 2038 if str(r) != t or r not in cl:
2058 2039 raise ValueError
2059 2040 revs = [r]
2060 2041 except ValueError:
2061 2042 revs = stringset(repo, subset, t)
2062 2043
2063 2044 for r in revs:
2064 2045 if r in seen:
2065 2046 continue
2066 2047 if (r in subset
2067 2048 or r == node.nullrev and isinstance(subset, fullreposet)):
2068 2049 ls.append(r)
2069 2050 seen.add(r)
2070 2051 return baseset(ls)
2071 2052
2072 2053 # for internal use
2073 2054 @predicate('_intlist', safe=True)
2074 2055 def _intlist(repo, subset, x):
2075 2056 s = getstring(x, "internal error")
2076 2057 if not s:
2077 2058 return baseset()
2078 2059 ls = [int(r) for r in s.split('\0')]
2079 2060 s = subset
2080 2061 return baseset([r for r in ls if r in s])
2081 2062
2082 2063 # for internal use
2083 2064 @predicate('_hexlist', safe=True)
2084 2065 def _hexlist(repo, subset, x):
2085 2066 s = getstring(x, "internal error")
2086 2067 if not s:
2087 2068 return baseset()
2088 2069 cl = repo.changelog
2089 2070 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2090 2071 s = subset
2091 2072 return baseset([r for r in ls if r in s])
2092 2073
2093 2074 methods = {
2094 2075 "range": rangeset,
2095 2076 "dagrange": dagrange,
2096 2077 "string": stringset,
2097 2078 "symbol": stringset,
2098 2079 "and": andset,
2099 2080 "or": orset,
2100 2081 "not": notset,
2101 2082 "difference": differenceset,
2102 2083 "list": listset,
2103 2084 "keyvalue": keyvaluepair,
2104 2085 "func": func,
2105 2086 "ancestor": ancestorspec,
2106 2087 "parent": parentspec,
2107 2088 "parentpost": p1,
2108 2089 }
2109 2090
2110 2091 def optimize(x, small):
2111 2092 if x is None:
2112 2093 return 0, x
2113 2094
2114 2095 smallbonus = 1
2115 2096 if small:
2116 2097 smallbonus = .5
2117 2098
2118 2099 op = x[0]
2119 2100 if op == 'minus':
2120 2101 return optimize(('and', x[1], ('not', x[2])), small)
2121 2102 elif op == 'only':
2122 2103 return optimize(('func', ('symbol', 'only'),
2123 2104 ('list', x[1], x[2])), small)
2124 2105 elif op == 'onlypost':
2125 2106 return optimize(('func', ('symbol', 'only'), x[1]), small)
2126 2107 elif op == 'dagrangepre':
2127 2108 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2128 2109 elif op == 'dagrangepost':
2129 2110 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2130 2111 elif op == 'rangeall':
2131 2112 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2132 2113 elif op == 'rangepre':
2133 2114 return optimize(('range', ('string', '0'), x[1]), small)
2134 2115 elif op == 'rangepost':
2135 2116 return optimize(('range', x[1], ('string', 'tip')), small)
2136 2117 elif op == 'negate':
2137 2118 return optimize(('string',
2138 2119 '-' + getstring(x[1], _("can't negate that"))), small)
2139 2120 elif op in 'string symbol negate':
2140 2121 return smallbonus, x # single revisions are small
2141 2122 elif op == 'and':
2142 2123 wa, ta = optimize(x[1], True)
2143 2124 wb, tb = optimize(x[2], True)
2144 2125
2145 2126 # (::x and not ::y)/(not ::y and ::x) have a fast path
2146 2127 def isonly(revs, bases):
2147 2128 return (
2148 2129 revs is not None
2149 2130 and revs[0] == 'func'
2150 2131 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2151 2132 and bases is not None
2152 2133 and bases[0] == 'not'
2153 2134 and bases[1][0] == 'func'
2154 2135 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2155 2136
2156 2137 w = min(wa, wb)
2157 2138 if isonly(ta, tb):
2158 2139 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2159 2140 if isonly(tb, ta):
2160 2141 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2161 2142
2162 2143 if tb is not None and tb[0] == 'not':
2163 2144 return wa, ('difference', ta, tb[1])
2164 2145
2165 2146 if wa > wb:
2166 2147 return w, (op, tb, ta)
2167 2148 return w, (op, ta, tb)
2168 2149 elif op == 'or':
2169 2150 # fast path for machine-generated expressions, which are likely to have
2170 2151 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2171 2152 ws, ts, ss = [], [], []
2172 2153 def flushss():
2173 2154 if not ss:
2174 2155 return
2175 2156 if len(ss) == 1:
2176 2157 w, t = ss[0]
2177 2158 else:
2178 2159 s = '\0'.join(t[1] for w, t in ss)
2179 2160 y = ('func', ('symbol', '_list'), ('string', s))
2180 2161 w, t = optimize(y, False)
2181 2162 ws.append(w)
2182 2163 ts.append(t)
2183 2164 del ss[:]
2184 2165 for y in x[1:]:
2185 2166 w, t = optimize(y, False)
2186 2167 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2187 2168 ss.append((w, t))
2188 2169 continue
2189 2170 flushss()
2190 2171 ws.append(w)
2191 2172 ts.append(t)
2192 2173 flushss()
2193 2174 if len(ts) == 1:
2194 2175 return ws[0], ts[0] # 'or' operation is fully optimized out
2195 2176 # we can't reorder trees by weight because it would change the order.
2196 2177 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2197 2178 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2198 2179 return max(ws), (op,) + tuple(ts)
2199 2180 elif op == 'not':
2200 2181 # Optimize not public() to _notpublic() because we have a fast version
2201 2182 if x[1] == ('func', ('symbol', 'public'), None):
2202 2183 newsym = ('func', ('symbol', '_notpublic'), None)
2203 2184 o = optimize(newsym, not small)
2204 2185 return o[0], o[1]
2205 2186 else:
2206 2187 o = optimize(x[1], not small)
2207 2188 return o[0], (op, o[1])
2208 2189 elif op == 'parentpost':
2209 2190 o = optimize(x[1], small)
2210 2191 return o[0], (op, o[1])
2211 2192 elif op == 'group':
2212 2193 return optimize(x[1], small)
2213 2194 elif op in 'dagrange range parent ancestorspec':
2214 2195 if op == 'parent':
2215 2196 # x^:y means (x^) : y, not x ^ (:y)
2216 2197 post = ('parentpost', x[1])
2217 2198 if x[2][0] == 'dagrangepre':
2218 2199 return optimize(('dagrange', post, x[2][1]), small)
2219 2200 elif x[2][0] == 'rangepre':
2220 2201 return optimize(('range', post, x[2][1]), small)
2221 2202
2222 2203 wa, ta = optimize(x[1], small)
2223 2204 wb, tb = optimize(x[2], small)
2224 2205 return wa + wb, (op, ta, tb)
2225 2206 elif op == 'list':
2226 2207 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2227 2208 return sum(ws), (op,) + ts
2228 2209 elif op == 'func':
2229 2210 f = getstring(x[1], _("not a symbol"))
2230 2211 wa, ta = optimize(x[2], small)
2231 2212 if f in ("author branch closed date desc file grep keyword "
2232 2213 "outgoing user"):
2233 2214 w = 10 # slow
2234 2215 elif f in "modifies adds removes":
2235 2216 w = 30 # slower
2236 2217 elif f == "contains":
2237 2218 w = 100 # very slow
2238 2219 elif f == "ancestor":
2239 2220 w = 1 * smallbonus
2240 2221 elif f in "reverse limit first _intlist":
2241 2222 w = 0
2242 2223 elif f in "sort":
2243 2224 w = 10 # assume most sorts look at changelog
2244 2225 else:
2245 2226 w = 1
2246 2227 return w + wa, (op, x[1], ta)
2247 2228 return 1, x
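# Two of the rewrites performed by optimize() above, shown informally on
# already-parsed trees (a sketch, not exhaustive):
#
#     'not public()'     -> ('func', ('symbol', '_notpublic'), None)
#     '::a and not ::b'  -> ('func', ('symbol', 'only'), ('list', <a>, <b>))
#
# The returned weight is only a heuristic used to evaluate the cheaper operand
# of an 'and' first; it never changes the result of the expression.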
2248 2229
2249 2230 # the set of valid characters for the initial letter of symbols in
2250 2231 # alias declarations and definitions
2251 2232 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2252 2233 if c.isalnum() or c in '._@$' or ord(c) > 127)
2253 2234
2254 2235 def _tokenizealias(program, lookup=None):
2255 2236 """Parse alias declaration/definition into a stream of tokens
2256 2237
2257 2238 This allows symbol names to use also ``$`` as an initial letter
2258 2239 (for backward compatibility), and callers of this function should
2259 2240 examine whether ``$`` is used also for unexpected symbols or not.
2260 2241 """
2261 2242 return tokenize(program, lookup=lookup,
2262 2243 syminitletters=_aliassyminitletters)
2263 2244
2264 2245 def _parsealiasdecl(decl):
2265 2246 """Parse alias declaration ``decl``
2266 2247
2267 2248 This returns ``(name, tree, args, errorstr)`` tuple:
2268 2249
2269 2250 - ``name``: name of the declared alias (may be ``decl`` itself on error)
2270 2251 - ``tree``: parse result (or ``None`` on error)
2271 2252 - ``args``: list of alias argument names (or None for a symbol declaration)
2272 2253 - ``errorstr``: detail about the detected error (or None)
2273 2254
2274 2255 >>> _parsealiasdecl('foo')
2275 2256 ('foo', ('symbol', 'foo'), None, None)
2276 2257 >>> _parsealiasdecl('$foo')
2277 2258 ('$foo', None, None, "'$' not for alias arguments")
2278 2259 >>> _parsealiasdecl('foo::bar')
2279 2260 ('foo::bar', None, None, 'invalid format')
2280 2261 >>> _parsealiasdecl('foo bar')
2281 2262 ('foo bar', None, None, 'at 4: invalid token')
2282 2263 >>> _parsealiasdecl('foo()')
2283 2264 ('foo', ('func', ('symbol', 'foo')), [], None)
2284 2265 >>> _parsealiasdecl('$foo()')
2285 2266 ('$foo()', None, None, "'$' not for alias arguments")
2286 2267 >>> _parsealiasdecl('foo($1, $2)')
2287 2268 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2288 2269 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2289 2270 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2290 2271 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2291 2272 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2292 2273 >>> _parsealiasdecl('foo(bar($1, $2))')
2293 2274 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2294 2275 >>> _parsealiasdecl('foo("string")')
2295 2276 ('foo("string")', None, None, 'invalid argument list')
2296 2277 >>> _parsealiasdecl('foo($1, $2')
2297 2278 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2298 2279 >>> _parsealiasdecl('foo("string')
2299 2280 ('foo("string', None, None, 'at 5: unterminated string')
2300 2281 >>> _parsealiasdecl('foo($1, $2, $1)')
2301 2282 ('foo', None, None, 'argument names collide with each other')
2302 2283 """
2303 2284 p = parser.parser(elements)
2304 2285 try:
2305 2286 tree, pos = p.parse(_tokenizealias(decl))
2306 2287 if (pos != len(decl)):
2307 2288 raise error.ParseError(_('invalid token'), pos)
2308 2289 tree = parser.simplifyinfixops(tree, ('list',))
2309 2290
2310 2291 if tree[0] == 'symbol':
2311 2292 # "name = ...." style
2312 2293 name = tree[1]
2313 2294 if name.startswith('$'):
2314 2295 return (decl, None, None, _("'$' not for alias arguments"))
2315 2296 return (name, ('symbol', name), None, None)
2316 2297
2317 if isvalidfunc(tree):
2298 if tree[0] == 'func' and tree[1][0] == 'symbol':
2318 2299 # "name(arg, ....) = ...." style
2319 name = getfuncname(tree)
2300 name = tree[1][1]
2320 2301 if name.startswith('$'):
2321 2302 return (decl, None, None, _("'$' not for alias arguments"))
2322 2303 args = []
2323 for arg in getfuncargs(tree):
2304 for arg in getlist(tree[2]):
2324 2305 if arg[0] != 'symbol':
2325 2306 return (decl, None, None, _("invalid argument list"))
2326 2307 args.append(arg[1])
2327 2308 if len(args) != len(set(args)):
2328 2309 return (name, None, None,
2329 2310 _("argument names collide with each other"))
2330 2311 return (name, ('func', ('symbol', name)), args, None)
2331 2312
2332 2313 return (decl, None, None, _("invalid format"))
2333 2314 except error.ParseError as inst:
2334 2315 return (decl, None, None, parseerrordetail(inst))
2335 2316
2336 2317 def _relabelaliasargs(tree, args):
2337 2318 if not isinstance(tree, tuple):
2338 2319 return tree
2339 2320 op = tree[0]
2340 2321 if op != 'symbol':
2341 2322 return (op,) + tuple(_relabelaliasargs(x, args) for x in tree[1:])
2342 2323
2343 2324 assert len(tree) == 2
2344 2325 sym = tree[1]
2345 2326 if sym in args:
2346 2327 op = '_aliasarg'
2347 2328 elif sym.startswith('$'):
2348 2329 raise error.ParseError(_("'$' not for alias arguments"))
2349 2330 return (op, sym)
2350 2331
2351 2332 def _parsealiasdefn(defn, args):
2352 2333 """Parse alias definition ``defn``
2353 2334
2354 2335 This function marks alias argument references as ``_aliasarg``.
2355 2336
2356 2337 ``args`` is a list of alias argument names, or None if the alias
2357 2338 is declared as a symbol.
2358 2339
2359 2340 This returns the parsed tree as the result.
2360 2341
2361 2342 >>> def prettyformat(tree):
2362 2343 ... return parser.prettyformat(tree, ('_aliasarg', 'string', 'symbol'))
2363 2344 >>> args = ['$1', '$2', 'foo']
2364 2345 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2365 2346 (or
2366 2347 ('_aliasarg', '$1')
2367 2348 ('_aliasarg', 'foo'))
2368 2349 >>> try:
2369 2350 ... _parsealiasdefn('$1 or $bar', args)
2370 2351 ... except error.ParseError, inst:
2371 2352 ... print parseerrordetail(inst)
2372 2353 '$' not for alias arguments
2373 2354 >>> args = ['$1', '$10', 'foo']
2374 2355 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2375 2356 (or
2376 2357 ('_aliasarg', '$10')
2377 2358 ('symbol', 'foobar'))
2378 2359 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2379 2360 (or
2380 2361 ('string', '$1')
2381 2362 ('string', 'foo'))
2382 2363 """
2383 2364 if args:
2384 2365 args = set(args)
2385 2366 else:
2386 2367 args = set()
2387 2368
2388 2369 p = parser.parser(elements)
2389 2370 tree, pos = p.parse(_tokenizealias(defn))
2390 2371 if pos != len(defn):
2391 2372 raise error.ParseError(_('invalid token'), pos)
2392 2373 tree = parser.simplifyinfixops(tree, ('list', 'or'))
2393 2374 return _relabelaliasargs(tree, args)
2394 2375
2395 2376 class revsetalias(object):
2396 2377 # whether the alias's own `error` information has already been shown.
2397 2378 # this avoids showing the same warning multiple times across `findaliases` calls.
2398 2379 warned = False
2399 2380
2400 2381 def __init__(self, name, value):
2401 2382 '''Aliases like:
2402 2383
2403 2384 h = heads(default)
2404 2385 b($1) = ancestors($1) - ancestors(default)
2405 2386 '''
2406 2387 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2407 2388 if self.error:
2408 2389 self.error = _('failed to parse the declaration of revset alias'
2409 2390 ' "%s": %s') % (self.name, self.error)
2410 2391 return
2411 2392
2412 2393 try:
2413 2394 self.replacement = _parsealiasdefn(value, self.args)
2414 2395 except error.ParseError as inst:
2415 2396 self.error = _('failed to parse the definition of revset alias'
2416 2397 ' "%s": %s') % (self.name, parseerrordetail(inst))
2417 2398
2418 2399 def _getalias(aliases, tree):
2419 2400 """If tree looks like an unexpanded alias, return it. Return None
2420 2401 otherwise.
2421 2402 """
2422 2403 if isinstance(tree, tuple):
2423 2404 if tree[0] == 'symbol':
2424 2405 name = tree[1]
2425 2406 alias = aliases.get(name)
2426 2407 if alias and alias.args is None and alias.tree == tree:
2427 2408 return alias
2428 2409 if tree[0] == 'func':
2429 2410 if tree[1][0] == 'symbol':
2430 2411 name = tree[1][1]
2431 2412 alias = aliases.get(name)
2432 2413 if alias and alias.args is not None and alias.tree == tree[:2]:
2433 2414 return alias
2434 2415 return None
2435 2416
2436 2417 def _expandargs(tree, args):
2437 2418 """Replace _aliasarg instances with the substitution value of the
2438 2419 same name in args, recursively.
2439 2420 """
2440 2421 if not isinstance(tree, tuple):
2441 2422 return tree
2442 2423 if tree[0] == '_aliasarg':
2443 2424 sym = tree[1]
2444 2425 return args[sym]
2445 2426 return tuple(_expandargs(t, args) for t in tree)
2446 2427
2447 2428 def _expandaliases(aliases, tree, expanding, cache):
2448 2429 """Expand aliases in tree, recursively.
2449 2430
2450 2431 'aliases' is a dictionary mapping user defined aliases to
2451 2432 revsetalias objects.
2452 2433 """
2453 2434 if not isinstance(tree, tuple):
2454 2435 # Do not expand raw strings
2455 2436 return tree
2456 2437 alias = _getalias(aliases, tree)
2457 2438 if alias is not None:
2458 2439 if alias.error:
2459 2440 raise error.Abort(alias.error)
2460 2441 if alias in expanding:
2461 2442 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2462 2443 'detected') % alias.name)
2463 2444 expanding.append(alias)
2464 2445 if alias.name not in cache:
2465 2446 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2466 2447 expanding, cache)
2467 2448 result = cache[alias.name]
2468 2449 expanding.pop()
2469 2450 if alias.args is not None:
2470 2451 l = getlist(tree[2])
2471 2452 if len(l) != len(alias.args):
2472 2453 raise error.ParseError(
2473 2454 _('invalid number of arguments: %d') % len(l))
2474 2455 l = [_expandaliases(aliases, a, [], cache) for a in l]
2475 2456 result = _expandargs(result, dict(zip(alias.args, l)))
2476 2457 else:
2477 2458 result = tuple(_expandaliases(aliases, t, expanding, cache)
2478 2459 for t in tree)
2479 2460 return result
2480 2461
2481 2462 def findaliases(ui, tree, showwarning=None):
2482 2463 aliases = {}
2483 2464 for k, v in ui.configitems('revsetalias'):
2484 2465 alias = revsetalias(k, v)
2485 2466 aliases[alias.name] = alias
2486 2467 tree = _expandaliases(aliases, tree, [], {})
2487 2468 if showwarning:
2488 2469 # warn about problematic (but unreferenced) aliases
2489 2470 for name, alias in sorted(aliases.iteritems()):
2490 2471 if alias.error and not alias.warned:
2491 2472 showwarning(_('warning: %s\n') % (alias.error))
2492 2473 alias.warned = True
2493 2474 return tree
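# Example: a minimal sketch of how aliases reach findaliases(), assuming an
# hgrc along the lines of:
#
#     [revsetalias]
#     h = heads(default)
#     d($1) = sort($1, date)
#
# With that configuration, findaliases(ui, parse('d(h)')) expands to
# (essentially) the tree parse() would build for 'sort(heads(default), date)'.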
2494 2475
2495 2476 def foldconcat(tree):
2496 2477 """Fold elements to be concatenated by `##`
2497 2478 """
2498 2479 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2499 2480 return tree
2500 2481 if tree[0] == '_concat':
2501 2482 pending = [tree]
2502 2483 l = []
2503 2484 while pending:
2504 2485 e = pending.pop()
2505 2486 if e[0] == '_concat':
2506 2487 pending.extend(reversed(e[1:]))
2507 2488 elif e[0] in ('string', 'symbol'):
2508 2489 l.append(e[1])
2509 2490 else:
2510 2491 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2511 2492 raise error.ParseError(msg)
2512 2493 return ('string', ''.join(l))
2513 2494 else:
2514 2495 return tuple(foldconcat(t) for t in tree)
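# Example: a minimal sketch of what foldconcat() does to a parsed '##'
# concatenation:
#
#     '"foo" ## "bar"'  parses to   ('_concat', ('string', 'foo'), ('string', 'bar'))
#     foldconcat() folds that into  ('string', 'foobar')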
2515 2496
2516 2497 def parse(spec, lookup=None):
2517 2498 p = parser.parser(elements)
2518 2499 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2519 2500 if pos != len(spec):
2520 2501 raise error.ParseError(_("invalid token"), pos)
2521 2502 return parser.simplifyinfixops(tree, ('list', 'or'))
2522 2503
2523 2504 def posttreebuilthook(tree, repo):
2524 2505 # hook for extensions to execute code on the optimized tree
2525 2506 pass
2526 2507
2527 2508 def match(ui, spec, repo=None):
2528 2509 if not spec:
2529 2510 raise error.ParseError(_("empty query"))
2530 2511 lookup = None
2531 2512 if repo:
2532 2513 lookup = repo.__contains__
2533 2514 tree = parse(spec, lookup)
2534 2515 return _makematcher(ui, tree, repo)
2535 2516
2536 2517 def matchany(ui, specs, repo=None):
2537 2518 """Create a matcher that will include any revisions matching one of the
2538 2519 given specs"""
2539 2520 if not specs:
2540 2521 def mfunc(repo, subset=None):
2541 2522 return baseset()
2542 2523 return mfunc
2543 2524 if not all(specs):
2544 2525 raise error.ParseError(_("empty query"))
2545 2526 lookup = None
2546 2527 if repo:
2547 2528 lookup = repo.__contains__
2548 2529 if len(specs) == 1:
2549 2530 tree = parse(specs[0], lookup)
2550 2531 else:
2551 2532 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2552 2533 return _makematcher(ui, tree, repo)
2553 2534
2554 2535 def _makematcher(ui, tree, repo):
2555 2536 if ui:
2556 2537 tree = findaliases(ui, tree, showwarning=ui.warn)
2557 2538 tree = foldconcat(tree)
2558 2539 weight, tree = optimize(tree, True)
2559 2540 posttreebuilthook(tree, repo)
2560 2541 def mfunc(repo, subset=None):
2561 2542 if subset is None:
2562 2543 subset = fullreposet(repo)
2563 2544 if util.safehasattr(subset, 'isascending'):
2564 2545 result = getset(repo, subset, tree)
2565 2546 else:
2566 2547 result = getset(repo, baseset(subset), tree)
2567 2548 return result
2568 2549 return mfunc
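# Example: a minimal sketch of the entry points above, assuming ``ui`` and
# ``repo`` objects:
#
#     m = match(ui, 'heads(default)')           # parse, expand aliases, optimize
#     revs = m(repo)                            # evaluate against the whole repo
#     revs = m(repo, subset=baseset([0, 1, 2])) # or against a narrower subset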
2569 2550
2570 2551 def formatspec(expr, *args):
2571 2552 '''
2572 2553 This is a convenience function for using revsets internally, and
2573 2554 escapes arguments appropriately. Aliases are intentionally ignored
2574 2555 so that intended expression behavior isn't accidentally subverted.
2575 2556
2576 2557 Supported arguments:
2577 2558
2578 2559 %r = revset expression, parenthesized
2579 2560 %d = int(arg), no quoting
2580 2561 %s = string(arg), escaped and single-quoted
2581 2562 %b = arg.branch(), escaped and single-quoted
2582 2563 %n = hex(arg), single-quoted
2583 2564 %% = a literal '%'
2584 2565
2585 2566 Prefixing the type with 'l' specifies a parenthesized list of that type.
2586 2567
2587 2568 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2588 2569 '(10 or 11):: and ((this()) or (that()))'
2589 2570 >>> formatspec('%d:: and not %d::', 10, 20)
2590 2571 '10:: and not 20::'
2591 2572 >>> formatspec('%ld or %ld', [], [1])
2592 2573 "_list('') or 1"
2593 2574 >>> formatspec('keyword(%s)', 'foo\\xe9')
2594 2575 "keyword('foo\\\\xe9')"
2595 2576 >>> b = lambda: 'default'
2596 2577 >>> b.branch = b
2597 2578 >>> formatspec('branch(%b)', b)
2598 2579 "branch('default')"
2599 2580 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2600 2581 "root(_list('a\\x00b\\x00c\\x00d'))"
2601 2582 '''
2602 2583
2603 2584 def quote(s):
2604 2585 return repr(str(s))
2605 2586
2606 2587 def argtype(c, arg):
2607 2588 if c == 'd':
2608 2589 return str(int(arg))
2609 2590 elif c == 's':
2610 2591 return quote(arg)
2611 2592 elif c == 'r':
2612 2593 parse(arg) # make sure syntax errors are confined
2613 2594 return '(%s)' % arg
2614 2595 elif c == 'n':
2615 2596 return quote(node.hex(arg))
2616 2597 elif c == 'b':
2617 2598 return quote(arg.branch())
2618 2599
2619 2600 def listexp(s, t):
2620 2601 l = len(s)
2621 2602 if l == 0:
2622 2603 return "_list('')"
2623 2604 elif l == 1:
2624 2605 return argtype(t, s[0])
2625 2606 elif t == 'd':
2626 2607 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2627 2608 elif t == 's':
2628 2609 return "_list('%s')" % "\0".join(s)
2629 2610 elif t == 'n':
2630 2611 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2631 2612 elif t == 'b':
2632 2613 return "_list('%s')" % "\0".join(a.branch() for a in s)
2633 2614
2634 2615 m = l // 2
2635 2616 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2636 2617
2637 2618 ret = ''
2638 2619 pos = 0
2639 2620 arg = 0
2640 2621 while pos < len(expr):
2641 2622 c = expr[pos]
2642 2623 if c == '%':
2643 2624 pos += 1
2644 2625 d = expr[pos]
2645 2626 if d == '%':
2646 2627 ret += d
2647 2628 elif d in 'dsnbr':
2648 2629 ret += argtype(d, args[arg])
2649 2630 arg += 1
2650 2631 elif d == 'l':
2651 2632 # a list of some type
2652 2633 pos += 1
2653 2634 d = expr[pos]
2654 2635 ret += listexp(list(args[arg]), d)
2655 2636 arg += 1
2656 2637 else:
2657 2638 raise error.Abort('unexpected revspec format character %s' % d)
2658 2639 else:
2659 2640 ret += c
2660 2641 pos += 1
2661 2642
2662 2643 return ret
2663 2644
2664 2645 def prettyformat(tree):
2665 2646 return parser.prettyformat(tree, ('string', 'symbol'))
2666 2647
2667 2648 def depth(tree):
2668 2649 if isinstance(tree, tuple):
2669 2650 return max(map(depth, tree)) + 1
2670 2651 else:
2671 2652 return 0
2672 2653
2673 2654 def funcsused(tree):
2674 2655 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2675 2656 return set()
2676 2657 else:
2677 2658 funcs = set()
2678 2659 for s in tree[1:]:
2679 2660 funcs |= funcsused(s)
2680 2661 if tree[0] == 'func':
2681 2662 funcs.add(tree[1][1])
2682 2663 return funcs
2683 2664
2684 2665 def _formatsetrepr(r):
2685 2666 """Format an optional printable representation of a set
2686 2667
2687 2668 ======== =================================
2688 2669 type(r) example
2689 2670 ======== =================================
2690 2671 tuple ('<not %r>', other)
2691 2672 str '<branch closed>'
2692 2673 callable lambda: '<branch %r>' % sorted(b)
2693 2674 object other
2694 2675 ======== =================================
2695 2676 """
2696 2677 if r is None:
2697 2678 return ''
2698 2679 elif isinstance(r, tuple):
2699 2680 return r[0] % r[1:]
2700 2681 elif isinstance(r, str):
2701 2682 return r
2702 2683 elif callable(r):
2703 2684 return r()
2704 2685 else:
2705 2686 return repr(r)
2706 2687
2707 2688 class abstractsmartset(object):
2708 2689
2709 2690 def __nonzero__(self):
2710 2691 """True if the smartset is not empty"""
2711 2692 raise NotImplementedError()
2712 2693
2713 2694 def __contains__(self, rev):
2714 2695 """provide fast membership testing"""
2715 2696 raise NotImplementedError()
2716 2697
2717 2698 def __iter__(self):
2718 2699 """iterate the set in the order it is supposed to be iterated"""
2719 2700 raise NotImplementedError()
2720 2701
2721 2702 # Attributes containing a function to perform a fast iteration in a given
2722 2703 # direction. A smartset can have none, one, or both defined.
2723 2704 #
2724 2705 # Default value is None instead of a function returning None to avoid
2725 2706 # initializing an iterator just for testing if a fast method exists.
2726 2707 fastasc = None
2727 2708 fastdesc = None
2728 2709
2729 2710 def isascending(self):
2730 2711 """True if the set will iterate in ascending order"""
2731 2712 raise NotImplementedError()
2732 2713
2733 2714 def isdescending(self):
2734 2715 """True if the set will iterate in descending order"""
2735 2716 raise NotImplementedError()
2736 2717
2737 2718 @util.cachefunc
2738 2719 def min(self):
2739 2720 """return the minimum element in the set"""
2740 2721 if self.fastasc is not None:
2741 2722 for r in self.fastasc():
2742 2723 return r
2743 2724 raise ValueError('arg is an empty sequence')
2744 2725 return min(self)
2745 2726
2746 2727 @util.cachefunc
2747 2728 def max(self):
2748 2729 """return the maximum element in the set"""
2749 2730 if self.fastdesc is not None:
2750 2731 for r in self.fastdesc():
2751 2732 return r
2752 2733 raise ValueError('arg is an empty sequence')
2753 2734 return max(self)
2754 2735
2755 2736 def first(self):
2756 2737 """return the first element in the set (user iteration perspective)
2757 2738
2758 2739 Return None if the set is empty"""
2759 2740 raise NotImplementedError()
2760 2741
2761 2742 def last(self):
2762 2743 """return the last element in the set (user iteration perspective)
2763 2744
2764 2745 Return None if the set is empty"""
2765 2746 raise NotImplementedError()
2766 2747
2767 2748 def __len__(self):
2768 2749 """return the length of the smartset
2769 2750
2770 2751 This can be expensive on a smartset that could otherwise be lazy."""
2771 2752 raise NotImplementedError()
2772 2753
2773 2754 def reverse(self):
2774 2755 """reverse the expected iteration order"""
2775 2756 raise NotImplementedError()
2776 2757
2777 2758 def sort(self, reverse=True):
2778 2759 """get the set to iterate in an ascending or descending order"""
2779 2760 raise NotImplementedError()
2780 2761
2781 2762 def __and__(self, other):
2782 2763 """Returns a new object with the intersection of the two collections.
2783 2764
2784 2765 This is part of the mandatory API for smartset."""
2785 2766 if isinstance(other, fullreposet):
2786 2767 return self
2787 2768 return self.filter(other.__contains__, condrepr=other, cache=False)
2788 2769
2789 2770 def __add__(self, other):
2790 2771 """Returns a new object with the union of the two collections.
2791 2772
2792 2773 This is part of the mandatory API for smartset."""
2793 2774 return addset(self, other)
2794 2775
2795 2776 def __sub__(self, other):
2796 2777 """Returns a new object with the subtraction of the two collections.
2797 2778
2798 2779 This is part of the mandatory API for smartset."""
2799 2780 c = other.__contains__
2800 2781 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2801 2782 cache=False)
2802 2783
2803 2784 def filter(self, condition, condrepr=None, cache=True):
2804 2785 """Returns this smartset filtered by condition as a new smartset.
2805 2786
2806 2787 `condition` is a callable which takes a revision number and returns a
2807 2788 boolean. Optional `condrepr` provides a printable representation of
2808 2789 the given `condition`.
2809 2790
2810 2791 This is part of the mandatory API for smartset."""
2811 2792 # builtins cannot be cached, but they do not need to be
2812 2793 if cache and util.safehasattr(condition, 'func_code'):
2813 2794 condition = util.cachefunc(condition)
2814 2795 return filteredset(self, condition, condrepr)
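# Example: a minimal sketch of the smartset operations declared above, using
# the concrete baseset class defined below:
#
#     xs = baseset([3, 1, 2])
#     ys = xs.filter(lambda r: r > 1)   # lazy filteredset over xs
#     zs = xs & baseset([2, 3, 4])      # intersection, also a filteredset
#     xs.sort()                         # xs now iterates 1, 2, 3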
2815 2796
2816 2797 class baseset(abstractsmartset):
2817 2798 """Basic data structure that represents a revset and contains the basic
2818 2799 operations that it should be able to perform.
2819 2800
2820 2801 Every method in this class should be implemented by any smartset class.
2821 2802 """
2822 2803 def __init__(self, data=(), datarepr=None):
2823 2804 """
2824 2805 datarepr: a tuple of (format, obj, ...), a function or an object that
2825 2806 provides a printable representation of the given data.
2826 2807 """
2827 2808 if not isinstance(data, list):
2828 2809 if isinstance(data, set):
2829 2810 self._set = data
2830 2811 data = list(data)
2831 2812 self._list = data
2832 2813 self._datarepr = datarepr
2833 2814 self._ascending = None
2834 2815
2835 2816 @util.propertycache
2836 2817 def _set(self):
2837 2818 return set(self._list)
2838 2819
2839 2820 @util.propertycache
2840 2821 def _asclist(self):
2841 2822 asclist = self._list[:]
2842 2823 asclist.sort()
2843 2824 return asclist
2844 2825
2845 2826 def __iter__(self):
2846 2827 if self._ascending is None:
2847 2828 return iter(self._list)
2848 2829 elif self._ascending:
2849 2830 return iter(self._asclist)
2850 2831 else:
2851 2832 return reversed(self._asclist)
2852 2833
2853 2834 def fastasc(self):
2854 2835 return iter(self._asclist)
2855 2836
2856 2837 def fastdesc(self):
2857 2838 return reversed(self._asclist)
2858 2839
2859 2840 @util.propertycache
2860 2841 def __contains__(self):
2861 2842 return self._set.__contains__
2862 2843
2863 2844 def __nonzero__(self):
2864 2845 return bool(self._list)
2865 2846
2866 2847 def sort(self, reverse=False):
2867 2848 self._ascending = not bool(reverse)
2868 2849
2869 2850 def reverse(self):
2870 2851 if self._ascending is None:
2871 2852 self._list.reverse()
2872 2853 else:
2873 2854 self._ascending = not self._ascending
2874 2855
2875 2856 def __len__(self):
2876 2857 return len(self._list)
2877 2858
2878 2859 def isascending(self):
2879 2860 """Returns True if the collection is in ascending order, False if not.
2880 2861
2881 2862 This is part of the mandatory API for smartset."""
2882 2863 if len(self) <= 1:
2883 2864 return True
2884 2865 return self._ascending is not None and self._ascending
2885 2866
2886 2867 def isdescending(self):
2887 2868 """Returns True if the collection is in descending order, False if not.
2888 2869
2889 2870 This is part of the mandatory API for smartset."""
2890 2871 if len(self) <= 1:
2891 2872 return True
2892 2873 return self._ascending is not None and not self._ascending
2893 2874
2894 2875 def first(self):
2895 2876 if self:
2896 2877 if self._ascending is None:
2897 2878 return self._list[0]
2898 2879 elif self._ascending:
2899 2880 return self._asclist[0]
2900 2881 else:
2901 2882 return self._asclist[-1]
2902 2883 return None
2903 2884
2904 2885 def last(self):
2905 2886 if self:
2906 2887 if self._ascending is None:
2907 2888 return self._list[-1]
2908 2889 elif self._ascending:
2909 2890 return self._asclist[-1]
2910 2891 else:
2911 2892 return self._asclist[0]
2912 2893 return None
2913 2894
2914 2895 def __repr__(self):
2915 2896 d = {None: '', False: '-', True: '+'}[self._ascending]
2916 2897 s = _formatsetrepr(self._datarepr)
2917 2898 if not s:
2918 2899 s = repr(self._list)
2919 2900 return '<%s%s %s>' % (type(self).__name__, d, s)
2920 2901
2921 2902 class filteredset(abstractsmartset):
2922 2903 """Duck type for baseset class which iterates lazily over the revisions in
2923 2904 the subset and contains a function which tests for membership in the
2924 2905 revset
2925 2906 """
2926 2907 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2927 2908 """
2928 2909 condition: a function that decides whether a revision in the subset
2929 2910 belongs to the revset or not.
2930 2911 condrepr: a tuple of (format, obj, ...), a function or an object that
2931 2912 provides a printable representation of the given condition.
2932 2913 """
2933 2914 self._subset = subset
2934 2915 self._condition = condition
2935 2916 self._condrepr = condrepr
2936 2917
2937 2918 def __contains__(self, x):
2938 2919 return x in self._subset and self._condition(x)
2939 2920
2940 2921 def __iter__(self):
2941 2922 return self._iterfilter(self._subset)
2942 2923
2943 2924 def _iterfilter(self, it):
2944 2925 cond = self._condition
2945 2926 for x in it:
2946 2927 if cond(x):
2947 2928 yield x
2948 2929
2949 2930 @property
2950 2931 def fastasc(self):
2951 2932 it = self._subset.fastasc
2952 2933 if it is None:
2953 2934 return None
2954 2935 return lambda: self._iterfilter(it())
2955 2936
2956 2937 @property
2957 2938 def fastdesc(self):
2958 2939 it = self._subset.fastdesc
2959 2940 if it is None:
2960 2941 return None
2961 2942 return lambda: self._iterfilter(it())
2962 2943
2963 2944 def __nonzero__(self):
2964 2945 fast = self.fastasc
2965 2946 if fast is None:
2966 2947 fast = self.fastdesc
2967 2948 if fast is not None:
2968 2949 it = fast()
2969 2950 else:
2970 2951 it = self
2971 2952
2972 2953 for r in it:
2973 2954 return True
2974 2955 return False
2975 2956
2976 2957 def __len__(self):
2977 2958 # Basic implementation to be changed in future patches.
2978 2959 l = baseset([r for r in self])
2979 2960 return len(l)
2980 2961
2981 2962 def sort(self, reverse=False):
2982 2963 self._subset.sort(reverse=reverse)
2983 2964
2984 2965 def reverse(self):
2985 2966 self._subset.reverse()
2986 2967
2987 2968 def isascending(self):
2988 2969 return self._subset.isascending()
2989 2970
2990 2971 def isdescending(self):
2991 2972 return self._subset.isdescending()
2992 2973
2993 2974 def first(self):
2994 2975 for x in self:
2995 2976 return x
2996 2977 return None
2997 2978
2998 2979 def last(self):
2999 2980 it = None
3000 2981 if self.isascending():
3001 2982 it = self.fastdesc
3002 2983 elif self.isdescending():
3003 2984 it = self.fastasc
3004 2985 if it is not None:
3005 2986 for x in it():
3006 2987 return x
3007 2988 return None #empty case
3008 2989 else:
3009 2990 x = None
3010 2991 for x in self:
3011 2992 pass
3012 2993 return x
3013 2994
3014 2995 def __repr__(self):
3015 2996 xs = [repr(self._subset)]
3016 2997 s = _formatsetrepr(self._condrepr)
3017 2998 if s:
3018 2999 xs.append(s)
3019 3000 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3020 3001
3021 3002 def _iterordered(ascending, iter1, iter2):
3022 3003 """produce an ordered iteration from two iterators with the same order
3023 3004
3024 3005 The ascending parameter is used to indicate the iteration direction.
3025 3006 """
3026 3007 choice = max
3027 3008 if ascending:
3028 3009 choice = min
3029 3010
3030 3011 val1 = None
3031 3012 val2 = None
3032 3013 try:
3033 3014 # Consume both iterators in an ordered way until one is empty
3034 3015 while True:
3035 3016 if val1 is None:
3036 3017 val1 = iter1.next()
3037 3018 if val2 is None:
3038 3019 val2 = iter2.next()
3039 3020 next = choice(val1, val2)
3040 3021 yield next
3041 3022 if val1 == next:
3042 3023 val1 = None
3043 3024 if val2 == next:
3044 3025 val2 = None
3045 3026 except StopIteration:
3046 3027 # Flush any remaining values and consume the other one
3047 3028 it = iter2
3048 3029 if val1 is not None:
3049 3030 yield val1
3050 3031 it = iter1
3051 3032 elif val2 is not None:
3052 3033 # the values might have been equal, so both iterators may now be empty
3053 3034 yield val2
3054 3035 for val in it:
3055 3036 yield val
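# Example: a minimal sketch of _iterordered(), which merges two already-sorted
# iterators and collapses values present in both:
#
#     list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#     -> [1, 2, 3, 5, 6]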
3056 3037
3057 3038 class addset(abstractsmartset):
3058 3039 """Represent the addition of two sets
3059 3040
3060 3041 Wrapper structure for lazily adding two structures without losing much
3061 3042 performance on the __contains__ method
3062 3043
3063 3044 If the ascending attribute is set, that means the two structures are
3064 3045 ordered in either an ascending or descending way. Therefore, we can add
3065 3046 them maintaining the order by iterating over both at the same time
3066 3047
3067 3048 >>> xs = baseset([0, 3, 2])
3068 3049 >>> ys = baseset([5, 2, 4])
3069 3050
3070 3051 >>> rs = addset(xs, ys)
3071 3052 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3072 3053 (True, True, False, True, 0, 4)
3073 3054 >>> rs = addset(xs, baseset([]))
3074 3055 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3075 3056 (True, True, False, 0, 2)
3076 3057 >>> rs = addset(baseset([]), baseset([]))
3077 3058 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3078 3059 (False, False, None, None)
3079 3060
3080 3061 iterate unsorted:
3081 3062 >>> rs = addset(xs, ys)
3082 3063 >>> [x for x in rs] # without _genlist
3083 3064 [0, 3, 2, 5, 4]
3084 3065 >>> assert not rs._genlist
3085 3066 >>> len(rs)
3086 3067 5
3087 3068 >>> [x for x in rs] # with _genlist
3088 3069 [0, 3, 2, 5, 4]
3089 3070 >>> assert rs._genlist
3090 3071
3091 3072 iterate ascending:
3092 3073 >>> rs = addset(xs, ys, ascending=True)
3093 3074 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3094 3075 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3095 3076 >>> assert not rs._asclist
3096 3077 >>> len(rs)
3097 3078 5
3098 3079 >>> [x for x in rs], [x for x in rs.fastasc()]
3099 3080 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3100 3081 >>> assert rs._asclist
3101 3082
3102 3083 iterate descending:
3103 3084 >>> rs = addset(xs, ys, ascending=False)
3104 3085 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3105 3086 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3106 3087 >>> assert not rs._asclist
3107 3088 >>> len(rs)
3108 3089 5
3109 3090 >>> [x for x in rs], [x for x in rs.fastdesc()]
3110 3091 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3111 3092 >>> assert rs._asclist
3112 3093
3113 3094 iterate ascending without fastasc:
3114 3095 >>> rs = addset(xs, generatorset(ys), ascending=True)
3115 3096 >>> assert rs.fastasc is None
3116 3097 >>> [x for x in rs]
3117 3098 [0, 2, 3, 4, 5]
3118 3099
3119 3100 iterate descending without fastdesc:
3120 3101 >>> rs = addset(generatorset(xs), ys, ascending=False)
3121 3102 >>> assert rs.fastdesc is None
3122 3103 >>> [x for x in rs]
3123 3104 [5, 4, 3, 2, 0]
3124 3105 """
3125 3106 def __init__(self, revs1, revs2, ascending=None):
3126 3107 self._r1 = revs1
3127 3108 self._r2 = revs2
3128 3109 self._iter = None
3129 3110 self._ascending = ascending
3130 3111 self._genlist = None
3131 3112 self._asclist = None
3132 3113
3133 3114 def __len__(self):
3134 3115 return len(self._list)
3135 3116
3136 3117 def __nonzero__(self):
3137 3118 return bool(self._r1) or bool(self._r2)
3138 3119
3139 3120 @util.propertycache
3140 3121 def _list(self):
3141 3122 if not self._genlist:
3142 3123 self._genlist = baseset(iter(self))
3143 3124 return self._genlist
3144 3125
3145 3126 def __iter__(self):
3146 3127 """Iterate over both collections without repeating elements
3147 3128
3148 3129 If the ascending attribute is not set, iterate over the first one and
3149 3130 then over the second one checking for membership on the first one so we
3150 3131 don't yield any duplicates.
3151 3132
3152 3133 If the ascending attribute is set, iterate over both collections at the
3153 3134 same time, yielding only one value at a time in the given order.
3154 3135 """
3155 3136 if self._ascending is None:
3156 3137 if self._genlist:
3157 3138 return iter(self._genlist)
3158 3139 def arbitraryordergen():
3159 3140 for r in self._r1:
3160 3141 yield r
3161 3142 inr1 = self._r1.__contains__
3162 3143 for r in self._r2:
3163 3144 if not inr1(r):
3164 3145 yield r
3165 3146 return arbitraryordergen()
3166 3147 # try to use our own fast iterator if it exists
3167 3148 self._trysetasclist()
3168 3149 if self._ascending:
3169 3150 attr = 'fastasc'
3170 3151 else:
3171 3152 attr = 'fastdesc'
3172 3153 it = getattr(self, attr)
3173 3154 if it is not None:
3174 3155 return it()
3175 3156 # maybe half of the component supports fast
3176 3157 # get iterator for _r1
3177 3158 iter1 = getattr(self._r1, attr)
3178 3159 if iter1 is None:
3179 3160 # let's avoid side effects (not sure it matters)
3180 3161 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3181 3162 else:
3182 3163 iter1 = iter1()
3183 3164 # get iterator for _r2
3184 3165 iter2 = getattr(self._r2, attr)
3185 3166 if iter2 is None:
3186 3167 # let's avoid side effects (not sure it matters)
3187 3168 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3188 3169 else:
3189 3170 iter2 = iter2()
3190 3171 return _iterordered(self._ascending, iter1, iter2)
3191 3172
3192 3173 def _trysetasclist(self):
3193 3174 """populate the _asclist attribute if possible and necessary"""
3194 3175 if self._genlist is not None and self._asclist is None:
3195 3176 self._asclist = sorted(self._genlist)
3196 3177
3197 3178 @property
3198 3179 def fastasc(self):
3199 3180 self._trysetasclist()
3200 3181 if self._asclist is not None:
3201 3182 return self._asclist.__iter__
3202 3183 iter1 = self._r1.fastasc
3203 3184 iter2 = self._r2.fastasc
3204 3185 if None in (iter1, iter2):
3205 3186 return None
3206 3187 return lambda: _iterordered(True, iter1(), iter2())
3207 3188
3208 3189 @property
3209 3190 def fastdesc(self):
3210 3191 self._trysetasclist()
3211 3192 if self._asclist is not None:
3212 3193 return self._asclist.__reversed__
3213 3194 iter1 = self._r1.fastdesc
3214 3195 iter2 = self._r2.fastdesc
3215 3196 if None in (iter1, iter2):
3216 3197 return None
3217 3198 return lambda: _iterordered(False, iter1(), iter2())
3218 3199
3219 3200 def __contains__(self, x):
3220 3201 return x in self._r1 or x in self._r2
3221 3202
3222 3203 def sort(self, reverse=False):
3223 3204 """Sort the added set
3224 3205
3225 3206 For this we use the cached list with all the generated values and if we
3226 3207 know they are ascending or descending we can sort them in a smart way.
3227 3208 """
3228 3209 self._ascending = not reverse
3229 3210
3230 3211 def isascending(self):
3231 3212 return self._ascending is not None and self._ascending
3232 3213
3233 3214 def isdescending(self):
3234 3215 return self._ascending is not None and not self._ascending
3235 3216
3236 3217 def reverse(self):
3237 3218 if self._ascending is None:
3238 3219 self._list.reverse()
3239 3220 else:
3240 3221 self._ascending = not self._ascending
3241 3222
3242 3223 def first(self):
3243 3224 for x in self:
3244 3225 return x
3245 3226 return None
3246 3227
3247 3228 def last(self):
3248 3229 self.reverse()
3249 3230 val = self.first()
3250 3231 self.reverse()
3251 3232 return val
3252 3233
3253 3234 def __repr__(self):
3254 3235 d = {None: '', False: '-', True: '+'}[self._ascending]
3255 3236 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3256 3237
3257 3238 class generatorset(abstractsmartset):
3258 3239 """Wrap a generator for lazy iteration
3259 3240
3260 3241 Wrapper structure for generators that provides lazy membership and can
3261 3242 be iterated more than once.
3262 3243 When asked for membership it generates values until either it finds the
3263 3244 requested one or has gone through all the elements in the generator
3264 3245 """
3265 3246 def __init__(self, gen, iterasc=None):
3266 3247 """
3267 3248 gen: a generator producing the values for the generatorset.
3268 3249 """
3269 3250 self._gen = gen
3270 3251 self._asclist = None
3271 3252 self._cache = {}
3272 3253 self._genlist = []
3273 3254 self._finished = False
3274 3255 self._ascending = True
3275 3256 if iterasc is not None:
3276 3257 if iterasc:
3277 3258 self.fastasc = self._iterator
3278 3259 self.__contains__ = self._asccontains
3279 3260 else:
3280 3261 self.fastdesc = self._iterator
3281 3262 self.__contains__ = self._desccontains
3282 3263
3283 3264 def __nonzero__(self):
3284 3265 # Do not use 'for r in self' because it will enforce the iteration
3285 3266 # order (default ascending), possibly unrolling a whole descending
3286 3267 # iterator.
3287 3268 if self._genlist:
3288 3269 return True
3289 3270 for r in self._consumegen():
3290 3271 return True
3291 3272 return False
3292 3273
3293 3274 def __contains__(self, x):
3294 3275 if x in self._cache:
3295 3276 return self._cache[x]
3296 3277
3297 3278 # Use new values only, as existing values would be cached.
3298 3279 for l in self._consumegen():
3299 3280 if l == x:
3300 3281 return True
3301 3282
3302 3283 self._cache[x] = False
3303 3284 return False
3304 3285
3305 3286 def _asccontains(self, x):
3306 3287 """version of contains optimised for ascending generator"""
3307 3288 if x in self._cache:
3308 3289 return self._cache[x]
3309 3290
3310 3291 # Use new values only, as existing values would be cached.
3311 3292 for l in self._consumegen():
3312 3293 if l == x:
3313 3294 return True
3314 3295 if l > x:
3315 3296 break
3316 3297
3317 3298 self._cache[x] = False
3318 3299 return False
3319 3300
3320 3301 def _desccontains(self, x):
3321 3302 """version of contains optimised for descending generator"""
3322 3303 if x in self._cache:
3323 3304 return self._cache[x]
3324 3305
3325 3306 # Use new values only, as existing values would be cached.
3326 3307 for l in self._consumegen():
3327 3308 if l == x:
3328 3309 return True
3329 3310 if l < x:
3330 3311 break
3331 3312
3332 3313 self._cache[x] = False
3333 3314 return False
3334 3315
3335 3316 def __iter__(self):
3336 3317 if self._ascending:
3337 3318 it = self.fastasc
3338 3319 else:
3339 3320 it = self.fastdesc
3340 3321 if it is not None:
3341 3322 return it()
3342 3323 # we need to consume the iterator
3343 3324 for x in self._consumegen():
3344 3325 pass
3345 3326 # recall the same code
3346 3327 return iter(self)
3347 3328
3348 3329 def _iterator(self):
3349 3330 if self._finished:
3350 3331 return iter(self._genlist)
3351 3332
3352 3333 # We have to use this complex iteration strategy to allow multiple
3353 3334 # iterations at the same time. We need to be able to catch revisions
3354 3335 # removed from _consumegen and added to genlist in another instance.
3355 3336 #
3356 3337 # Getting rid of it would provide about a 15% speedup on this
3357 3338 # iteration.
3358 3339 genlist = self._genlist
3359 3340 nextrev = self._consumegen().next
3360 3341 _len = len # cache global lookup
3361 3342 def gen():
3362 3343 i = 0
3363 3344 while True:
3364 3345 if i < _len(genlist):
3365 3346 yield genlist[i]
3366 3347 else:
3367 3348 yield nextrev()
3368 3349 i += 1
3369 3350 return gen()
3370 3351
3371 3352 def _consumegen(self):
3372 3353 cache = self._cache
3373 3354 genlist = self._genlist.append
3374 3355 for item in self._gen:
3375 3356 cache[item] = True
3376 3357 genlist(item)
3377 3358 yield item
3378 3359 if not self._finished:
3379 3360 self._finished = True
3380 3361 asc = self._genlist[:]
3381 3362 asc.sort()
3382 3363 self._asclist = asc
3383 3364 self.fastasc = asc.__iter__
3384 3365 self.fastdesc = asc.__reversed__
3385 3366
3386 3367 def __len__(self):
3387 3368 for x in self._consumegen():
3388 3369 pass
3389 3370 return len(self._genlist)
3390 3371
3391 3372 def sort(self, reverse=False):
3392 3373 self._ascending = not reverse
3393 3374
3394 3375 def reverse(self):
3395 3376 self._ascending = not self._ascending
3396 3377
3397 3378 def isascending(self):
3398 3379 return self._ascending
3399 3380
3400 3381 def isdescending(self):
3401 3382 return not self._ascending
3402 3383
3403 3384 def first(self):
3404 3385 if self._ascending:
3405 3386 it = self.fastasc
3406 3387 else:
3407 3388 it = self.fastdesc
3408 3389 if it is None:
3409 3390 # we need to consume all and try again
3410 3391 for x in self._consumegen():
3411 3392 pass
3412 3393 return self.first()
3413 3394 return next(it(), None)
3414 3395
3415 3396 def last(self):
3416 3397 if self._ascending:
3417 3398 it = self.fastdesc
3418 3399 else:
3419 3400 it = self.fastasc
3420 3401 if it is None:
3421 3402 # we need to consume all and try again
3422 3403 for x in self._consumegen():
3423 3404 pass
3424 3405 return self.last()
3425 3406 return next(it(), None)
3426 3407
3427 3408 def __repr__(self):
3428 3409 d = {False: '-', True: '+'}[self._ascending]
3429 3410 return '<%s%s>' % (type(self).__name__, d)
3430 3411
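# Editorial example (a minimal sketch, not part of the original module):
# generatorset consumes the wrapped generator only as far as needed and
# caches everything it has produced, so membership tests and repeated
# iteration stay cheap:
#
#   >>> gs = generatorset(iter(xrange(10)), iterasc=True)
#   >>> 3 in gs                  # stops consuming once 3 is produced
#   True
#   >>> gs._genlist              # only the values generated so far are cached
#   [0, 1, 2, 3]
#   >>> list(gs)                 # a full iteration reuses the cached prefix
#   [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
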
3431 3412 class spanset(abstractsmartset):
3432 3413 """Duck type for baseset class which represents a range of revisions and
3433 3414 can work lazily and without having all the range in memory
3434 3415
3435 3416 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3436 3417 notable points:
3437 3418 - when x > y it will be automatically descending,
3438 3419 - revisions filtered with this repoview will be skipped.
3439 3420
3440 3421 """
3441 3422 def __init__(self, repo, start=0, end=None):
3442 3423 """
3443 3424 start: first revision included in the set
3444 3425 (defaults to 0)
3445 3426 end: first revision excluded (last+1)
3446 3427 (defaults to len(repo))
3447 3428
3448 3429 Spanset will be descending if `end` < `start`.
3449 3430 """
3450 3431 if end is None:
3451 3432 end = len(repo)
3452 3433 self._ascending = start <= end
3453 3434 if not self._ascending:
3454 3435 start, end = end + 1, start +1
3455 3436 self._start = start
3456 3437 self._end = end
3457 3438 self._hiddenrevs = repo.changelog.filteredrevs
3458 3439
3459 3440 def sort(self, reverse=False):
3460 3441 self._ascending = not reverse
3461 3442
3462 3443 def reverse(self):
3463 3444 self._ascending = not self._ascending
3464 3445
3465 3446 def _iterfilter(self, iterrange):
3466 3447 s = self._hiddenrevs
3467 3448 for r in iterrange:
3468 3449 if r not in s:
3469 3450 yield r
3470 3451
3471 3452 def __iter__(self):
3472 3453 if self._ascending:
3473 3454 return self.fastasc()
3474 3455 else:
3475 3456 return self.fastdesc()
3476 3457
3477 3458 def fastasc(self):
3478 3459 iterrange = xrange(self._start, self._end)
3479 3460 if self._hiddenrevs:
3480 3461 return self._iterfilter(iterrange)
3481 3462 return iter(iterrange)
3482 3463
3483 3464 def fastdesc(self):
3484 3465 iterrange = xrange(self._end - 1, self._start - 1, -1)
3485 3466 if self._hiddenrevs:
3486 3467 return self._iterfilter(iterrange)
3487 3468 return iter(iterrange)
3488 3469
3489 3470 def __contains__(self, rev):
3490 3471 hidden = self._hiddenrevs
3491 3472 return ((self._start <= rev < self._end)
3492 3473 and not (hidden and rev in hidden))
3493 3474
3494 3475 def __nonzero__(self):
3495 3476 for r in self:
3496 3477 return True
3497 3478 return False
3498 3479
3499 3480 def __len__(self):
3500 3481 if not self._hiddenrevs:
3501 3482 return abs(self._end - self._start)
3502 3483 else:
3503 3484 count = 0
3504 3485 start = self._start
3505 3486 end = self._end
3506 3487 for rev in self._hiddenrevs:
3507 3488 if (end < rev <= start) or (start <= rev < end):
3508 3489 count += 1
3509 3490 return abs(self._end - self._start) - count
3510 3491
3511 3492 def isascending(self):
3512 3493 return self._ascending
3513 3494
3514 3495 def isdescending(self):
3515 3496 return not self._ascending
3516 3497
3517 3498 def first(self):
3518 3499 if self._ascending:
3519 3500 it = self.fastasc
3520 3501 else:
3521 3502 it = self.fastdesc
3522 3503 for x in it():
3523 3504 return x
3524 3505 return None
3525 3506
3526 3507 def last(self):
3527 3508 if self._ascending:
3528 3509 it = self.fastdesc
3529 3510 else:
3530 3511 it = self.fastasc
3531 3512 for x in it():
3532 3513 return x
3533 3514 return None
3534 3515
3535 3516 def __repr__(self):
3536 3517 d = {False: '-', True: '+'}[self._ascending]
3537 3518 return '<%s%s %d:%d>' % (type(self).__name__, d,
3538 3519 self._start, self._end - 1)
3539 3520
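# Editorial example (a minimal sketch, not part of the original module):
# spanset only needs len(repo) and repo.changelog.filteredrevs, so a
# hypothetical stand-in repository is enough to illustrate the lazy range
# and the skipping of filtered revisions:
#
#   >>> class fakechangelog(object):       # hypothetical, for illustration
#   ...     filteredrevs = frozenset([2])
#   >>> class fakerepo(object):            # hypothetical, for illustration
#   ...     changelog = fakechangelog()
#   ...     def __len__(self):
#   ...         return 5
#   >>> list(spanset(fakerepo()))          # every revision but the filtered one
#   [0, 1, 3, 4]
#   >>> list(spanset(fakerepo(), 4, 0))    # start > end => descending
#   [4, 3, 1]
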
3540 3521 class fullreposet(spanset):
3541 3522 """a set containing all revisions in the repo
3542 3523
3543 3524 This class exists to host special optimization and magic to handle virtual
3544 3525 revisions such as "null".
3545 3526 """
3546 3527
3547 3528 def __init__(self, repo):
3548 3529 super(fullreposet, self).__init__(repo)
3549 3530
3550 3531 def __and__(self, other):
3551 3532 """As self contains the whole repo, all of the other set should also be
3552 3533 in self. Therefore `self & other = other`.
3553 3534
3554 3535 This boldly assumes the other contains valid revs only.
3555 3536 """
3556 3537 # other is not a smartset, make it so
3557 3538 if not util.safehasattr(other, 'isascending'):
3558 3539 # filter out hidden revisions
3559 3540 # (this boldly assumes all smartsets are pure)
3560 3541 #
3561 3542 # `other` was used with "&", let's assume this is a set like
3562 3543 # object.
3563 3544 other = baseset(other - self._hiddenrevs)
3564 3545
3565 3546 # XXX As fullreposet is also used as bootstrap, this is wrong.
3566 3547 #
3567 3548 # With a giveme312() revset returning [3,1,2], this makes
3568 3549 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3569 3550 # We cannot just drop it because other usages still need to sort it:
3570 3551 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3571 3552 #
3572 3553 # There are also some faulty revset implementations that rely on it
3573 3554 # (eg: children as of its state in e8075329c5fb)
3574 3555 #
3575 3556 # When we fix the two points above we can move this into the if clause
3576 3557 other.sort(reverse=self.isdescending())
3577 3558 return other
3578 3559
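# Editorial example (a hedged sketch reusing the hypothetical fakerepo above,
# not part of the original module): a plain container intersected with a
# fullreposet is wrapped in a baseset, stripped of filtered revisions and
# sorted to match the fullreposet's direction:
#
#   >>> list(fullreposet(fakerepo()) & set([3, 1, 2]))
#   [1, 3]
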
3579 3560 def prettyformatset(revs):
3580 3561 lines = []
3581 3562 rs = repr(revs)
3582 3563 p = 0
3583 3564 while p < len(rs):
3584 3565 q = rs.find('<', p + 1)
3585 3566 if q < 0:
3586 3567 q = len(rs)
3587 3568 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3588 3569 assert l >= 0
3589 3570 lines.append((l, rs[p:q].rstrip()))
3590 3571 p = q
3591 3572 return '\n'.join(' ' * l + s for l, s in lines)
3592 3573
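# Editorial example (a minimal sketch, not part of the original module):
# prettyformatset() splits a nested smartset repr at each '<' and indents by
# nesting depth, which keeps deeply wrapped sets readable:
#
#   >>> rs = addset(generatorset(iter([])), generatorset(iter([])))
#   >>> print(prettyformatset(rs))
#   <addset
#    <generatorset+>,
#    <generatorset+>>
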
3593 3574 def loadpredicate(ui, extname, registrarobj):
3594 3575 """Load revset predicates from specified registrarobj
3595 3576 """
3596 3577 for name, func in registrarobj._table.iteritems():
3597 3578 symbols[name] = func
3598 3579 if func._safe:
3599 3580 safesymbols.add(name)
3600 3581
3601 3582 # load built-in predicates explicitly to setup safesymbols
3602 3583 loadpredicate(None, None, predicate)
3603 3584
3604 3585 # tell hggettext to extract docstrings from these functions:
3605 3586 i18nfunctions = symbols.values()
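
# Editorial example (a hedged sketch, not part of the original module): an
# extension typically declares its predicates through
# registrar.revsetpredicate() and lets the extension loader call
# loadpredicate(); the 'nobody' predicate below is hypothetical and only
# illustrates the wiring that loadpredicate(None, None, predicate) performs
# for the built-in table above:
#
#   >>> revsetpredicate = registrar.revsetpredicate()
#   >>> @revsetpredicate('nobody()', safe=True)
#   ... def nobody(repo, subset, x):
#   ...     """Changesets touched by nobody (always empty)."""
#   ...     return baseset()
#   >>> loadpredicate(None, 'myext', revsetpredicate)
#   >>> 'nobody' in symbols, 'nobody' in safesymbols
#   (True, True)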