revset: remove useless extpredicate class (API)...
FUJIWARA Katsunori
r28445:d749b183 default
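This commit removes the extpredicate helper class; extensions are expected to register revset predicates through registrar.revsetpredicate directly, without a separate setup() step. Below is a minimal sketch of that post-change registration pattern, assuming the registrar-based loading available in contemporary Mercurial; the predicate name and module layout are hypothetical examples, not part of this diff.

    # Hypothetical extension module sketch (names are examples only).
    from mercurial import registrar

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('myprefix(set)')
    def myprefix(repo, subset, x):
        '''Placeholder predicate body for illustration.'''
        # A real predicate would compute a smartset from ``x`` here;
        # returning the subset unchanged keeps the sketch self-contained.
        return subset

Under this pattern the decorated table is picked up when the extension is loaded, which is what makes the delayed-registration wrapper removed below unnecessary.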
@@ -1,3672 +1,3650 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 335 def isvalidsymbol(tree):
336 336 """Examine whether specified ``tree`` is valid ``symbol`` or not
337 337 """
338 338 return tree[0] == 'symbol' and len(tree) > 1
339 339
340 340 def getsymbol(tree):
341 341 """Get symbol name from valid ``symbol`` in ``tree``
342 342
343 343 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
344 344 """
345 345 return tree[1]
346 346
347 347 def isvalidfunc(tree):
348 348 """Examine whether specified ``tree`` is valid ``func`` or not
349 349 """
350 350 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
351 351
352 352 def getfuncname(tree):
353 353 """Get function name from valid ``func`` in ``tree``
354 354
355 355 This assumes that ``tree`` is already examined by ``isvalidfunc``.
356 356 """
357 357 return getsymbol(tree[1])
358 358
359 359 def getfuncargs(tree):
360 360 """Get list of function arguments from valid ``func`` in ``tree``
361 361
362 362 This assumes that ``tree`` is already examined by ``isvalidfunc``.
363 363 """
364 364 if len(tree) > 2:
365 365 return getlist(tree[2])
366 366 else:
367 367 return []
368 368
369 369 def getset(repo, subset, x):
370 370 if not x:
371 371 raise error.ParseError(_("missing argument"))
372 372 s = methods[x[0]](repo, subset, *x[1:])
373 373 if util.safehasattr(s, 'isascending'):
374 374 return s
375 375 if (repo.ui.configbool('devel', 'all-warnings')
376 376 or repo.ui.configbool('devel', 'old-revset')):
377 377 # else case should not happen, because all non-func are internal,
378 378 # ignoring for now.
379 379 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
380 380 repo.ui.develwarn('revset "%s" use list instead of smartset, '
381 381 '(upgrade your code)' % x[1][1])
382 382 return baseset(s)
383 383
384 384 def _getrevsource(repo, r):
385 385 extra = repo[r].extra()
386 386 for label in ('source', 'transplant_source', 'rebase_source'):
387 387 if label in extra:
388 388 try:
389 389 return repo[extra[label]].rev()
390 390 except error.RepoLookupError:
391 391 pass
392 392 return None
393 393
394 394 # operator methods
395 395
396 396 def stringset(repo, subset, x):
397 397 x = repo[x].rev()
398 398 if (x in subset
399 399 or x == node.nullrev and isinstance(subset, fullreposet)):
400 400 return baseset([x])
401 401 return baseset()
402 402
403 403 def rangeset(repo, subset, x, y):
404 404 m = getset(repo, fullreposet(repo), x)
405 405 n = getset(repo, fullreposet(repo), y)
406 406
407 407 if not m or not n:
408 408 return baseset()
409 409 m, n = m.first(), n.last()
410 410
411 411 if m == n:
412 412 r = baseset([m])
413 413 elif n == node.wdirrev:
414 414 r = spanset(repo, m, len(repo)) + baseset([n])
415 415 elif m == node.wdirrev:
416 416 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
417 417 elif m < n:
418 418 r = spanset(repo, m, n + 1)
419 419 else:
420 420 r = spanset(repo, m, n - 1)
421 421 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
422 422 # necessary to ensure we preserve the order in subset.
423 423 #
424 424 # This has performance implication, carrying the sorting over when possible
425 425 # would be more efficient.
426 426 return r & subset
427 427
428 428 def dagrange(repo, subset, x, y):
429 429 r = fullreposet(repo)
430 430 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
431 431 includepath=True)
432 432 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
433 433 # necessary to ensure we preserve the order in subset.
434 434 return xs & subset
435 435
436 436 def andset(repo, subset, x, y):
437 437 return getset(repo, getset(repo, subset, x), y)
438 438
439 439 def differenceset(repo, subset, x, y):
440 440 return getset(repo, subset, x) - getset(repo, subset, y)
441 441
442 442 def orset(repo, subset, *xs):
443 443 assert xs
444 444 if len(xs) == 1:
445 445 return getset(repo, subset, xs[0])
446 446 p = len(xs) // 2
447 447 a = orset(repo, subset, *xs[:p])
448 448 b = orset(repo, subset, *xs[p:])
449 449 return a + b
450 450
451 451 def notset(repo, subset, x):
452 452 return subset - getset(repo, subset, x)
453 453
454 454 def listset(repo, subset, *xs):
455 455 raise error.ParseError(_("can't use a list in this context"),
456 456 hint=_('see hg help "revsets.x or y"'))
457 457
458 458 def keyvaluepair(repo, subset, k, v):
459 459 raise error.ParseError(_("can't use a key-value pair in this context"))
460 460
461 461 def func(repo, subset, a, b):
462 462 if a[0] == 'symbol' and a[1] in symbols:
463 463 return symbols[a[1]](repo, subset, b)
464 464
465 465 keep = lambda fn: getattr(fn, '__doc__', None) is not None
466 466
467 467 syms = [s for (s, fn) in symbols.items() if keep(fn)]
468 468 raise error.UnknownIdentifier(a[1], syms)
469 469
470 470 # functions
471 471
472 472 # symbols are callables like:
473 473 # fn(repo, subset, x)
474 474 # with:
475 475 # repo - current repository instance
476 476 # subset - of revisions to be examined
477 477 # x - argument in tree form
478 478 symbols = {}
479 479
480 480 # symbols which can't be used for a DoS attack for any given input
481 481 # (e.g. those which accept regexes as plain strings shouldn't be included)
482 482 # functions that just return a lot of changesets (like all) don't count here
483 483 safesymbols = set()
484 484
485 485 predicate = registrar.revsetpredicate()
486 486
487 class extpredicate(registrar.delayregistrar):
488 """Decorator to register revset predicate in extensions
489
490 Usage::
491
492 revsetpredicate = revset.extpredicate()
493
494 @revsetpredicate('mypredicate(arg1, arg2[, arg3])')
495 def mypredicatefunc(repo, subset, x):
496 '''Explanation of this revset predicate ....
497 '''
498 pass
499
500 def uisetup(ui):
501 revsetpredicate.setup()
502
503 'revsetpredicate' instance above can be used to decorate multiple
504 functions, and 'setup()' on it registers all such functions at
505 once.
506 """
507 registrar = predicate
508
509 487 @predicate('_destupdate')
510 488 def _destupdate(repo, subset, x):
511 489 # experimental revset for update destination
512 490 args = getargsdict(x, 'limit', 'clean check')
513 491 return subset & baseset([destutil.destupdate(repo, **args)[0]])
514 492
515 493 @predicate('_destmerge')
516 494 def _destmerge(repo, subset, x):
517 495 # experimental revset for merge destination
518 496 sourceset = None
519 497 if x is not None:
520 498 sourceset = getset(repo, fullreposet(repo), x)
521 499 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
522 500
523 501 @predicate('adds(pattern)', safe=True)
524 502 def adds(repo, subset, x):
525 503 """Changesets that add a file matching pattern.
526 504
527 505 The pattern without explicit kind like ``glob:`` is expected to be
528 506 relative to the current directory and match against a file or a
529 507 directory.
530 508 """
531 509 # i18n: "adds" is a keyword
532 510 pat = getstring(x, _("adds requires a pattern"))
533 511 return checkstatus(repo, subset, pat, 1)
534 512
535 513 @predicate('ancestor(*changeset)', safe=True)
536 514 def ancestor(repo, subset, x):
537 515 """A greatest common ancestor of the changesets.
538 516
539 517 Accepts 0 or more changesets.
540 518 Will return empty list when passed no args.
541 519 Greatest common ancestor of a single changeset is that changeset.
542 520 """
543 521 # i18n: "ancestor" is a keyword
544 522 l = getlist(x)
545 523 rl = fullreposet(repo)
546 524 anc = None
547 525
548 526 # (getset(repo, rl, i) for i in l) generates a list of lists
549 527 for revs in (getset(repo, rl, i) for i in l):
550 528 for r in revs:
551 529 if anc is None:
552 530 anc = repo[r]
553 531 else:
554 532 anc = anc.ancestor(repo[r])
555 533
556 534 if anc is not None and anc.rev() in subset:
557 535 return baseset([anc.rev()])
558 536 return baseset()
559 537
560 538 def _ancestors(repo, subset, x, followfirst=False):
561 539 heads = getset(repo, fullreposet(repo), x)
562 540 if not heads:
563 541 return baseset()
564 542 s = _revancestors(repo, heads, followfirst)
565 543 return subset & s
566 544
567 545 @predicate('ancestors(set)', safe=True)
568 546 def ancestors(repo, subset, x):
569 547 """Changesets that are ancestors of a changeset in set.
570 548 """
571 549 return _ancestors(repo, subset, x)
572 550
573 551 @predicate('_firstancestors', safe=True)
574 552 def _firstancestors(repo, subset, x):
575 553 # ``_firstancestors(set)``
576 554 # Like ``ancestors(set)`` but follows only the first parents.
577 555 return _ancestors(repo, subset, x, followfirst=True)
578 556
579 557 def ancestorspec(repo, subset, x, n):
580 558 """``set~n``
581 559 Changesets that are the Nth ancestor (first parents only) of a changeset
582 560 in set.
583 561 """
584 562 try:
585 563 n = int(n[1])
586 564 except (TypeError, ValueError):
587 565 raise error.ParseError(_("~ expects a number"))
588 566 ps = set()
589 567 cl = repo.changelog
590 568 for r in getset(repo, fullreposet(repo), x):
591 569 for i in range(n):
592 570 r = cl.parentrevs(r)[0]
593 571 ps.add(r)
594 572 return subset & ps
595 573
596 574 @predicate('author(string)', safe=True)
597 575 def author(repo, subset, x):
598 576 """Alias for ``user(string)``.
599 577 """
600 578 # i18n: "author" is a keyword
601 579 n = encoding.lower(getstring(x, _("author requires a string")))
602 580 kind, pattern, matcher = _substringmatcher(n)
603 581 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
604 582 condrepr=('<user %r>', n))
605 583
606 584 @predicate('bisect(string)', safe=True)
607 585 def bisect(repo, subset, x):
608 586 """Changesets marked in the specified bisect status:
609 587
610 588 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
611 589 - ``goods``, ``bads`` : csets topologically good/bad
612 590 - ``range`` : csets taking part in the bisection
613 591 - ``pruned`` : csets that are goods, bads or skipped
614 592 - ``untested`` : csets whose fate is yet unknown
615 593 - ``ignored`` : csets ignored due to DAG topology
616 594 - ``current`` : the cset currently being bisected
617 595 """
618 596 # i18n: "bisect" is a keyword
619 597 status = getstring(x, _("bisect requires a string")).lower()
620 598 state = set(hbisect.get(repo, status))
621 599 return subset & state
622 600
623 601 # Backward-compatibility
624 602 # - no help entry so that we do not advertise it any more
625 603 @predicate('bisected', safe=True)
626 604 def bisected(repo, subset, x):
627 605 return bisect(repo, subset, x)
628 606
629 607 @predicate('bookmark([name])', safe=True)
630 608 def bookmark(repo, subset, x):
631 609 """The named bookmark or all bookmarks.
632 610
633 611 If `name` starts with `re:`, the remainder of the name is treated as
634 612 a regular expression. To match a bookmark that actually starts with `re:`,
635 613 use the prefix `literal:`.
636 614 """
637 615 # i18n: "bookmark" is a keyword
638 616 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
639 617 if args:
640 618 bm = getstring(args[0],
641 619 # i18n: "bookmark" is a keyword
642 620 _('the argument to bookmark must be a string'))
643 621 kind, pattern, matcher = util.stringmatcher(bm)
644 622 bms = set()
645 623 if kind == 'literal':
646 624 bmrev = repo._bookmarks.get(pattern, None)
647 625 if not bmrev:
648 626 raise error.RepoLookupError(_("bookmark '%s' does not exist")
649 627 % pattern)
650 628 bms.add(repo[bmrev].rev())
651 629 else:
652 630 matchrevs = set()
653 631 for name, bmrev in repo._bookmarks.iteritems():
654 632 if matcher(name):
655 633 matchrevs.add(bmrev)
656 634 if not matchrevs:
657 635 raise error.RepoLookupError(_("no bookmarks exist"
658 636 " that match '%s'") % pattern)
659 637 for bmrev in matchrevs:
660 638 bms.add(repo[bmrev].rev())
661 639 else:
662 640 bms = set([repo[r].rev()
663 641 for r in repo._bookmarks.values()])
664 642 bms -= set([node.nullrev])
665 643 return subset & bms
666 644
667 645 @predicate('branch(string or set)', safe=True)
668 646 def branch(repo, subset, x):
669 647 """
670 648 All changesets belonging to the given branch or the branches of the given
671 649 changesets.
672 650
673 651 If `string` starts with `re:`, the remainder of the name is treated as
674 652 a regular expression. To match a branch that actually starts with `re:`,
675 653 use the prefix `literal:`.
676 654 """
677 655 getbi = repo.revbranchcache().branchinfo
678 656
679 657 try:
680 658 b = getstring(x, '')
681 659 except error.ParseError:
682 660 # not a string, but another revspec, e.g. tip()
683 661 pass
684 662 else:
685 663 kind, pattern, matcher = util.stringmatcher(b)
686 664 if kind == 'literal':
687 665 # note: falls through to the revspec case if no branch with
688 666 # this name exists and pattern kind is not specified explicitly
689 667 if pattern in repo.branchmap():
690 668 return subset.filter(lambda r: matcher(getbi(r)[0]),
691 669 condrepr=('<branch %r>', b))
692 670 if b.startswith('literal:'):
693 671 raise error.RepoLookupError(_("branch '%s' does not exist")
694 672 % pattern)
695 673 else:
696 674 return subset.filter(lambda r: matcher(getbi(r)[0]),
697 675 condrepr=('<branch %r>', b))
698 676
699 677 s = getset(repo, fullreposet(repo), x)
700 678 b = set()
701 679 for r in s:
702 680 b.add(getbi(r)[0])
703 681 c = s.__contains__
704 682 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
705 683 condrepr=lambda: '<branch %r>' % sorted(b))
706 684
707 685 @predicate('bumped()', safe=True)
708 686 def bumped(repo, subset, x):
709 687 """Mutable changesets marked as successors of public changesets.
710 688
711 689 Only non-public and non-obsolete changesets can be `bumped`.
712 690 """
713 691 # i18n: "bumped" is a keyword
714 692 getargs(x, 0, 0, _("bumped takes no arguments"))
715 693 bumped = obsmod.getrevs(repo, 'bumped')
716 694 return subset & bumped
717 695
718 696 @predicate('bundle()', safe=True)
719 697 def bundle(repo, subset, x):
720 698 """Changesets in the bundle.
721 699
722 700 Bundle must be specified by the -R option."""
723 701
724 702 try:
725 703 bundlerevs = repo.changelog.bundlerevs
726 704 except AttributeError:
727 705 raise error.Abort(_("no bundle provided - specify with -R"))
728 706 return subset & bundlerevs
729 707
730 708 def checkstatus(repo, subset, pat, field):
731 709 hasset = matchmod.patkind(pat) == 'set'
732 710
733 711 mcache = [None]
734 712 def matches(x):
735 713 c = repo[x]
736 714 if not mcache[0] or hasset:
737 715 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
738 716 m = mcache[0]
739 717 fname = None
740 718 if not m.anypats() and len(m.files()) == 1:
741 719 fname = m.files()[0]
742 720 if fname is not None:
743 721 if fname not in c.files():
744 722 return False
745 723 else:
746 724 for f in c.files():
747 725 if m(f):
748 726 break
749 727 else:
750 728 return False
751 729 files = repo.status(c.p1().node(), c.node())[field]
752 730 if fname is not None:
753 731 if fname in files:
754 732 return True
755 733 else:
756 734 for f in files:
757 735 if m(f):
758 736 return True
759 737
760 738 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
761 739
762 740 def _children(repo, narrow, parentset):
763 741 if not parentset:
764 742 return baseset()
765 743 cs = set()
766 744 pr = repo.changelog.parentrevs
767 745 minrev = parentset.min()
768 746 for r in narrow:
769 747 if r <= minrev:
770 748 continue
771 749 for p in pr(r):
772 750 if p in parentset:
773 751 cs.add(r)
774 752 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
775 753 # This does not break because of other fullreposet misbehavior.
776 754 return baseset(cs)
777 755
778 756 @predicate('children(set)', safe=True)
779 757 def children(repo, subset, x):
780 758 """Child changesets of changesets in set.
781 759 """
782 760 s = getset(repo, fullreposet(repo), x)
783 761 cs = _children(repo, subset, s)
784 762 return subset & cs
785 763
786 764 @predicate('closed()', safe=True)
787 765 def closed(repo, subset, x):
788 766 """Changeset is closed.
789 767 """
790 768 # i18n: "closed" is a keyword
791 769 getargs(x, 0, 0, _("closed takes no arguments"))
792 770 return subset.filter(lambda r: repo[r].closesbranch(),
793 771 condrepr='<branch closed>')
794 772
795 773 @predicate('contains(pattern)')
796 774 def contains(repo, subset, x):
797 775 """The revision's manifest contains a file matching pattern (but might not
798 776 modify it). See :hg:`help patterns` for information about file patterns.
799 777
800 778 The pattern without explicit kind like ``glob:`` is expected to be
801 779 relative to the current directory and match against a file exactly
802 780 for efficiency.
803 781 """
804 782 # i18n: "contains" is a keyword
805 783 pat = getstring(x, _("contains requires a pattern"))
806 784
807 785 def matches(x):
808 786 if not matchmod.patkind(pat):
809 787 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
810 788 if pats in repo[x]:
811 789 return True
812 790 else:
813 791 c = repo[x]
814 792 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
815 793 for f in c.manifest():
816 794 if m(f):
817 795 return True
818 796 return False
819 797
820 798 return subset.filter(matches, condrepr=('<contains %r>', pat))
821 799
822 800 @predicate('converted([id])', safe=True)
823 801 def converted(repo, subset, x):
824 802 """Changesets converted from the given identifier in the old repository if
825 803 present, or all converted changesets if no identifier is specified.
826 804 """
827 805
828 806 # There is exactly no chance of resolving the revision, so do a simple
829 807 # string compare and hope for the best
830 808
831 809 rev = None
832 810 # i18n: "converted" is a keyword
833 811 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
834 812 if l:
835 813 # i18n: "converted" is a keyword
836 814 rev = getstring(l[0], _('converted requires a revision'))
837 815
838 816 def _matchvalue(r):
839 817 source = repo[r].extra().get('convert_revision', None)
840 818 return source is not None and (rev is None or source.startswith(rev))
841 819
842 820 return subset.filter(lambda r: _matchvalue(r),
843 821 condrepr=('<converted %r>', rev))
844 822
845 823 @predicate('date(interval)', safe=True)
846 824 def date(repo, subset, x):
847 825 """Changesets within the interval, see :hg:`help dates`.
848 826 """
849 827 # i18n: "date" is a keyword
850 828 ds = getstring(x, _("date requires a string"))
851 829 dm = util.matchdate(ds)
852 830 return subset.filter(lambda x: dm(repo[x].date()[0]),
853 831 condrepr=('<date %r>', ds))
854 832
855 833 @predicate('desc(string)', safe=True)
856 834 def desc(repo, subset, x):
857 835 """Search commit message for string. The match is case-insensitive.
858 836 """
859 837 # i18n: "desc" is a keyword
860 838 ds = encoding.lower(getstring(x, _("desc requires a string")))
861 839
862 840 def matches(x):
863 841 c = repo[x]
864 842 return ds in encoding.lower(c.description())
865 843
866 844 return subset.filter(matches, condrepr=('<desc %r>', ds))
867 845
868 846 def _descendants(repo, subset, x, followfirst=False):
869 847 roots = getset(repo, fullreposet(repo), x)
870 848 if not roots:
871 849 return baseset()
872 850 s = _revdescendants(repo, roots, followfirst)
873 851
874 852 # Both sets need to be ascending in order to lazily return the union
875 853 # in the correct order.
876 854 base = subset & roots
877 855 desc = subset & s
878 856 result = base + desc
879 857 if subset.isascending():
880 858 result.sort()
881 859 elif subset.isdescending():
882 860 result.sort(reverse=True)
883 861 else:
884 862 result = subset & result
885 863 return result
886 864
887 865 @predicate('descendants(set)', safe=True)
888 866 def descendants(repo, subset, x):
889 867 """Changesets which are descendants of changesets in set.
890 868 """
891 869 return _descendants(repo, subset, x)
892 870
893 871 @predicate('_firstdescendants', safe=True)
894 872 def _firstdescendants(repo, subset, x):
895 873 # ``_firstdescendants(set)``
896 874 # Like ``descendants(set)`` but follows only the first parents.
897 875 return _descendants(repo, subset, x, followfirst=True)
898 876
899 877 @predicate('destination([set])', safe=True)
900 878 def destination(repo, subset, x):
901 879 """Changesets that were created by a graft, transplant or rebase operation,
902 880 with the given revisions specified as the source. Omitting the optional set
903 881 is the same as passing all().
904 882 """
905 883 if x is not None:
906 884 sources = getset(repo, fullreposet(repo), x)
907 885 else:
908 886 sources = fullreposet(repo)
909 887
910 888 dests = set()
911 889
912 890 # subset contains all of the possible destinations that can be returned, so
913 891 # iterate over them and see if their source(s) were provided in the arg set.
914 892 # Even if the immediate src of r is not in the arg set, src's source (or
915 893 # further back) may be. Scanning back further than the immediate src allows
916 894 # transitive transplants and rebases to yield the same results as transitive
917 895 # grafts.
918 896 for r in subset:
919 897 src = _getrevsource(repo, r)
920 898 lineage = None
921 899
922 900 while src is not None:
923 901 if lineage is None:
924 902 lineage = list()
925 903
926 904 lineage.append(r)
927 905
928 906 # The visited lineage is a match if the current source is in the arg
929 907 # set. Since every candidate dest is visited by way of iterating
930 908 # subset, any dests further back in the lineage will be tested by a
931 909 # different iteration over subset. Likewise, if the src was already
932 910 # selected, the current lineage can be selected without going back
933 911 # further.
934 912 if src in sources or src in dests:
935 913 dests.update(lineage)
936 914 break
937 915
938 916 r = src
939 917 src = _getrevsource(repo, r)
940 918
941 919 return subset.filter(dests.__contains__,
942 920 condrepr=lambda: '<destination %r>' % sorted(dests))
943 921
944 922 @predicate('divergent()', safe=True)
945 923 def divergent(repo, subset, x):
946 924 """
947 925 Final successors of changesets with an alternative set of final successors.
948 926 """
949 927 # i18n: "divergent" is a keyword
950 928 getargs(x, 0, 0, _("divergent takes no arguments"))
951 929 divergent = obsmod.getrevs(repo, 'divergent')
952 930 return subset & divergent
953 931
954 932 @predicate('extinct()', safe=True)
955 933 def extinct(repo, subset, x):
956 934 """Obsolete changesets with obsolete descendants only.
957 935 """
958 936 # i18n: "extinct" is a keyword
959 937 getargs(x, 0, 0, _("extinct takes no arguments"))
960 938 extincts = obsmod.getrevs(repo, 'extinct')
961 939 return subset & extincts
962 940
963 941 @predicate('extra(label, [value])', safe=True)
964 942 def extra(repo, subset, x):
965 943 """Changesets with the given label in the extra metadata, with the given
966 944 optional value.
967 945
968 946 If `value` starts with `re:`, the remainder of the value is treated as
969 947 a regular expression. To match a value that actually starts with `re:`,
970 948 use the prefix `literal:`.
971 949 """
972 950 args = getargsdict(x, 'extra', 'label value')
973 951 if 'label' not in args:
974 952 # i18n: "extra" is a keyword
975 953 raise error.ParseError(_('extra takes at least 1 argument'))
976 954 # i18n: "extra" is a keyword
977 955 label = getstring(args['label'], _('first argument to extra must be '
978 956 'a string'))
979 957 value = None
980 958
981 959 if 'value' in args:
982 960 # i18n: "extra" is a keyword
983 961 value = getstring(args['value'], _('second argument to extra must be '
984 962 'a string'))
985 963 kind, value, matcher = util.stringmatcher(value)
986 964
987 965 def _matchvalue(r):
988 966 extra = repo[r].extra()
989 967 return label in extra and (value is None or matcher(extra[label]))
990 968
991 969 return subset.filter(lambda r: _matchvalue(r),
992 970 condrepr=('<extra[%r] %r>', label, value))
993 971
994 972 @predicate('filelog(pattern)', safe=True)
995 973 def filelog(repo, subset, x):
996 974 """Changesets connected to the specified filelog.
997 975
998 976 For performance reasons, visits only revisions mentioned in the file-level
999 977 filelog, rather than filtering through all changesets (much faster, but
1000 978 doesn't include deletes or duplicate changes). For a slower, more accurate
1001 979 result, use ``file()``.
1002 980
1003 981 The pattern without explicit kind like ``glob:`` is expected to be
1004 982 relative to the current directory and match against a file exactly
1005 983 for efficiency.
1006 984
1007 985 If some linkrev points to revisions filtered by the current repoview, we'll
1008 986 work around it to return a non-filtered value.
1009 987 """
1010 988
1011 989 # i18n: "filelog" is a keyword
1012 990 pat = getstring(x, _("filelog requires a pattern"))
1013 991 s = set()
1014 992 cl = repo.changelog
1015 993
1016 994 if not matchmod.patkind(pat):
1017 995 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1018 996 files = [f]
1019 997 else:
1020 998 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1021 999 files = (f for f in repo[None] if m(f))
1022 1000
1023 1001 for f in files:
1024 1002 fl = repo.file(f)
1025 1003 known = {}
1026 1004 scanpos = 0
1027 1005 for fr in list(fl):
1028 1006 fn = fl.node(fr)
1029 1007 if fn in known:
1030 1008 s.add(known[fn])
1031 1009 continue
1032 1010
1033 1011 lr = fl.linkrev(fr)
1034 1012 if lr in cl:
1035 1013 s.add(lr)
1036 1014 elif scanpos is not None:
1037 1015 # lowest matching changeset is filtered, scan further
1038 1016 # ahead in changelog
1039 1017 start = max(lr, scanpos) + 1
1040 1018 scanpos = None
1041 1019 for r in cl.revs(start):
1042 1020 # minimize parsing of non-matching entries
1043 1021 if f in cl.revision(r) and f in cl.readfiles(r):
1044 1022 try:
1045 1023 # try to use manifest delta fastpath
1046 1024 n = repo[r].filenode(f)
1047 1025 if n not in known:
1048 1026 if n == fn:
1049 1027 s.add(r)
1050 1028 scanpos = r
1051 1029 break
1052 1030 else:
1053 1031 known[n] = r
1054 1032 except error.ManifestLookupError:
1055 1033 # deletion in changelog
1056 1034 continue
1057 1035
1058 1036 return subset & s
1059 1037
1060 1038 @predicate('first(set, [n])', safe=True)
1061 1039 def first(repo, subset, x):
1062 1040 """An alias for limit().
1063 1041 """
1064 1042 return limit(repo, subset, x)
1065 1043
1066 1044 def _follow(repo, subset, x, name, followfirst=False):
1067 1045 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1068 1046 c = repo['.']
1069 1047 if l:
1070 1048 x = getstring(l[0], _("%s expected a pattern") % name)
1071 1049 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1072 1050 ctx=repo[None], default='path')
1073 1051
1074 1052 files = c.manifest().walk(matcher)
1075 1053
1076 1054 s = set()
1077 1055 for fname in files:
1078 1056 fctx = c[fname]
1079 1057 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1080 1058 # include the revision responsible for the most recent version
1081 1059 s.add(fctx.introrev())
1082 1060 else:
1083 1061 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1084 1062
1085 1063 return subset & s
1086 1064
1087 1065 @predicate('follow([pattern])', safe=True)
1088 1066 def follow(repo, subset, x):
1089 1067 """
1090 1068 An alias for ``::.`` (ancestors of the working directory's first parent).
1091 1069 If pattern is specified, the histories of files matching given
1092 1070 pattern is followed, including copies.
1093 1071 """
1094 1072 return _follow(repo, subset, x, 'follow')
1095 1073
1096 1074 @predicate('_followfirst', safe=True)
1097 1075 def _followfirst(repo, subset, x):
1098 1076 # ``followfirst([pattern])``
1099 1077 # Like ``follow([pattern])`` but follows only the first parent of
1100 1078 # every revisions or files revisions.
1101 1079 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1102 1080
1103 1081 @predicate('all()', safe=True)
1104 1082 def getall(repo, subset, x):
1105 1083 """All changesets, the same as ``0:tip``.
1106 1084 """
1107 1085 # i18n: "all" is a keyword
1108 1086 getargs(x, 0, 0, _("all takes no arguments"))
1109 1087 return subset & spanset(repo) # drop "null" if any
1110 1088
1111 1089 @predicate('grep(regex)')
1112 1090 def grep(repo, subset, x):
1113 1091 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1114 1092 to ensure special escape characters are handled correctly. Unlike
1115 1093 ``keyword(string)``, the match is case-sensitive.
1116 1094 """
1117 1095 try:
1118 1096 # i18n: "grep" is a keyword
1119 1097 gr = re.compile(getstring(x, _("grep requires a string")))
1120 1098 except re.error as e:
1121 1099 raise error.ParseError(_('invalid match pattern: %s') % e)
1122 1100
1123 1101 def matches(x):
1124 1102 c = repo[x]
1125 1103 for e in c.files() + [c.user(), c.description()]:
1126 1104 if gr.search(e):
1127 1105 return True
1128 1106 return False
1129 1107
1130 1108 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1131 1109
1132 1110 @predicate('_matchfiles', safe=True)
1133 1111 def _matchfiles(repo, subset, x):
1134 1112 # _matchfiles takes a revset list of prefixed arguments:
1135 1113 #
1136 1114 # [p:foo, i:bar, x:baz]
1137 1115 #
1138 1116 # builds a match object from them and filters subset. Allowed
1139 1117 # prefixes are 'p:' for regular patterns, 'i:' for include
1140 1118 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1141 1119 # a revision identifier, or the empty string to reference the
1142 1120 # working directory, from which the match object is
1143 1121 # initialized. Use 'd:' to set the default matching mode, default
1144 1122 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1145 1123
1146 1124 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1147 1125 pats, inc, exc = [], [], []
1148 1126 rev, default = None, None
1149 1127 for arg in l:
1150 1128 s = getstring(arg, "_matchfiles requires string arguments")
1151 1129 prefix, value = s[:2], s[2:]
1152 1130 if prefix == 'p:':
1153 1131 pats.append(value)
1154 1132 elif prefix == 'i:':
1155 1133 inc.append(value)
1156 1134 elif prefix == 'x:':
1157 1135 exc.append(value)
1158 1136 elif prefix == 'r:':
1159 1137 if rev is not None:
1160 1138 raise error.ParseError('_matchfiles expected at most one '
1161 1139 'revision')
1162 1140 if value != '': # empty means working directory; leave rev as None
1163 1141 rev = value
1164 1142 elif prefix == 'd:':
1165 1143 if default is not None:
1166 1144 raise error.ParseError('_matchfiles expected at most one '
1167 1145 'default mode')
1168 1146 default = value
1169 1147 else:
1170 1148 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1171 1149 if not default:
1172 1150 default = 'glob'
1173 1151
1174 1152 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1175 1153 exclude=exc, ctx=repo[rev], default=default)
1176 1154
1177 1155 # This directly read the changelog data as creating changectx for all
1178 1156 # revisions is quite expensive.
1179 1157 getfiles = repo.changelog.readfiles
1180 1158 wdirrev = node.wdirrev
1181 1159 def matches(x):
1182 1160 if x == wdirrev:
1183 1161 files = repo[x].files()
1184 1162 else:
1185 1163 files = getfiles(x)
1186 1164 for f in files:
1187 1165 if m(f):
1188 1166 return True
1189 1167 return False
1190 1168
1191 1169 return subset.filter(matches,
1192 1170 condrepr=('<matchfiles patterns=%r, include=%r '
1193 1171 'exclude=%r, default=%r, rev=%r>',
1194 1172 pats, inc, exc, default, rev))
1195 1173
1196 1174 @predicate('file(pattern)', safe=True)
1197 1175 def hasfile(repo, subset, x):
1198 1176 """Changesets affecting files matched by pattern.
1199 1177
1200 1178 For a faster but less accurate result, consider using ``filelog()``
1201 1179 instead.
1202 1180
1203 1181 This predicate uses ``glob:`` as the default kind of pattern.
1204 1182 """
1205 1183 # i18n: "file" is a keyword
1206 1184 pat = getstring(x, _("file requires a pattern"))
1207 1185 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1208 1186
1209 1187 @predicate('head()', safe=True)
1210 1188 def head(repo, subset, x):
1211 1189 """Changeset is a named branch head.
1212 1190 """
1213 1191 # i18n: "head" is a keyword
1214 1192 getargs(x, 0, 0, _("head takes no arguments"))
1215 1193 hs = set()
1216 1194 cl = repo.changelog
1217 1195 for b, ls in repo.branchmap().iteritems():
1218 1196 hs.update(cl.rev(h) for h in ls)
1219 1197 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1220 1198 # This does not break because of other fullreposet misbehavior.
1221 1199 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1222 1200 # necessary to ensure we preserve the order in subset.
1223 1201 return baseset(hs) & subset
1224 1202
1225 1203 @predicate('heads(set)', safe=True)
1226 1204 def heads(repo, subset, x):
1227 1205 """Members of set with no children in set.
1228 1206 """
1229 1207 s = getset(repo, subset, x)
1230 1208 ps = parents(repo, subset, x)
1231 1209 return s - ps
1232 1210
1233 1211 @predicate('hidden()', safe=True)
1234 1212 def hidden(repo, subset, x):
1235 1213 """Hidden changesets.
1236 1214 """
1237 1215 # i18n: "hidden" is a keyword
1238 1216 getargs(x, 0, 0, _("hidden takes no arguments"))
1239 1217 hiddenrevs = repoview.filterrevs(repo, 'visible')
1240 1218 return subset & hiddenrevs
1241 1219
1242 1220 @predicate('keyword(string)', safe=True)
1243 1221 def keyword(repo, subset, x):
1244 1222 """Search commit message, user name, and names of changed files for
1245 1223 string. The match is case-insensitive.
1246 1224 """
1247 1225 # i18n: "keyword" is a keyword
1248 1226 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1249 1227
1250 1228 def matches(r):
1251 1229 c = repo[r]
1252 1230 return any(kw in encoding.lower(t)
1253 1231 for t in c.files() + [c.user(), c.description()])
1254 1232
1255 1233 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1256 1234
1257 1235 @predicate('limit(set[, n[, offset]])', safe=True)
1258 1236 def limit(repo, subset, x):
1259 1237 """First n members of set, defaulting to 1, starting from offset.
1260 1238 """
1261 1239 args = getargsdict(x, 'limit', 'set n offset')
1262 1240 if 'set' not in args:
1263 1241 # i18n: "limit" is a keyword
1264 1242 raise error.ParseError(_("limit requires one to three arguments"))
1265 1243 try:
1266 1244 lim, ofs = 1, 0
1267 1245 if 'n' in args:
1268 1246 # i18n: "limit" is a keyword
1269 1247 lim = int(getstring(args['n'], _("limit requires a number")))
1270 1248 if 'offset' in args:
1271 1249 # i18n: "limit" is a keyword
1272 1250 ofs = int(getstring(args['offset'], _("limit requires a number")))
1273 1251 if ofs < 0:
1274 1252 raise error.ParseError(_("negative offset"))
1275 1253 except (TypeError, ValueError):
1276 1254 # i18n: "limit" is a keyword
1277 1255 raise error.ParseError(_("limit expects a number"))
1278 1256 os = getset(repo, fullreposet(repo), args['set'])
1279 1257 result = []
1280 1258 it = iter(os)
1281 1259 for x in xrange(ofs):
1282 1260 y = next(it, None)
1283 1261 if y is None:
1284 1262 break
1285 1263 for x in xrange(lim):
1286 1264 y = next(it, None)
1287 1265 if y is None:
1288 1266 break
1289 1267 elif y in subset:
1290 1268 result.append(y)
1291 1269 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1292 1270 lim, ofs, subset, os))
1293 1271
1294 1272 @predicate('last(set, [n])', safe=True)
1295 1273 def last(repo, subset, x):
1296 1274 """Last n members of set, defaulting to 1.
1297 1275 """
1298 1276 # i18n: "last" is a keyword
1299 1277 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1300 1278 try:
1301 1279 lim = 1
1302 1280 if len(l) == 2:
1303 1281 # i18n: "last" is a keyword
1304 1282 lim = int(getstring(l[1], _("last requires a number")))
1305 1283 except (TypeError, ValueError):
1306 1284 # i18n: "last" is a keyword
1307 1285 raise error.ParseError(_("last expects a number"))
1308 1286 os = getset(repo, fullreposet(repo), l[0])
1309 1287 os.reverse()
1310 1288 result = []
1311 1289 it = iter(os)
1312 1290 for x in xrange(lim):
1313 1291 y = next(it, None)
1314 1292 if y is None:
1315 1293 break
1316 1294 elif y in subset:
1317 1295 result.append(y)
1318 1296 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1319 1297
1320 1298 @predicate('max(set)', safe=True)
1321 1299 def maxrev(repo, subset, x):
1322 1300 """Changeset with highest revision number in set.
1323 1301 """
1324 1302 os = getset(repo, fullreposet(repo), x)
1325 1303 try:
1326 1304 m = os.max()
1327 1305 if m in subset:
1328 1306 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1329 1307 except ValueError:
1330 1308 # os.max() throws a ValueError when the collection is empty.
1331 1309 # Same as python's max().
1332 1310 pass
1333 1311 return baseset(datarepr=('<max %r, %r>', subset, os))
1334 1312
1335 1313 @predicate('merge()', safe=True)
1336 1314 def merge(repo, subset, x):
1337 1315 """Changeset is a merge changeset.
1338 1316 """
1339 1317 # i18n: "merge" is a keyword
1340 1318 getargs(x, 0, 0, _("merge takes no arguments"))
1341 1319 cl = repo.changelog
1342 1320 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1343 1321 condrepr='<merge>')
1344 1322
1345 1323 @predicate('branchpoint()', safe=True)
1346 1324 def branchpoint(repo, subset, x):
1347 1325 """Changesets with more than one child.
1348 1326 """
1349 1327 # i18n: "branchpoint" is a keyword
1350 1328 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1351 1329 cl = repo.changelog
1352 1330 if not subset:
1353 1331 return baseset()
1354 1332 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1355 1333 # (and if it is not, it should.)
1356 1334 baserev = min(subset)
1357 1335 parentscount = [0]*(len(repo) - baserev)
1358 1336 for r in cl.revs(start=baserev + 1):
1359 1337 for p in cl.parentrevs(r):
1360 1338 if p >= baserev:
1361 1339 parentscount[p - baserev] += 1
1362 1340 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1363 1341 condrepr='<branchpoint>')
1364 1342
1365 1343 @predicate('min(set)', safe=True)
1366 1344 def minrev(repo, subset, x):
1367 1345 """Changeset with lowest revision number in set.
1368 1346 """
1369 1347 os = getset(repo, fullreposet(repo), x)
1370 1348 try:
1371 1349 m = os.min()
1372 1350 if m in subset:
1373 1351 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1374 1352 except ValueError:
1375 1353 # os.min() throws a ValueError when the collection is empty.
1376 1354 # Same as python's min().
1377 1355 pass
1378 1356 return baseset(datarepr=('<min %r, %r>', subset, os))
1379 1357
1380 1358 @predicate('modifies(pattern)', safe=True)
1381 1359 def modifies(repo, subset, x):
1382 1360 """Changesets modifying files matched by pattern.
1383 1361
1384 1362 The pattern without explicit kind like ``glob:`` is expected to be
1385 1363 relative to the current directory and match against a file or a
1386 1364 directory.
1387 1365 """
1388 1366 # i18n: "modifies" is a keyword
1389 1367 pat = getstring(x, _("modifies requires a pattern"))
1390 1368 return checkstatus(repo, subset, pat, 0)
1391 1369
1392 1370 @predicate('named(namespace)')
1393 1371 def named(repo, subset, x):
1394 1372 """The changesets in a given namespace.
1395 1373
1396 1374 If `namespace` starts with `re:`, the remainder of the string is treated as
1397 1375 a regular expression. To match a namespace that actually starts with `re:`,
1398 1376 use the prefix `literal:`.
1399 1377 """
1400 1378 # i18n: "named" is a keyword
1401 1379 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1402 1380
1403 1381 ns = getstring(args[0],
1404 1382 # i18n: "named" is a keyword
1405 1383 _('the argument to named must be a string'))
1406 1384 kind, pattern, matcher = util.stringmatcher(ns)
1407 1385 namespaces = set()
1408 1386 if kind == 'literal':
1409 1387 if pattern not in repo.names:
1410 1388 raise error.RepoLookupError(_("namespace '%s' does not exist")
1411 1389 % ns)
1412 1390 namespaces.add(repo.names[pattern])
1413 1391 else:
1414 1392 for name, ns in repo.names.iteritems():
1415 1393 if matcher(name):
1416 1394 namespaces.add(ns)
1417 1395 if not namespaces:
1418 1396 raise error.RepoLookupError(_("no namespace exists"
1419 1397 " that match '%s'") % pattern)
1420 1398
1421 1399 names = set()
1422 1400 for ns in namespaces:
1423 1401 for name in ns.listnames(repo):
1424 1402 if name not in ns.deprecated:
1425 1403 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1426 1404
1427 1405 names -= set([node.nullrev])
1428 1406 return subset & names
1429 1407
1430 1408 @predicate('id(string)', safe=True)
1431 1409 def node_(repo, subset, x):
1432 1410 """Revision non-ambiguously specified by the given hex string prefix.
1433 1411 """
1434 1412 # i18n: "id" is a keyword
1435 1413 l = getargs(x, 1, 1, _("id requires one argument"))
1436 1414 # i18n: "id" is a keyword
1437 1415 n = getstring(l[0], _("id requires a string"))
1438 1416 if len(n) == 40:
1439 1417 try:
1440 1418 rn = repo.changelog.rev(node.bin(n))
1441 1419 except (LookupError, TypeError):
1442 1420 rn = None
1443 1421 else:
1444 1422 rn = None
1445 1423 pm = repo.changelog._partialmatch(n)
1446 1424 if pm is not None:
1447 1425 rn = repo.changelog.rev(pm)
1448 1426
1449 1427 if rn is None:
1450 1428 return baseset()
1451 1429 result = baseset([rn])
1452 1430 return result & subset
1453 1431
1454 1432 @predicate('obsolete()', safe=True)
1455 1433 def obsolete(repo, subset, x):
1456 1434 """Mutable changeset with a newer version."""
1457 1435 # i18n: "obsolete" is a keyword
1458 1436 getargs(x, 0, 0, _("obsolete takes no arguments"))
1459 1437 obsoletes = obsmod.getrevs(repo, 'obsolete')
1460 1438 return subset & obsoletes
1461 1439
1462 1440 @predicate('only(set, [set])', safe=True)
1463 1441 def only(repo, subset, x):
1464 1442 """Changesets that are ancestors of the first set that are not ancestors
1465 1443 of any other head in the repo. If a second set is specified, the result
1466 1444 is ancestors of the first set that are not ancestors of the second set
1467 1445 (i.e. ::<set1> - ::<set2>).
1468 1446 """
1469 1447 cl = repo.changelog
1470 1448 # i18n: "only" is a keyword
1471 1449 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1472 1450 include = getset(repo, fullreposet(repo), args[0])
1473 1451 if len(args) == 1:
1474 1452 if not include:
1475 1453 return baseset()
1476 1454
1477 1455 descendants = set(_revdescendants(repo, include, False))
1478 1456 exclude = [rev for rev in cl.headrevs()
1479 1457 if not rev in descendants and not rev in include]
1480 1458 else:
1481 1459 exclude = getset(repo, fullreposet(repo), args[1])
1482 1460
1483 1461 results = set(cl.findmissingrevs(common=exclude, heads=include))
1484 1462 # XXX we should turn this into a baseset instead of a set, smartset may do
1485 1463 # some optimisations from the fact this is a baseset.
1486 1464 return subset & results
1487 1465
1488 1466 @predicate('origin([set])', safe=True)
1489 1467 def origin(repo, subset, x):
1490 1468 """
1491 1469 Changesets that were specified as a source for the grafts, transplants or
1492 1470 rebases that created the given revisions. Omitting the optional set is the
1493 1471 same as passing all(). If a changeset created by these operations is itself
1494 1472 specified as a source for one of these operations, only the source changeset
1495 1473 for the first operation is selected.
1496 1474 """
1497 1475 if x is not None:
1498 1476 dests = getset(repo, fullreposet(repo), x)
1499 1477 else:
1500 1478 dests = fullreposet(repo)
1501 1479
1502 1480 def _firstsrc(rev):
1503 1481 src = _getrevsource(repo, rev)
1504 1482 if src is None:
1505 1483 return None
1506 1484
1507 1485 while True:
1508 1486 prev = _getrevsource(repo, src)
1509 1487
1510 1488 if prev is None:
1511 1489 return src
1512 1490 src = prev
1513 1491
1514 1492 o = set([_firstsrc(r) for r in dests])
1515 1493 o -= set([None])
1516 1494 # XXX we should turn this into a baseset instead of a set, smartset may do
1517 1495 # some optimisations from the fact this is a baseset.
1518 1496 return subset & o
1519 1497
1520 1498 @predicate('outgoing([path])', safe=True)
1521 1499 def outgoing(repo, subset, x):
1522 1500 """Changesets not found in the specified destination repository, or the
1523 1501 default push location.
1524 1502 """
1525 1503 # Avoid cycles.
1526 1504 from . import (
1527 1505 discovery,
1528 1506 hg,
1529 1507 )
1530 1508 # i18n: "outgoing" is a keyword
1531 1509 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1532 1510 # i18n: "outgoing" is a keyword
1533 1511 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1534 1512 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1535 1513 dest, branches = hg.parseurl(dest)
1536 1514 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1537 1515 if revs:
1538 1516 revs = [repo.lookup(rev) for rev in revs]
1539 1517 other = hg.peer(repo, {}, dest)
1540 1518 repo.ui.pushbuffer()
1541 1519 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1542 1520 repo.ui.popbuffer()
1543 1521 cl = repo.changelog
1544 1522 o = set([cl.rev(r) for r in outgoing.missing])
1545 1523 return subset & o
1546 1524
1547 1525 @predicate('p1([set])', safe=True)
1548 1526 def p1(repo, subset, x):
1549 1527 """First parent of changesets in set, or the working directory.
1550 1528 """
1551 1529 if x is None:
1552 1530 p = repo[x].p1().rev()
1553 1531 if p >= 0:
1554 1532 return subset & baseset([p])
1555 1533 return baseset()
1556 1534
1557 1535 ps = set()
1558 1536 cl = repo.changelog
1559 1537 for r in getset(repo, fullreposet(repo), x):
1560 1538 ps.add(cl.parentrevs(r)[0])
1561 1539 ps -= set([node.nullrev])
1562 1540 # XXX we should turn this into a baseset instead of a set, smartset may do
1563 1541 # some optimisations from the fact this is a baseset.
1564 1542 return subset & ps
1565 1543
1566 1544 @predicate('p2([set])', safe=True)
1567 1545 def p2(repo, subset, x):
1568 1546 """Second parent of changesets in set, or the working directory.
1569 1547 """
1570 1548 if x is None:
1571 1549 ps = repo[x].parents()
1572 1550 try:
1573 1551 p = ps[1].rev()
1574 1552 if p >= 0:
1575 1553 return subset & baseset([p])
1576 1554 return baseset()
1577 1555 except IndexError:
1578 1556 return baseset()
1579 1557
1580 1558 ps = set()
1581 1559 cl = repo.changelog
1582 1560 for r in getset(repo, fullreposet(repo), x):
1583 1561 ps.add(cl.parentrevs(r)[1])
1584 1562 ps -= set([node.nullrev])
1585 1563 # XXX we should turn this into a baseset instead of a set, smartset may do
1586 1564 # some optimisations from the fact this is a baseset.
1587 1565 return subset & ps
1588 1566
1589 1567 @predicate('parents([set])', safe=True)
1590 1568 def parents(repo, subset, x):
1591 1569 """
1592 1570 The set of all parents for all changesets in set, or the working directory.
1593 1571 """
1594 1572 if x is None:
1595 1573 ps = set(p.rev() for p in repo[x].parents())
1596 1574 else:
1597 1575 ps = set()
1598 1576 cl = repo.changelog
1599 1577 up = ps.update
1600 1578 parentrevs = cl.parentrevs
1601 1579 for r in getset(repo, fullreposet(repo), x):
1602 1580 if r == node.wdirrev:
1603 1581 up(p.rev() for p in repo[r].parents())
1604 1582 else:
1605 1583 up(parentrevs(r))
1606 1584 ps -= set([node.nullrev])
1607 1585 return subset & ps
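# Example usage of the parent predicates above (a sketch; 'X' stands for any
# revset expression):
#
#   hg log -r "p1(.)"        # first parent of '.'
#   hg log -r "p2(merge())"  # second parents of merge changesets
#   hg log -r "parents(X)"   # every parent of every changeset in X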
1608 1586
1609 1587 def _phase(repo, subset, target):
1610 1588 """helper to select all rev in phase <target>"""
1611 1589 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1612 1590 if repo._phasecache._phasesets:
1613 1591 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1614 1592 s = baseset(s)
1615 1593 s.sort() # sets are unordered, so we enforce ascending
1616 1594 return subset & s
1617 1595 else:
1618 1596 phase = repo._phasecache.phase
1619 1597 condition = lambda r: phase(repo, r) == target
1620 1598 return subset.filter(condition, condrepr=('<phase %r>', target),
1621 1599 cache=False)
1622 1600
1623 1601 @predicate('draft()', safe=True)
1624 1602 def draft(repo, subset, x):
1625 1603 """Changeset in draft phase."""
1626 1604 # i18n: "draft" is a keyword
1627 1605 getargs(x, 0, 0, _("draft takes no arguments"))
1628 1606 target = phases.draft
1629 1607 return _phase(repo, subset, target)
1630 1608
1631 1609 @predicate('secret()', safe=True)
1632 1610 def secret(repo, subset, x):
1633 1611 """Changeset in secret phase."""
1634 1612 # i18n: "secret" is a keyword
1635 1613 getargs(x, 0, 0, _("secret takes no arguments"))
1636 1614 target = phases.secret
1637 1615 return _phase(repo, subset, target)
1638 1616
1639 1617 def parentspec(repo, subset, x, n):
1640 1618 """``set^0``
1641 1619 The set.
1642 1620 ``set^1`` (or ``set^``), ``set^2``
1643 1621 First or second parent, respectively, of all changesets in set.
1644 1622 """
1645 1623 try:
1646 1624 n = int(n[1])
1647 1625 if n not in (0, 1, 2):
1648 1626 raise ValueError
1649 1627 except (TypeError, ValueError):
1650 1628 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1651 1629 ps = set()
1652 1630 cl = repo.changelog
1653 1631 for r in getset(repo, fullreposet(repo), x):
1654 1632 if n == 0:
1655 1633 ps.add(r)
1656 1634 elif n == 1:
1657 1635 ps.add(cl.parentrevs(r)[0])
1658 1636 elif n == 2:
1659 1637 parents = cl.parentrevs(r)
1660 1638 if len(parents) > 1:
1661 1639 ps.add(parents[1])
1662 1640 return subset & ps
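# Example usage of the '^' suffix handled by parentspec() (a sketch):
#
#   hg log -r "tip^"    # same as tip^1: first parent of tip
#   hg log -r "tip^2"   # second parent of tip; empty unless tip is a merge
#   hg log -r "tip^0"   # tip itself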
1663 1641
1664 1642 @predicate('present(set)', safe=True)
1665 1643 def present(repo, subset, x):
1666 1644 """An empty set, if any revision in set isn't found; otherwise,
1667 1645 all revisions in set.
1668 1646
1669 1647 If any of the specified revisions is not present in the local repository,
1670 1648 the query is normally aborted. But this predicate allows the query
1671 1649 to continue even in such cases.
1672 1650 """
1673 1651 try:
1674 1652 return getset(repo, subset, x)
1675 1653 except error.RepoLookupError:
1676 1654 return baseset()
1677 1655
1678 1656 # for internal use
1679 1657 @predicate('_notpublic', safe=True)
1680 1658 def _notpublic(repo, subset, x):
1681 1659 getargs(x, 0, 0, "_notpublic takes no arguments")
1682 1660 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1683 1661 if repo._phasecache._phasesets:
1684 1662 s = set()
1685 1663 for u in repo._phasecache._phasesets[1:]:
1686 1664 s.update(u)
1687 1665 s = baseset(s - repo.changelog.filteredrevs)
1688 1666 s.sort()
1689 1667 return subset & s
1690 1668 else:
1691 1669 phase = repo._phasecache.phase
1692 1670 target = phases.public
1693 1671 condition = lambda r: phase(repo, r) != target
1694 1672 return subset.filter(condition, condrepr=('<phase %r>', target),
1695 1673 cache=False)
1696 1674
1697 1675 @predicate('public()', safe=True)
1698 1676 def public(repo, subset, x):
1699 1677 """Changeset in public phase."""
1700 1678 # i18n: "public" is a keyword
1701 1679 getargs(x, 0, 0, _("public takes no arguments"))
1702 1680 phase = repo._phasecache.phase
1703 1681 target = phases.public
1704 1682 condition = lambda r: phase(repo, r) == target
1705 1683 return subset.filter(condition, condrepr=('<phase %r>', target),
1706 1684 cache=False)
1707 1685
1708 1686 @predicate('remote([id [,path]])', safe=True)
1709 1687 def remote(repo, subset, x):
1710 1688 """Local revision that corresponds to the given identifier in a
1711 1689 remote repository, if present. Here, the '.' identifier is a
1712 1690 synonym for the current local branch.
1713 1691 """
1714 1692
1715 1693 from . import hg # avoid start-up nasties
1716 1694 # i18n: "remote" is a keyword
1717 1695 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1718 1696
1719 1697 q = '.'
1720 1698 if len(l) > 0:
1721 1699 # i18n: "remote" is a keyword
1722 1700 q = getstring(l[0], _("remote requires a string id"))
1723 1701 if q == '.':
1724 1702 q = repo['.'].branch()
1725 1703
1726 1704 dest = ''
1727 1705 if len(l) > 1:
1728 1706 # i18n: "remote" is a keyword
1729 1707 dest = getstring(l[1], _("remote requires a repository path"))
1730 1708 dest = repo.ui.expandpath(dest or 'default')
1731 1709 dest, branches = hg.parseurl(dest)
1732 1710 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1733 1711 if revs:
1734 1712 revs = [repo.lookup(rev) for rev in revs]
1735 1713 other = hg.peer(repo, {}, dest)
1736 1714 n = other.lookup(q)
1737 1715 if n in repo:
1738 1716 r = repo[n].rev()
1739 1717 if r in subset:
1740 1718 return baseset([r])
1741 1719 return baseset()
1742 1720
1743 1721 @predicate('removes(pattern)', safe=True)
1744 1722 def removes(repo, subset, x):
1745 1723 """Changesets which remove files matching pattern.
1746 1724
1747 1725 The pattern without explicit kind like ``glob:`` is expected to be
1748 1726 relative to the current directory and match against a file or a
1749 1727 directory.
1750 1728 """
1751 1729 # i18n: "removes" is a keyword
1752 1730 pat = getstring(x, _("removes requires a pattern"))
1753 1731 return checkstatus(repo, subset, pat, 2)
1754 1732
1755 1733 @predicate('rev(number)', safe=True)
1756 1734 def rev(repo, subset, x):
1757 1735 """Revision with the given numeric identifier.
1758 1736 """
1759 1737 # i18n: "rev" is a keyword
1760 1738 l = getargs(x, 1, 1, _("rev requires one argument"))
1761 1739 try:
1762 1740 # i18n: "rev" is a keyword
1763 1741 l = int(getstring(l[0], _("rev requires a number")))
1764 1742 except (TypeError, ValueError):
1765 1743 # i18n: "rev" is a keyword
1766 1744 raise error.ParseError(_("rev expects a number"))
1767 1745 if l not in repo.changelog and l != node.nullrev:
1768 1746 return baseset()
1769 1747 return subset & baseset([l])
1770 1748
1771 1749 @predicate('matching(revision [, field])', safe=True)
1772 1750 def matching(repo, subset, x):
1773 1751 """Changesets in which a given set of fields match the set of fields in the
1774 1752 selected revision or set.
1775 1753
1776 1754 To match more than one field pass the list of fields to match separated
1777 1755 by spaces (e.g. ``author description``).
1778 1756
1779 1757 Valid fields are most regular revision fields and some special fields.
1780 1758
1781 1759 Regular revision fields are ``description``, ``author``, ``branch``,
1782 1760 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1783 1761 and ``diff``.
1784 1762 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1785 1763 contents of the revision. Two revisions matching their ``diff`` will
1786 1764 also match their ``files``.
1787 1765
1788 1766 Special fields are ``summary`` and ``metadata``:
1789 1767 ``summary`` matches the first line of the description.
1790 1768 ``metadata`` is equivalent to matching ``description user date``
1791 1769 (i.e. it matches the main metadata fields).
1792 1770
1793 1771 ``metadata`` is the default field which is used when no fields are
1794 1772 specified. You can match more than one field at a time.
1795 1773 """
1796 1774 # i18n: "matching" is a keyword
1797 1775 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1798 1776
1799 1777 revs = getset(repo, fullreposet(repo), l[0])
1800 1778
1801 1779 fieldlist = ['metadata']
1802 1780 if len(l) > 1:
1803 1781 fieldlist = getstring(l[1],
1804 1782 # i18n: "matching" is a keyword
1805 1783 _("matching requires a string "
1806 1784 "as its second argument")).split()
1807 1785
1808 1786 # Make sure that there are no repeated fields,
1809 1787 # expand the 'special' 'metadata' field type
1810 1788 # and check the 'files' whenever we check the 'diff'
1811 1789 fields = []
1812 1790 for field in fieldlist:
1813 1791 if field == 'metadata':
1814 1792 fields += ['user', 'description', 'date']
1815 1793 elif field == 'diff':
1816 1794 # a revision matching the diff must also match the files
1817 1795 # since matching the diff is very costly, make sure to
1818 1796 # also match the files first
1819 1797 fields += ['files', 'diff']
1820 1798 else:
1821 1799 if field == 'author':
1822 1800 field = 'user'
1823 1801 fields.append(field)
1824 1802 fields = set(fields)
1825 1803 if 'summary' in fields and 'description' in fields:
1826 1804 # If a revision matches its description it also matches its summary
1827 1805 fields.discard('summary')
1828 1806
1829 1807 # We may want to match more than one field
1830 1808 # Not all fields take the same amount of time to be matched
1831 1809 # Sort the selected fields in order of increasing matching cost
1832 1810 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1833 1811 'files', 'description', 'substate', 'diff']
1834 1812 def fieldkeyfunc(f):
1835 1813 try:
1836 1814 return fieldorder.index(f)
1837 1815 except ValueError:
1838 1816 # assume an unknown field is very costly
1839 1817 return len(fieldorder)
1840 1818 fields = list(fields)
1841 1819 fields.sort(key=fieldkeyfunc)
1842 1820
1843 1821 # Each field will be matched with its own "getfield" function
1844 1822 # which will be added to the getfieldfuncs array of functions
1845 1823 getfieldfuncs = []
1846 1824 _funcs = {
1847 1825 'user': lambda r: repo[r].user(),
1848 1826 'branch': lambda r: repo[r].branch(),
1849 1827 'date': lambda r: repo[r].date(),
1850 1828 'description': lambda r: repo[r].description(),
1851 1829 'files': lambda r: repo[r].files(),
1852 1830 'parents': lambda r: repo[r].parents(),
1853 1831 'phase': lambda r: repo[r].phase(),
1854 1832 'substate': lambda r: repo[r].substate,
1855 1833 'summary': lambda r: repo[r].description().splitlines()[0],
1856 1834 'diff': lambda r: list(repo[r].diff(git=True),)
1857 1835 }
1858 1836 for info in fields:
1859 1837 getfield = _funcs.get(info, None)
1860 1838 if getfield is None:
1861 1839 raise error.ParseError(
1862 1840 # i18n: "matching" is a keyword
1863 1841 _("unexpected field name passed to matching: %s") % info)
1864 1842 getfieldfuncs.append(getfield)
1865 1843 # convert the getfield array of functions into a "getinfo" function
1866 1844 # which returns an array of field values (or a single value if there
1867 1845 # is only one field to match)
1868 1846 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1869 1847
1870 1848 def matches(x):
1871 1849 for rev in revs:
1872 1850 target = getinfo(rev)
1873 1851 match = True
1874 1852 for n, f in enumerate(getfieldfuncs):
1875 1853 if target[n] != f(x):
1876 1854 match = False
1877 1855 if match:
1878 1856 return True
1879 1857 return False
1880 1858
1881 1859 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
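# Example usage (a sketch; '40' stands for any revision identifier):
#
#   hg log -r "matching(40)"                   # same user, description and date as rev 40
#   hg log -r 'matching(40, "author branch")'  # same author and branch as rev 40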
1882 1860
1883 1861 @predicate('reverse(set)', safe=True)
1884 1862 def reverse(repo, subset, x):
1885 1863 """Reverse order of set.
1886 1864 """
1887 1865 l = getset(repo, subset, x)
1888 1866 l.reverse()
1889 1867 return l
1890 1868
1891 1869 @predicate('roots(set)', safe=True)
1892 1870 def roots(repo, subset, x):
1893 1871 """Changesets in set with no parent changeset in set.
1894 1872 """
1895 1873 s = getset(repo, fullreposet(repo), x)
1896 1874 parents = repo.changelog.parentrevs
1897 1875 def filter(r):
1898 1876 for p in parents(r):
1899 1877 if 0 <= p and p in s:
1900 1878 return False
1901 1879 return True
1902 1880 return subset & s.filter(filter, condrepr='<roots>')
1903 1881
1904 1882 @predicate('sort(set[, [-]key...])', safe=True)
1905 1883 def sort(repo, subset, x):
1906 1884 """Sort set by keys. The default sort order is ascending, specify a key
1907 1885 as ``-key`` to sort in descending order.
1908 1886
1909 1887 The keys can be:
1910 1888
1911 1889 - ``rev`` for the revision number,
1912 1890 - ``branch`` for the branch name,
1913 1891 - ``desc`` for the commit message (description),
1914 1892 - ``user`` for user name (``author`` can be used as an alias),
1915 1893 - ``date`` for the commit date
1916 1894 """
1917 1895 # i18n: "sort" is a keyword
1918 1896 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1919 1897 keys = "rev"
1920 1898 if len(l) == 2:
1921 1899 # i18n: "sort" is a keyword
1922 1900 keys = getstring(l[1], _("sort spec must be a string"))
1923 1901
1924 1902 s = l[0]
1925 1903 keys = keys.split()
1926 1904 l = []
1927 1905 def invert(s):
1928 1906 return "".join(chr(255 - ord(c)) for c in s)
1929 1907 revs = getset(repo, subset, s)
1930 1908 if keys == ["rev"]:
1931 1909 revs.sort()
1932 1910 return revs
1933 1911 elif keys == ["-rev"]:
1934 1912 revs.sort(reverse=True)
1935 1913 return revs
1936 1914 for r in revs:
1937 1915 c = repo[r]
1938 1916 e = []
1939 1917 for k in keys:
1940 1918 if k == 'rev':
1941 1919 e.append(r)
1942 1920 elif k == '-rev':
1943 1921 e.append(-r)
1944 1922 elif k == 'branch':
1945 1923 e.append(c.branch())
1946 1924 elif k == '-branch':
1947 1925 e.append(invert(c.branch()))
1948 1926 elif k == 'desc':
1949 1927 e.append(c.description())
1950 1928 elif k == '-desc':
1951 1929 e.append(invert(c.description()))
1952 1930 elif k in 'user author':
1953 1931 e.append(c.user())
1954 1932 elif k in '-user -author':
1955 1933 e.append(invert(c.user()))
1956 1934 elif k == 'date':
1957 1935 e.append(c.date()[0])
1958 1936 elif k == '-date':
1959 1937 e.append(-c.date()[0])
1960 1938 else:
1961 1939 raise error.ParseError(_("unknown sort key %r") % k)
1962 1940 e.append(r)
1963 1941 l.append(e)
1964 1942 l.sort()
1965 1943 return baseset([e[-1] for e in l])
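# Example usage (a sketch):
#
#   hg log -r "sort(branch(default), date)"  # default branch, oldest first
#   hg log -r "sort(all(), -rev)"            # highest revision numbers first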
1966 1944
1967 1945 @predicate('subrepo([pattern])')
1968 1946 def subrepo(repo, subset, x):
1969 1947 """Changesets that add, modify or remove the given subrepo. If no subrepo
1970 1948 pattern is named, any subrepo changes are returned.
1971 1949 """
1972 1950 # i18n: "subrepo" is a keyword
1973 1951 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1974 1952 pat = None
1975 1953 if len(args) != 0:
1976 1954 pat = getstring(args[0], _("subrepo requires a pattern"))
1977 1955
1978 1956 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1979 1957
1980 1958 def submatches(names):
1981 1959 k, p, m = util.stringmatcher(pat)
1982 1960 for name in names:
1983 1961 if m(name):
1984 1962 yield name
1985 1963
1986 1964 def matches(x):
1987 1965 c = repo[x]
1988 1966 s = repo.status(c.p1().node(), c.node(), match=m)
1989 1967
1990 1968 if pat is None:
1991 1969 return s.added or s.modified or s.removed
1992 1970
1993 1971 if s.added:
1994 1972 return any(submatches(c.substate.keys()))
1995 1973
1996 1974 if s.modified:
1997 1975 subs = set(c.p1().substate.keys())
1998 1976 subs.update(c.substate.keys())
1999 1977
2000 1978 for path in submatches(subs):
2001 1979 if c.p1().substate.get(path) != c.substate.get(path):
2002 1980 return True
2003 1981
2004 1982 if s.removed:
2005 1983 return any(submatches(c.p1().substate.keys()))
2006 1984
2007 1985 return False
2008 1986
2009 1987 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2010 1988
2011 1989 def _substringmatcher(pattern):
2012 1990 kind, pattern, matcher = util.stringmatcher(pattern)
2013 1991 if kind == 'literal':
2014 1992 matcher = lambda s: pattern in s
2015 1993 return kind, pattern, matcher
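# For instance (a sketch): _substringmatcher('bob') returns a matcher doing plain
# substring containment ('bob' in s), while _substringmatcher('re:^bob') keeps the
# regular-expression matcher produced by util.stringmatcher unchanged.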
2016 1994
2017 1995 @predicate('tag([name])', safe=True)
2018 1996 def tag(repo, subset, x):
2019 1997 """The specified tag by name, or all tagged revisions if no name is given.
2020 1998
2021 1999 If `name` starts with `re:`, the remainder of the name is treated as
2022 2000 a regular expression. To match a tag that actually starts with `re:`,
2023 2001 use the prefix `literal:`.
2024 2002 """
2025 2003 # i18n: "tag" is a keyword
2026 2004 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2027 2005 cl = repo.changelog
2028 2006 if args:
2029 2007 pattern = getstring(args[0],
2030 2008 # i18n: "tag" is a keyword
2031 2009 _('the argument to tag must be a string'))
2032 2010 kind, pattern, matcher = util.stringmatcher(pattern)
2033 2011 if kind == 'literal':
2034 2012 # avoid resolving all tags
2035 2013 tn = repo._tagscache.tags.get(pattern, None)
2036 2014 if tn is None:
2037 2015 raise error.RepoLookupError(_("tag '%s' does not exist")
2038 2016 % pattern)
2039 2017 s = set([repo[tn].rev()])
2040 2018 else:
2041 2019 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2042 2020 else:
2043 2021 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2044 2022 return subset & s
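# Example usage (a sketch, assuming tags such as 1.0 and 1.1 exist):
#
#   hg log -r "tag()"            # every tagged revision except the 'tip' pseudo-tag
#   hg log -r "tag('re:^1\.')"   # tags matching the regular expression ^1\.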
2045 2023
2046 2024 @predicate('tagged', safe=True)
2047 2025 def tagged(repo, subset, x):
2048 2026 return tag(repo, subset, x)
2049 2027
2050 2028 @predicate('unstable()', safe=True)
2051 2029 def unstable(repo, subset, x):
2052 2030 """Non-obsolete changesets with obsolete ancestors.
2053 2031 """
2054 2032 # i18n: "unstable" is a keyword
2055 2033 getargs(x, 0, 0, _("unstable takes no arguments"))
2056 2034 unstables = obsmod.getrevs(repo, 'unstable')
2057 2035 return subset & unstables
2058 2036
2059 2037
2060 2038 @predicate('user(string)', safe=True)
2061 2039 def user(repo, subset, x):
2062 2040 """User name contains string. The match is case-insensitive.
2063 2041
2064 2042 If `string` starts with `re:`, the remainder of the string is treated as
2065 2043 a regular expression. To match a user that actually contains `re:`, use
2066 2044 the prefix `literal:`.
2067 2045 """
2068 2046 return author(repo, subset, x)
2069 2047
2070 2048 # experimental
2071 2049 @predicate('wdir', safe=True)
2072 2050 def wdir(repo, subset, x):
2073 2051 # i18n: "wdir" is a keyword
2074 2052 getargs(x, 0, 0, _("wdir takes no arguments"))
2075 2053 if node.wdirrev in subset or isinstance(subset, fullreposet):
2076 2054 return baseset([node.wdirrev])
2077 2055 return baseset()
2078 2056
2079 2057 # for internal use
2080 2058 @predicate('_list', safe=True)
2081 2059 def _list(repo, subset, x):
2082 2060 s = getstring(x, "internal error")
2083 2061 if not s:
2084 2062 return baseset()
2085 2063 # remove duplicates here. it's difficult for caller to deduplicate sets
2086 2064 # because different symbols can point to the same rev.
2087 2065 cl = repo.changelog
2088 2066 ls = []
2089 2067 seen = set()
2090 2068 for t in s.split('\0'):
2091 2069 try:
2092 2070 # fast path for integer revision
2093 2071 r = int(t)
2094 2072 if str(r) != t or r not in cl:
2095 2073 raise ValueError
2096 2074 revs = [r]
2097 2075 except ValueError:
2098 2076 revs = stringset(repo, subset, t)
2099 2077
2100 2078 for r in revs:
2101 2079 if r in seen:
2102 2080 continue
2103 2081 if (r in subset
2104 2082 or r == node.nullrev and isinstance(subset, fullreposet)):
2105 2083 ls.append(r)
2106 2084 seen.add(r)
2107 2085 return baseset(ls)
2108 2086
2109 2087 # for internal use
2110 2088 @predicate('_intlist', safe=True)
2111 2089 def _intlist(repo, subset, x):
2112 2090 s = getstring(x, "internal error")
2113 2091 if not s:
2114 2092 return baseset()
2115 2093 ls = [int(r) for r in s.split('\0')]
2116 2094 s = subset
2117 2095 return baseset([r for r in ls if r in s])
2118 2096
2119 2097 # for internal use
2120 2098 @predicate('_hexlist', safe=True)
2121 2099 def _hexlist(repo, subset, x):
2122 2100 s = getstring(x, "internal error")
2123 2101 if not s:
2124 2102 return baseset()
2125 2103 cl = repo.changelog
2126 2104 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2127 2105 s = subset
2128 2106 return baseset([r for r in ls if r in s])
2129 2107
2130 2108 methods = {
2131 2109 "range": rangeset,
2132 2110 "dagrange": dagrange,
2133 2111 "string": stringset,
2134 2112 "symbol": stringset,
2135 2113 "and": andset,
2136 2114 "or": orset,
2137 2115 "not": notset,
2138 2116 "difference": differenceset,
2139 2117 "list": listset,
2140 2118 "keyvalue": keyvaluepair,
2141 2119 "func": func,
2142 2120 "ancestor": ancestorspec,
2143 2121 "parent": parentspec,
2144 2122 "parentpost": p1,
2145 2123 }
2146 2124
2147 2125 def optimize(x, small):
2148 2126 if x is None:
2149 2127 return 0, x
2150 2128
2151 2129 smallbonus = 1
2152 2130 if small:
2153 2131 smallbonus = .5
2154 2132
2155 2133 op = x[0]
2156 2134 if op == 'minus':
2157 2135 return optimize(('and', x[1], ('not', x[2])), small)
2158 2136 elif op == 'only':
2159 2137 return optimize(('func', ('symbol', 'only'),
2160 2138 ('list', x[1], x[2])), small)
2161 2139 elif op == 'onlypost':
2162 2140 return optimize(('func', ('symbol', 'only'), x[1]), small)
2163 2141 elif op == 'dagrangepre':
2164 2142 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2165 2143 elif op == 'dagrangepost':
2166 2144 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2167 2145 elif op == 'rangeall':
2168 2146 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2169 2147 elif op == 'rangepre':
2170 2148 return optimize(('range', ('string', '0'), x[1]), small)
2171 2149 elif op == 'rangepost':
2172 2150 return optimize(('range', x[1], ('string', 'tip')), small)
2173 2151 elif op == 'negate':
2174 2152 return optimize(('string',
2175 2153 '-' + getstring(x[1], _("can't negate that"))), small)
2176 2154 elif op in 'string symbol negate':
2177 2155 return smallbonus, x # single revisions are small
2178 2156 elif op == 'and':
2179 2157 wa, ta = optimize(x[1], True)
2180 2158 wb, tb = optimize(x[2], True)
2181 2159
2182 2160 # (::x and not ::y)/(not ::y and ::x) have a fast path
2183 2161 def isonly(revs, bases):
2184 2162 return (
2185 2163 revs is not None
2186 2164 and revs[0] == 'func'
2187 2165 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2188 2166 and bases is not None
2189 2167 and bases[0] == 'not'
2190 2168 and bases[1][0] == 'func'
2191 2169 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2192 2170
2193 2171 w = min(wa, wb)
2194 2172 if isonly(ta, tb):
2195 2173 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2196 2174 if isonly(tb, ta):
2197 2175 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2198 2176
2199 2177 if tb is not None and tb[0] == 'not':
2200 2178 return wa, ('difference', ta, tb[1])
2201 2179
2202 2180 if wa > wb:
2203 2181 return w, (op, tb, ta)
2204 2182 return w, (op, ta, tb)
2205 2183 elif op == 'or':
2206 2184 # fast path for machine-generated expressions that are likely to have
2207 2185 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2208 2186 ws, ts, ss = [], [], []
2209 2187 def flushss():
2210 2188 if not ss:
2211 2189 return
2212 2190 if len(ss) == 1:
2213 2191 w, t = ss[0]
2214 2192 else:
2215 2193 s = '\0'.join(t[1] for w, t in ss)
2216 2194 y = ('func', ('symbol', '_list'), ('string', s))
2217 2195 w, t = optimize(y, False)
2218 2196 ws.append(w)
2219 2197 ts.append(t)
2220 2198 del ss[:]
2221 2199 for y in x[1:]:
2222 2200 w, t = optimize(y, False)
2223 2201 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2224 2202 ss.append((w, t))
2225 2203 continue
2226 2204 flushss()
2227 2205 ws.append(w)
2228 2206 ts.append(t)
2229 2207 flushss()
2230 2208 if len(ts) == 1:
2231 2209 return ws[0], ts[0] # 'or' operation is fully optimized out
2232 2210 # we can't reorder trees by weight because it would change the order.
2233 2211 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2234 2212 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2235 2213 return max(ws), (op,) + tuple(ts)
2236 2214 elif op == 'not':
2237 2215 # Optimize not public() to _notpublic() because we have a fast version
2238 2216 if x[1] == ('func', ('symbol', 'public'), None):
2239 2217 newsym = ('func', ('symbol', '_notpublic'), None)
2240 2218 o = optimize(newsym, not small)
2241 2219 return o[0], o[1]
2242 2220 else:
2243 2221 o = optimize(x[1], not small)
2244 2222 return o[0], (op, o[1])
2245 2223 elif op == 'parentpost':
2246 2224 o = optimize(x[1], small)
2247 2225 return o[0], (op, o[1])
2248 2226 elif op == 'group':
2249 2227 return optimize(x[1], small)
2250 2228 elif op in 'dagrange range parent ancestorspec':
2251 2229 if op == 'parent':
2252 2230 # x^:y means (x^) : y, not x ^ (:y)
2253 2231 post = ('parentpost', x[1])
2254 2232 if x[2][0] == 'dagrangepre':
2255 2233 return optimize(('dagrange', post, x[2][1]), small)
2256 2234 elif x[2][0] == 'rangepre':
2257 2235 return optimize(('range', post, x[2][1]), small)
2258 2236
2259 2237 wa, ta = optimize(x[1], small)
2260 2238 wb, tb = optimize(x[2], small)
2261 2239 return wa + wb, (op, ta, tb)
2262 2240 elif op == 'list':
2263 2241 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2264 2242 return sum(ws), (op,) + ts
2265 2243 elif op == 'func':
2266 2244 f = getstring(x[1], _("not a symbol"))
2267 2245 wa, ta = optimize(x[2], small)
2268 2246 if f in ("author branch closed date desc file grep keyword "
2269 2247 "outgoing user"):
2270 2248 w = 10 # slow
2271 2249 elif f in "modifies adds removes":
2272 2250 w = 30 # slower
2273 2251 elif f == "contains":
2274 2252 w = 100 # very slow
2275 2253 elif f == "ancestor":
2276 2254 w = 1 * smallbonus
2277 2255 elif f in "reverse limit first _intlist":
2278 2256 w = 0
2279 2257 elif f in "sort":
2280 2258 w = 10 # assume most sorts look at changelog
2281 2259 else:
2282 2260 w = 1
2283 2261 return w + wa, (op, x[1], ta)
2284 2262 return 1, x
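# A sketch of the rewrites optimize() performs (weights omitted):
#
#   '::x and not ::y'  ->  equivalent of 'only(x, y)'   # the isonly() fast path above
#   'not public()'     ->  '_notpublic()'               # cheaper phase query
#   'a + b + c()'      ->  '_list(a b) + c()'           # trivial revisions batched for 'or'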
2285 2263
2286 2264 _aliasarg = ('func', ('symbol', '_aliasarg'))
2287 2265 def _getaliasarg(tree):
2288 2266 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2289 2267 return X, None otherwise.
2290 2268 """
2291 2269 if (len(tree) == 3 and tree[:2] == _aliasarg
2292 2270 and tree[2][0] == 'string'):
2293 2271 return tree[2][1]
2294 2272 return None
2295 2273
2296 2274 def _checkaliasarg(tree, known=None):
2297 2275 """Check tree contains no _aliasarg construct or only ones which
2298 2276 value is in known. Used to avoid alias placeholders injection.
2299 2277 """
2300 2278 if isinstance(tree, tuple):
2301 2279 arg = _getaliasarg(tree)
2302 2280 if arg is not None and (not known or arg not in known):
2303 2281 raise error.UnknownIdentifier('_aliasarg', [])
2304 2282 for t in tree:
2305 2283 _checkaliasarg(t, known)
2306 2284
2307 2285 # the set of valid characters for the initial letter of symbols in
2308 2286 # alias declarations and definitions
2309 2287 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2310 2288 if c.isalnum() or c in '._@$' or ord(c) > 127)
2311 2289
2312 2290 def _tokenizealias(program, lookup=None):
2313 2291 """Parse alias declaration/definition into a stream of tokens
2314 2292
2315 2293 This allows symbol names to also use ``$`` as an initial letter
2316 2294 (for backward compatibility), and callers of this function should
2317 2295 examine whether ``$`` is also used for unexpected symbols or not.
2318 2296 """
2319 2297 return tokenize(program, lookup=lookup,
2320 2298 syminitletters=_aliassyminitletters)
2321 2299
2322 2300 def _parsealiasdecl(decl):
2323 2301 """Parse alias declaration ``decl``
2324 2302
2325 2303 This returns ``(name, tree, args, errorstr)`` tuple:
2326 2304
2327 2305 - ``name``: of declared alias (may be ``decl`` itself at error)
2328 2306 - ``tree``: parse result (or ``None`` at error)
2329 2307 - ``args``: list of alias argument names (or None for symbol declaration)
2330 2308 - ``errorstr``: detail about detected error (or None)
2331 2309
2332 2310 >>> _parsealiasdecl('foo')
2333 2311 ('foo', ('symbol', 'foo'), None, None)
2334 2312 >>> _parsealiasdecl('$foo')
2335 2313 ('$foo', None, None, "'$' not for alias arguments")
2336 2314 >>> _parsealiasdecl('foo::bar')
2337 2315 ('foo::bar', None, None, 'invalid format')
2338 2316 >>> _parsealiasdecl('foo bar')
2339 2317 ('foo bar', None, None, 'at 4: invalid token')
2340 2318 >>> _parsealiasdecl('foo()')
2341 2319 ('foo', ('func', ('symbol', 'foo')), [], None)
2342 2320 >>> _parsealiasdecl('$foo()')
2343 2321 ('$foo()', None, None, "'$' not for alias arguments")
2344 2322 >>> _parsealiasdecl('foo($1, $2)')
2345 2323 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2346 2324 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2347 2325 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2348 2326 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2349 2327 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2350 2328 >>> _parsealiasdecl('foo(bar($1, $2))')
2351 2329 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2352 2330 >>> _parsealiasdecl('foo("string")')
2353 2331 ('foo("string")', None, None, 'invalid argument list')
2354 2332 >>> _parsealiasdecl('foo($1, $2')
2355 2333 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2356 2334 >>> _parsealiasdecl('foo("string')
2357 2335 ('foo("string', None, None, 'at 5: unterminated string')
2358 2336 >>> _parsealiasdecl('foo($1, $2, $1)')
2359 2337 ('foo', None, None, 'argument names collide with each other')
2360 2338 """
2361 2339 p = parser.parser(elements)
2362 2340 try:
2363 2341 tree, pos = p.parse(_tokenizealias(decl))
2364 2342 if (pos != len(decl)):
2365 2343 raise error.ParseError(_('invalid token'), pos)
2366 2344 tree = parser.simplifyinfixops(tree, ('list',))
2367 2345
2368 2346 if isvalidsymbol(tree):
2369 2347 # "name = ...." style
2370 2348 name = getsymbol(tree)
2371 2349 if name.startswith('$'):
2372 2350 return (decl, None, None, _("'$' not for alias arguments"))
2373 2351 return (name, ('symbol', name), None, None)
2374 2352
2375 2353 if isvalidfunc(tree):
2376 2354 # "name(arg, ....) = ...." style
2377 2355 name = getfuncname(tree)
2378 2356 if name.startswith('$'):
2379 2357 return (decl, None, None, _("'$' not for alias arguments"))
2380 2358 args = []
2381 2359 for arg in getfuncargs(tree):
2382 2360 if not isvalidsymbol(arg):
2383 2361 return (decl, None, None, _("invalid argument list"))
2384 2362 args.append(getsymbol(arg))
2385 2363 if len(args) != len(set(args)):
2386 2364 return (name, None, None,
2387 2365 _("argument names collide with each other"))
2388 2366 return (name, ('func', ('symbol', name)), args, None)
2389 2367
2390 2368 return (decl, None, None, _("invalid format"))
2391 2369 except error.ParseError as inst:
2392 2370 return (decl, None, None, parseerrordetail(inst))
2393 2371
2394 2372 def _parsealiasdefn(defn, args):
2395 2373 """Parse alias definition ``defn``
2396 2374
2397 2375 This function also replaces alias argument references in the
2398 2376 specified definition by ``_aliasarg(ARGNAME)``.
2399 2377
2400 2378 ``args`` is a list of alias argument names, or None if the alias
2401 2379 is declared as a symbol.
2402 2380
2403 2381 This returns "tree" as parsing result.
2404 2382
2405 2383 >>> args = ['$1', '$2', 'foo']
2406 2384 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2407 2385 (or
2408 2386 (func
2409 2387 ('symbol', '_aliasarg')
2410 2388 ('string', '$1'))
2411 2389 (func
2412 2390 ('symbol', '_aliasarg')
2413 2391 ('string', 'foo')))
2414 2392 >>> try:
2415 2393 ... _parsealiasdefn('$1 or $bar', args)
2416 2394 ... except error.ParseError, inst:
2417 2395 ... print parseerrordetail(inst)
2418 2396 at 6: '$' not for alias arguments
2419 2397 >>> args = ['$1', '$10', 'foo']
2420 2398 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2421 2399 (or
2422 2400 (func
2423 2401 ('symbol', '_aliasarg')
2424 2402 ('string', '$10'))
2425 2403 ('symbol', 'foobar'))
2426 2404 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2427 2405 (or
2428 2406 ('string', '$1')
2429 2407 ('string', 'foo'))
2430 2408 """
2431 2409 def tokenizedefn(program, lookup=None):
2432 2410 if args:
2433 2411 argset = set(args)
2434 2412 else:
2435 2413 argset = set()
2436 2414
2437 2415 for t, value, pos in _tokenizealias(program, lookup=lookup):
2438 2416 if t == 'symbol':
2439 2417 if value in argset:
2440 2418 # emulate tokenization of "_aliasarg('ARGNAME')":
2441 2419 # "_aliasarg()" is an unknown symbol only used separate
2442 2420 # alias argument placeholders from regular strings.
2443 2421 yield ('symbol', '_aliasarg', pos)
2444 2422 yield ('(', None, pos)
2445 2423 yield ('string', value, pos)
2446 2424 yield (')', None, pos)
2447 2425 continue
2448 2426 elif value.startswith('$'):
2449 2427 raise error.ParseError(_("'$' not for alias arguments"),
2450 2428 pos)
2451 2429 yield (t, value, pos)
2452 2430
2453 2431 p = parser.parser(elements)
2454 2432 tree, pos = p.parse(tokenizedefn(defn))
2455 2433 if pos != len(defn):
2456 2434 raise error.ParseError(_('invalid token'), pos)
2457 2435 return parser.simplifyinfixops(tree, ('list', 'or'))
2458 2436
2459 2437 class revsetalias(object):
2460 2438 # whether the alias's own `error` information has already been shown or not.
2461 2439 # this avoids showing the same warning multiple times at each `findaliases`.
2462 2440 warned = False
2463 2441
2464 2442 def __init__(self, name, value):
2465 2443 '''Aliases like:
2466 2444
2467 2445 h = heads(default)
2468 2446 b($1) = ancestors($1) - ancestors(default)
2469 2447 '''
2470 2448 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2471 2449 if self.error:
2472 2450 self.error = _('failed to parse the declaration of revset alias'
2473 2451 ' "%s": %s') % (self.name, self.error)
2474 2452 return
2475 2453
2476 2454 try:
2477 2455 self.replacement = _parsealiasdefn(value, self.args)
2478 2456 # Check for placeholder injection
2479 2457 _checkaliasarg(self.replacement, self.args)
2480 2458 except error.ParseError as inst:
2481 2459 self.error = _('failed to parse the definition of revset alias'
2482 2460 ' "%s": %s') % (self.name, parseerrordetail(inst))
2483 2461
2484 2462 def _getalias(aliases, tree):
2485 2463 """If tree looks like an unexpanded alias, return it. Return None
2486 2464 otherwise.
2487 2465 """
2488 2466 if isinstance(tree, tuple) and tree:
2489 2467 if tree[0] == 'symbol' and len(tree) == 2:
2490 2468 name = tree[1]
2491 2469 alias = aliases.get(name)
2492 2470 if alias and alias.args is None and alias.tree == tree:
2493 2471 return alias
2494 2472 if tree[0] == 'func' and len(tree) > 1:
2495 2473 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2496 2474 name = tree[1][1]
2497 2475 alias = aliases.get(name)
2498 2476 if alias and alias.args is not None and alias.tree == tree[:2]:
2499 2477 return alias
2500 2478 return None
2501 2479
2502 2480 def _expandargs(tree, args):
2503 2481 """Replace _aliasarg instances with the substitution value of the
2504 2482 same name in args, recursively.
2505 2483 """
2506 2484 if not tree or not isinstance(tree, tuple):
2507 2485 return tree
2508 2486 arg = _getaliasarg(tree)
2509 2487 if arg is not None:
2510 2488 return args[arg]
2511 2489 return tuple(_expandargs(t, args) for t in tree)
2512 2490
2513 2491 def _expandaliases(aliases, tree, expanding, cache):
2514 2492 """Expand aliases in tree, recursively.
2515 2493
2516 2494 'aliases' is a dictionary mapping user defined aliases to
2517 2495 revsetalias objects.
2518 2496 """
2519 2497 if not isinstance(tree, tuple):
2520 2498 # Do not expand raw strings
2521 2499 return tree
2522 2500 alias = _getalias(aliases, tree)
2523 2501 if alias is not None:
2524 2502 if alias.error:
2525 2503 raise error.Abort(alias.error)
2526 2504 if alias in expanding:
2527 2505 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2528 2506 'detected') % alias.name)
2529 2507 expanding.append(alias)
2530 2508 if alias.name not in cache:
2531 2509 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2532 2510 expanding, cache)
2533 2511 result = cache[alias.name]
2534 2512 expanding.pop()
2535 2513 if alias.args is not None:
2536 2514 l = getlist(tree[2])
2537 2515 if len(l) != len(alias.args):
2538 2516 raise error.ParseError(
2539 2517 _('invalid number of arguments: %d') % len(l))
2540 2518 l = [_expandaliases(aliases, a, [], cache) for a in l]
2541 2519 result = _expandargs(result, dict(zip(alias.args, l)))
2542 2520 else:
2543 2521 result = tuple(_expandaliases(aliases, t, expanding, cache)
2544 2522 for t in tree)
2545 2523 return result
2546 2524
2547 2525 def findaliases(ui, tree, showwarning=None):
2548 2526 _checkaliasarg(tree)
2549 2527 aliases = {}
2550 2528 for k, v in ui.configitems('revsetalias'):
2551 2529 alias = revsetalias(k, v)
2552 2530 aliases[alias.name] = alias
2553 2531 tree = _expandaliases(aliases, tree, [], {})
2554 2532 if showwarning:
2555 2533 # warn about problematic (but not referenced) aliases
2556 2534 for name, alias in sorted(aliases.iteritems()):
2557 2535 if alias.error and not alias.warned:
2558 2536 showwarning(_('warning: %s\n') % (alias.error))
2559 2537 alias.warned = True
2560 2538 return tree
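# Example configuration consumed by findaliases() (a sketch):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# after which an expression such as 'b(feature)' is expanded before evaluation.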
2561 2539
2562 2540 def foldconcat(tree):
2563 2541 """Fold elements to be concatenated by `##`
2564 2542 """
2565 2543 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2566 2544 return tree
2567 2545 if tree[0] == '_concat':
2568 2546 pending = [tree]
2569 2547 l = []
2570 2548 while pending:
2571 2549 e = pending.pop()
2572 2550 if e[0] == '_concat':
2573 2551 pending.extend(reversed(e[1:]))
2574 2552 elif e[0] in ('string', 'symbol'):
2575 2553 l.append(e[1])
2576 2554 else:
2577 2555 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2578 2556 raise error.ParseError(msg)
2579 2557 return ('string', ''.join(l))
2580 2558 else:
2581 2559 return tuple(foldconcat(t) for t in tree)
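# For instance (a sketch): the '##' operator produces a '_concat' node, and
# foldconcat(('_concat', ('string', 'a'), ('symbol', 'b'))) collapses it into
# ('string', 'ab') before the tree is evaluated.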
2582 2560
2583 2561 def parse(spec, lookup=None):
2584 2562 p = parser.parser(elements)
2585 2563 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2586 2564 if pos != len(spec):
2587 2565 raise error.ParseError(_("invalid token"), pos)
2588 2566 return parser.simplifyinfixops(tree, ('list', 'or'))
2589 2567
2590 2568 def posttreebuilthook(tree, repo):
2591 2569 # hook for extensions to execute code on the optimized tree
2592 2570 pass
2593 2571
2594 2572 def match(ui, spec, repo=None):
2595 2573 if not spec:
2596 2574 raise error.ParseError(_("empty query"))
2597 2575 lookup = None
2598 2576 if repo:
2599 2577 lookup = repo.__contains__
2600 2578 tree = parse(spec, lookup)
2601 2579 return _makematcher(ui, tree, repo)
2602 2580
2603 2581 def matchany(ui, specs, repo=None):
2604 2582 """Create a matcher that will include any revisions matching one of the
2605 2583 given specs"""
2606 2584 if not specs:
2607 2585 def mfunc(repo, subset=None):
2608 2586 return baseset()
2609 2587 return mfunc
2610 2588 if not all(specs):
2611 2589 raise error.ParseError(_("empty query"))
2612 2590 lookup = None
2613 2591 if repo:
2614 2592 lookup = repo.__contains__
2615 2593 if len(specs) == 1:
2616 2594 tree = parse(specs[0], lookup)
2617 2595 else:
2618 2596 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2619 2597 return _makematcher(ui, tree, repo)
2620 2598
2621 2599 def _makematcher(ui, tree, repo):
2622 2600 if ui:
2623 2601 tree = findaliases(ui, tree, showwarning=ui.warn)
2624 2602 tree = foldconcat(tree)
2625 2603 weight, tree = optimize(tree, True)
2626 2604 posttreebuilthook(tree, repo)
2627 2605 def mfunc(repo, subset=None):
2628 2606 if subset is None:
2629 2607 subset = fullreposet(repo)
2630 2608 if util.safehasattr(subset, 'isascending'):
2631 2609 result = getset(repo, subset, tree)
2632 2610 else:
2633 2611 result = getset(repo, baseset(subset), tree)
2634 2612 return result
2635 2613 return mfunc
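# Example usage of match() (a sketch of the expected caller pattern):
#
#   m = match(ui, 'head() and not closed()')
#   revs = m(repo)          # a smartset over the whole repository
#   revs = m(repo, subset)  # restricted to an existing subset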
2636 2614
2637 2615 def formatspec(expr, *args):
2638 2616 '''
2639 2617 This is a convenience function for using revsets internally, and
2640 2618 escapes arguments appropriately. Aliases are intentionally ignored
2641 2619 so that intended expression behavior isn't accidentally subverted.
2642 2620
2643 2621 Supported arguments:
2644 2622
2645 2623 %r = revset expression, parenthesized
2646 2624 %d = int(arg), no quoting
2647 2625 %s = string(arg), escaped and single-quoted
2648 2626 %b = arg.branch(), escaped and single-quoted
2649 2627 %n = hex(arg), single-quoted
2650 2628 %% = a literal '%'
2651 2629
2652 2630 Prefixing the type with 'l' specifies a parenthesized list of that type.
2653 2631
2654 2632 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2655 2633 '(10 or 11):: and ((this()) or (that()))'
2656 2634 >>> formatspec('%d:: and not %d::', 10, 20)
2657 2635 '10:: and not 20::'
2658 2636 >>> formatspec('%ld or %ld', [], [1])
2659 2637 "_list('') or 1"
2660 2638 >>> formatspec('keyword(%s)', 'foo\\xe9')
2661 2639 "keyword('foo\\\\xe9')"
2662 2640 >>> b = lambda: 'default'
2663 2641 >>> b.branch = b
2664 2642 >>> formatspec('branch(%b)', b)
2665 2643 "branch('default')"
2666 2644 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2667 2645 "root(_list('a\\x00b\\x00c\\x00d'))"
2668 2646 '''
2669 2647
2670 2648 def quote(s):
2671 2649 return repr(str(s))
2672 2650
2673 2651 def argtype(c, arg):
2674 2652 if c == 'd':
2675 2653 return str(int(arg))
2676 2654 elif c == 's':
2677 2655 return quote(arg)
2678 2656 elif c == 'r':
2679 2657 parse(arg) # make sure syntax errors are confined
2680 2658 return '(%s)' % arg
2681 2659 elif c == 'n':
2682 2660 return quote(node.hex(arg))
2683 2661 elif c == 'b':
2684 2662 return quote(arg.branch())
2685 2663
2686 2664 def listexp(s, t):
2687 2665 l = len(s)
2688 2666 if l == 0:
2689 2667 return "_list('')"
2690 2668 elif l == 1:
2691 2669 return argtype(t, s[0])
2692 2670 elif t == 'd':
2693 2671 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2694 2672 elif t == 's':
2695 2673 return "_list('%s')" % "\0".join(s)
2696 2674 elif t == 'n':
2697 2675 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2698 2676 elif t == 'b':
2699 2677 return "_list('%s')" % "\0".join(a.branch() for a in s)
2700 2678
2701 2679 m = l // 2
2702 2680 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2703 2681
2704 2682 ret = ''
2705 2683 pos = 0
2706 2684 arg = 0
2707 2685 while pos < len(expr):
2708 2686 c = expr[pos]
2709 2687 if c == '%':
2710 2688 pos += 1
2711 2689 d = expr[pos]
2712 2690 if d == '%':
2713 2691 ret += d
2714 2692 elif d in 'dsnbr':
2715 2693 ret += argtype(d, args[arg])
2716 2694 arg += 1
2717 2695 elif d == 'l':
2718 2696 # a list of some type
2719 2697 pos += 1
2720 2698 d = expr[pos]
2721 2699 ret += listexp(list(args[arg]), d)
2722 2700 arg += 1
2723 2701 else:
2724 2702 raise error.Abort('unexpected revspec format character %s' % d)
2725 2703 else:
2726 2704 ret += c
2727 2705 pos += 1
2728 2706
2729 2707 return ret
2730 2708
2731 2709 def prettyformat(tree):
2732 2710 return parser.prettyformat(tree, ('string', 'symbol'))
2733 2711
2734 2712 def depth(tree):
2735 2713 if isinstance(tree, tuple):
2736 2714 return max(map(depth, tree)) + 1
2737 2715 else:
2738 2716 return 0
2739 2717
2740 2718 def funcsused(tree):
2741 2719 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2742 2720 return set()
2743 2721 else:
2744 2722 funcs = set()
2745 2723 for s in tree[1:]:
2746 2724 funcs |= funcsused(s)
2747 2725 if tree[0] == 'func':
2748 2726 funcs.add(tree[1][1])
2749 2727 return funcs
2750 2728
2751 2729 def _formatsetrepr(r):
2752 2730 """Format an optional printable representation of a set
2753 2731
2754 2732 ======== =================================
2755 2733 type(r) example
2756 2734 ======== =================================
2757 2735 tuple ('<not %r>', other)
2758 2736 str '<branch closed>'
2759 2737 callable lambda: '<branch %r>' % sorted(b)
2760 2738 object other
2761 2739 ======== =================================
2762 2740 """
2763 2741 if r is None:
2764 2742 return ''
2765 2743 elif isinstance(r, tuple):
2766 2744 return r[0] % r[1:]
2767 2745 elif isinstance(r, str):
2768 2746 return r
2769 2747 elif callable(r):
2770 2748 return r()
2771 2749 else:
2772 2750 return repr(r)
2773 2751
2774 2752 class abstractsmartset(object):
2775 2753
2776 2754 def __nonzero__(self):
2777 2755 """True if the smartset is not empty"""
2778 2756 raise NotImplementedError()
2779 2757
2780 2758 def __contains__(self, rev):
2781 2759 """provide fast membership testing"""
2782 2760 raise NotImplementedError()
2783 2761
2784 2762 def __iter__(self):
2785 2763 """iterate the set in the order it is supposed to be iterated"""
2786 2764 raise NotImplementedError()
2787 2765
2788 2766 # Attributes containing a function to perform a fast iteration in a given
2789 2767 # direction. A smartset can have none, one, or both defined.
2790 2768 #
2791 2769 # Default value is None instead of a function returning None to avoid
2792 2770 # initializing an iterator just for testing if a fast method exists.
2793 2771 fastasc = None
2794 2772 fastdesc = None
2795 2773
2796 2774 def isascending(self):
2797 2775 """True if the set will iterate in ascending order"""
2798 2776 raise NotImplementedError()
2799 2777
2800 2778 def isdescending(self):
2801 2779 """True if the set will iterate in descending order"""
2802 2780 raise NotImplementedError()
2803 2781
2804 2782 @util.cachefunc
2805 2783 def min(self):
2806 2784 """return the minimum element in the set"""
2807 2785 if self.fastasc is not None:
2808 2786 for r in self.fastasc():
2809 2787 return r
2810 2788 raise ValueError('arg is an empty sequence')
2811 2789 return min(self)
2812 2790
2813 2791 @util.cachefunc
2814 2792 def max(self):
2815 2793 """return the maximum element in the set"""
2816 2794 if self.fastdesc is not None:
2817 2795 for r in self.fastdesc():
2818 2796 return r
2819 2797 raise ValueError('arg is an empty sequence')
2820 2798 return max(self)
2821 2799
2822 2800 def first(self):
2823 2801 """return the first element in the set (user iteration perspective)
2824 2802
2825 2803 Return None if the set is empty"""
2826 2804 raise NotImplementedError()
2827 2805
2828 2806 def last(self):
2829 2807 """return the last element in the set (user iteration perspective)
2830 2808
2831 2809 Return None if the set is empty"""
2832 2810 raise NotImplementedError()
2833 2811
2834 2812 def __len__(self):
2835 2813 """return the length of the smartsets
2836 2814
2837 2815 This can be expensive on smartset that could be lazy otherwise."""
2838 2816 raise NotImplementedError()
2839 2817
2840 2818 def reverse(self):
2841 2819 """reverse the expected iteration order"""
2842 2820 raise NotImplementedError()
2843 2821
2844 2822 def sort(self, reverse=True):
2845 2823 """get the set to iterate in an ascending or descending order"""
2846 2824 raise NotImplementedError()
2847 2825
2848 2826 def __and__(self, other):
2849 2827 """Returns a new object with the intersection of the two collections.
2850 2828
2851 2829 This is part of the mandatory API for smartset."""
2852 2830 if isinstance(other, fullreposet):
2853 2831 return self
2854 2832 return self.filter(other.__contains__, condrepr=other, cache=False)
2855 2833
2856 2834 def __add__(self, other):
2857 2835 """Returns a new object with the union of the two collections.
2858 2836
2859 2837 This is part of the mandatory API for smartset."""
2860 2838 return addset(self, other)
2861 2839
2862 2840 def __sub__(self, other):
2863 2841 """Returns a new object with the substraction of the two collections.
2864 2842
2865 2843 This is part of the mandatory API for smartset."""
2866 2844 c = other.__contains__
2867 2845 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2868 2846 cache=False)
2869 2847
2870 2848 def filter(self, condition, condrepr=None, cache=True):
2871 2849 """Returns this smartset filtered by condition as a new smartset.
2872 2850
2873 2851 `condition` is a callable which takes a revision number and returns a
2874 2852 boolean. Optional `condrepr` provides a printable representation of
2875 2853 the given `condition`.
2876 2854
2877 2855 This is part of the mandatory API for smartset."""
2878 2856 # builtins cannot be cached, but they do not need to be
2879 2857 if cache and util.safehasattr(condition, 'func_code'):
2880 2858 condition = util.cachefunc(condition)
2881 2859 return filteredset(self, condition, condrepr)
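# For instance (a sketch): given two smartsets xs and ys, the default
# implementations defined above return:
#
#   xs & ys  ->  filteredset keeping only revisions also in ys
#   xs + ys  ->  addset, a lazy union
#   xs - ys  ->  filteredset excluding revisions in ys
#   xs.filter(lambda r: r > 10, condrepr='<gt 10>')  ->  lazy filtered view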
2882 2860
2883 2861 class baseset(abstractsmartset):
2884 2862 """Basic data structure that represents a revset and contains the basic
2885 2863 operations that it should be able to perform.
2886 2864
2887 2865 Every method in this class should be implemented by any smartset class.
2888 2866 """
2889 2867 def __init__(self, data=(), datarepr=None):
2890 2868 """
2891 2869 datarepr: a tuple of (format, obj, ...), a function or an object that
2892 2870 provides a printable representation of the given data.
2893 2871 """
2894 2872 if not isinstance(data, list):
2895 2873 if isinstance(data, set):
2896 2874 self._set = data
2897 2875 data = list(data)
2898 2876 self._list = data
2899 2877 self._datarepr = datarepr
2900 2878 self._ascending = None
2901 2879
2902 2880 @util.propertycache
2903 2881 def _set(self):
2904 2882 return set(self._list)
2905 2883
2906 2884 @util.propertycache
2907 2885 def _asclist(self):
2908 2886 asclist = self._list[:]
2909 2887 asclist.sort()
2910 2888 return asclist
2911 2889
2912 2890 def __iter__(self):
2913 2891 if self._ascending is None:
2914 2892 return iter(self._list)
2915 2893 elif self._ascending:
2916 2894 return iter(self._asclist)
2917 2895 else:
2918 2896 return reversed(self._asclist)
2919 2897
2920 2898 def fastasc(self):
2921 2899 return iter(self._asclist)
2922 2900
2923 2901 def fastdesc(self):
2924 2902 return reversed(self._asclist)
2925 2903
2926 2904 @util.propertycache
2927 2905 def __contains__(self):
2928 2906 return self._set.__contains__
2929 2907
2930 2908 def __nonzero__(self):
2931 2909 return bool(self._list)
2932 2910
2933 2911 def sort(self, reverse=False):
2934 2912 self._ascending = not bool(reverse)
2935 2913
2936 2914 def reverse(self):
2937 2915 if self._ascending is None:
2938 2916 self._list.reverse()
2939 2917 else:
2940 2918 self._ascending = not self._ascending
2941 2919
2942 2920 def __len__(self):
2943 2921 return len(self._list)
2944 2922
2945 2923 def isascending(self):
2946 2924 """Returns True if the collection is ascending order, False if not.
2947 2925
2948 2926 This is part of the mandatory API for smartset."""
2949 2927 if len(self) <= 1:
2950 2928 return True
2951 2929 return self._ascending is not None and self._ascending
2952 2930
2953 2931 def isdescending(self):
2954 2932 """Returns True if the collection is descending order, False if not.
2955 2933
2956 2934 This is part of the mandatory API for smartset."""
2957 2935 if len(self) <= 1:
2958 2936 return True
2959 2937 return self._ascending is not None and not self._ascending
2960 2938
2961 2939 def first(self):
2962 2940 if self:
2963 2941 if self._ascending is None:
2964 2942 return self._list[0]
2965 2943 elif self._ascending:
2966 2944 return self._asclist[0]
2967 2945 else:
2968 2946 return self._asclist[-1]
2969 2947 return None
2970 2948
2971 2949 def last(self):
2972 2950 if self:
2973 2951 if self._ascending is None:
2974 2952 return self._list[-1]
2975 2953 elif self._ascending:
2976 2954 return self._asclist[-1]
2977 2955 else:
2978 2956 return self._asclist[0]
2979 2957 return None
2980 2958
2981 2959 def __repr__(self):
2982 2960 d = {None: '', False: '-', True: '+'}[self._ascending]
2983 2961 s = _formatsetrepr(self._datarepr)
2984 2962 if not s:
2985 2963 s = repr(self._list)
2986 2964 return '<%s%s %s>' % (type(self).__name__, d, s)
2987 2965
2988 2966 class filteredset(abstractsmartset):
2989 2967 """Duck type for baseset class which iterates lazily over the revisions in
2990 2968 the subset and contains a function which tests for membership in the
2991 2969 revset
2992 2970 """
2993 2971 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2994 2972 """
2995 2973 condition: a function that decides whether a revision in the subset
2996 2974 belongs to the revset or not.
2997 2975 condrepr: a tuple of (format, obj, ...), a function or an object that
2998 2976 provides a printable representation of the given condition.
2999 2977 """
3000 2978 self._subset = subset
3001 2979 self._condition = condition
3002 2980 self._condrepr = condrepr
3003 2981
3004 2982 def __contains__(self, x):
3005 2983 return x in self._subset and self._condition(x)
3006 2984
3007 2985 def __iter__(self):
3008 2986 return self._iterfilter(self._subset)
3009 2987
3010 2988 def _iterfilter(self, it):
3011 2989 cond = self._condition
3012 2990 for x in it:
3013 2991 if cond(x):
3014 2992 yield x
3015 2993
3016 2994 @property
3017 2995 def fastasc(self):
3018 2996 it = self._subset.fastasc
3019 2997 if it is None:
3020 2998 return None
3021 2999 return lambda: self._iterfilter(it())
3022 3000
3023 3001 @property
3024 3002 def fastdesc(self):
3025 3003 it = self._subset.fastdesc
3026 3004 if it is None:
3027 3005 return None
3028 3006 return lambda: self._iterfilter(it())
3029 3007
3030 3008 def __nonzero__(self):
3031 3009 fast = self.fastasc
3032 3010 if fast is None:
3033 3011 fast = self.fastdesc
3034 3012 if fast is not None:
3035 3013 it = fast()
3036 3014 else:
3037 3015 it = self
3038 3016
3039 3017 for r in it:
3040 3018 return True
3041 3019 return False
3042 3020
3043 3021 def __len__(self):
3044 3022 # Basic implementation to be changed in future patches.
3045 3023 l = baseset([r for r in self])
3046 3024 return len(l)
3047 3025
3048 3026 def sort(self, reverse=False):
3049 3027 self._subset.sort(reverse=reverse)
3050 3028
3051 3029 def reverse(self):
3052 3030 self._subset.reverse()
3053 3031
3054 3032 def isascending(self):
3055 3033 return self._subset.isascending()
3056 3034
3057 3035 def isdescending(self):
3058 3036 return self._subset.isdescending()
3059 3037
3060 3038 def first(self):
3061 3039 for x in self:
3062 3040 return x
3063 3041 return None
3064 3042
3065 3043 def last(self):
3066 3044 it = None
3067 3045 if self.isascending():
3068 3046 it = self.fastdesc
3069 3047 elif self.isdescending():
3070 3048 it = self.fastasc
3071 3049 if it is not None:
3072 3050 for x in it():
3073 3051 return x
3074 3052 return None #empty case
3075 3053 else:
3076 3054 x = None
3077 3055 for x in self:
3078 3056 pass
3079 3057 return x
3080 3058
3081 3059 def __repr__(self):
3082 3060 xs = [repr(self._subset)]
3083 3061 s = _formatsetrepr(self._condrepr)
3084 3062 if s:
3085 3063 xs.append(s)
3086 3064 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3087 3065
3088 3066 def _iterordered(ascending, iter1, iter2):
3089 3067 """produce an ordered iteration from two iterators with the same order
3090 3068
3091 3069 The ascending parameter is used to indicate the iteration direction.
3092 3070 """
3093 3071 choice = max
3094 3072 if ascending:
3095 3073 choice = min
3096 3074
3097 3075 val1 = None
3098 3076 val2 = None
3099 3077 try:
3100 3078 # Consume both iterators in an ordered way until one is empty
3101 3079 while True:
3102 3080 if val1 is None:
3103 3081 val1 = iter1.next()
3104 3082 if val2 is None:
3105 3083 val2 = iter2.next()
3106 3084 next = choice(val1, val2)
3107 3085 yield next
3108 3086 if val1 == next:
3109 3087 val1 = None
3110 3088 if val2 == next:
3111 3089 val2 = None
3112 3090 except StopIteration:
3113 3091 # Flush any remaining values and consume the other one
3114 3092 it = iter2
3115 3093 if val1 is not None:
3116 3094 yield val1
3117 3095 it = iter1
3118 3096 elif val2 is not None:
3119 3097 # might have been equality and both are empty
3120 3098 yield val2
3121 3099 for val in it:
3122 3100 yield val
3123 3101
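# A small sketch of _iterordered merging two iterators that are already in
# the requested order: values come out ordered and a value present in both
# inputs is yielded only once (plain Python 2 iterators assumed, matching
# the .next() calls above).
def _iterorderedsketch():
    """
    >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
    [1, 2, 3, 5, 6]
    >>> list(_iterordered(False, iter([5, 3, 1]), iter([6, 3, 2])))
    [6, 5, 3, 2, 1]
    """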
3124 3102 class addset(abstractsmartset):
3125 3103 """Represent the addition of two sets
3126 3104
3127 3105 Wrapper structure for lazily adding two structures without losing much
3128 3106 performance on the __contains__ method
3129 3107
3130 3108 If the ascending attribute is set, that means the two structures are
3131 3109 ordered in either an ascending or descending way. Therefore, we can add
3132 3110 them maintaining the order by iterating over both at the same time
3133 3111
3134 3112 >>> xs = baseset([0, 3, 2])
3135 3113 >>> ys = baseset([5, 2, 4])
3136 3114
3137 3115 >>> rs = addset(xs, ys)
3138 3116 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3139 3117 (True, True, False, True, 0, 4)
3140 3118 >>> rs = addset(xs, baseset([]))
3141 3119 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3142 3120 (True, True, False, 0, 2)
3143 3121 >>> rs = addset(baseset([]), baseset([]))
3144 3122 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3145 3123 (False, False, None, None)
3146 3124
3147 3125 iterate unsorted:
3148 3126 >>> rs = addset(xs, ys)
3149 3127 >>> [x for x in rs] # without _genlist
3150 3128 [0, 3, 2, 5, 4]
3151 3129 >>> assert not rs._genlist
3152 3130 >>> len(rs)
3153 3131 5
3154 3132 >>> [x for x in rs] # with _genlist
3155 3133 [0, 3, 2, 5, 4]
3156 3134 >>> assert rs._genlist
3157 3135
3158 3136 iterate ascending:
3159 3137 >>> rs = addset(xs, ys, ascending=True)
3160 3138 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3161 3139 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3162 3140 >>> assert not rs._asclist
3163 3141 >>> len(rs)
3164 3142 5
3165 3143 >>> [x for x in rs], [x for x in rs.fastasc()]
3166 3144 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3167 3145 >>> assert rs._asclist
3168 3146
3169 3147 iterate descending:
3170 3148 >>> rs = addset(xs, ys, ascending=False)
3171 3149 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3172 3150 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3173 3151 >>> assert not rs._asclist
3174 3152 >>> len(rs)
3175 3153 5
3176 3154 >>> [x for x in rs], [x for x in rs.fastdesc()]
3177 3155 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3178 3156 >>> assert rs._asclist
3179 3157
3180 3158 iterate ascending without fastasc:
3181 3159 >>> rs = addset(xs, generatorset(ys), ascending=True)
3182 3160 >>> assert rs.fastasc is None
3183 3161 >>> [x for x in rs]
3184 3162 [0, 2, 3, 4, 5]
3185 3163
3186 3164 iterate descending without fastdesc:
3187 3165 >>> rs = addset(generatorset(xs), ys, ascending=False)
3188 3166 >>> assert rs.fastdesc is None
3189 3167 >>> [x for x in rs]
3190 3168 [5, 4, 3, 2, 0]
3191 3169 """
3192 3170 def __init__(self, revs1, revs2, ascending=None):
3193 3171 self._r1 = revs1
3194 3172 self._r2 = revs2
3195 3173 self._iter = None
3196 3174 self._ascending = ascending
3197 3175 self._genlist = None
3198 3176 self._asclist = None
3199 3177
3200 3178 def __len__(self):
3201 3179 return len(self._list)
3202 3180
3203 3181 def __nonzero__(self):
3204 3182 return bool(self._r1) or bool(self._r2)
3205 3183
3206 3184 @util.propertycache
3207 3185 def _list(self):
3208 3186 if not self._genlist:
3209 3187 self._genlist = baseset(iter(self))
3210 3188 return self._genlist
3211 3189
3212 3190 def __iter__(self):
3213 3191 """Iterate over both collections without repeating elements
3214 3192
3215 3193 If the ascending attribute is not set, iterate over the first one and
3216 3194         then over the second one, checking for membership in the first one so we
3217 3195         don't yield any duplicates.
3218 3196
3219 3197 If the ascending attribute is set, iterate over both collections at the
3220 3198 same time, yielding only one value at a time in the given order.
3221 3199 """
3222 3200 if self._ascending is None:
3223 3201 if self._genlist:
3224 3202 return iter(self._genlist)
3225 3203 def arbitraryordergen():
3226 3204 for r in self._r1:
3227 3205 yield r
3228 3206 inr1 = self._r1.__contains__
3229 3207 for r in self._r2:
3230 3208 if not inr1(r):
3231 3209 yield r
3232 3210 return arbitraryordergen()
3233 3211 # try to use our own fast iterator if it exists
3234 3212 self._trysetasclist()
3235 3213 if self._ascending:
3236 3214 attr = 'fastasc'
3237 3215 else:
3238 3216 attr = 'fastdesc'
3239 3217 it = getattr(self, attr)
3240 3218 if it is not None:
3241 3219 return it()
3242 3220         # maybe one of the two components supports fast iteration
3243 3221 # get iterator for _r1
3244 3222 iter1 = getattr(self._r1, attr)
3245 3223 if iter1 is None:
3246 3224             # let's avoid side effects (not sure it matters)
3247 3225 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3248 3226 else:
3249 3227 iter1 = iter1()
3250 3228 # get iterator for _r2
3251 3229 iter2 = getattr(self._r2, attr)
3252 3230 if iter2 is None:
3253 3231             # let's avoid side effects (not sure it matters)
3254 3232 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3255 3233 else:
3256 3234 iter2 = iter2()
3257 3235 return _iterordered(self._ascending, iter1, iter2)
3258 3236
3259 3237 def _trysetasclist(self):
3260 3238 """populate the _asclist attribute if possible and necessary"""
3261 3239 if self._genlist is not None and self._asclist is None:
3262 3240 self._asclist = sorted(self._genlist)
3263 3241
3264 3242 @property
3265 3243 def fastasc(self):
3266 3244 self._trysetasclist()
3267 3245 if self._asclist is not None:
3268 3246 return self._asclist.__iter__
3269 3247 iter1 = self._r1.fastasc
3270 3248 iter2 = self._r2.fastasc
3271 3249 if None in (iter1, iter2):
3272 3250 return None
3273 3251 return lambda: _iterordered(True, iter1(), iter2())
3274 3252
3275 3253 @property
3276 3254 def fastdesc(self):
3277 3255 self._trysetasclist()
3278 3256 if self._asclist is not None:
3279 3257 return self._asclist.__reversed__
3280 3258 iter1 = self._r1.fastdesc
3281 3259 iter2 = self._r2.fastdesc
3282 3260 if None in (iter1, iter2):
3283 3261 return None
3284 3262 return lambda: _iterordered(False, iter1(), iter2())
3285 3263
3286 3264 def __contains__(self, x):
3287 3265 return x in self._r1 or x in self._r2
3288 3266
3289 3267 def sort(self, reverse=False):
3290 3268 """Sort the added set
3291 3269
3292 3270         For this we use the cached list with all the generated values, and if we
3293 3271         know they are ascending or descending we can sort them in a smart way.
3294 3272 """
3295 3273 self._ascending = not reverse
3296 3274
3297 3275 def isascending(self):
3298 3276 return self._ascending is not None and self._ascending
3299 3277
3300 3278 def isdescending(self):
3301 3279 return self._ascending is not None and not self._ascending
3302 3280
3303 3281 def reverse(self):
3304 3282 if self._ascending is None:
3305 3283 self._list.reverse()
3306 3284 else:
3307 3285 self._ascending = not self._ascending
3308 3286
3309 3287 def first(self):
3310 3288 for x in self:
3311 3289 return x
3312 3290 return None
3313 3291
3314 3292 def last(self):
3315 3293 self.reverse()
3316 3294 val = self.first()
3317 3295 self.reverse()
3318 3296 return val
3319 3297
3320 3298 def __repr__(self):
3321 3299 d = {None: '', False: '-', True: '+'}[self._ascending]
3322 3300 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3323 3301
3324 3302 class generatorset(abstractsmartset):
3325 3303 """Wrap a generator for lazy iteration
3326 3304
3327 3305 Wrapper structure for generators that provides lazy membership and can
3328 3306 be iterated more than once.
3329 3307 When asked for membership it generates values until either it finds the
3330 3308 requested one or has gone through all the elements in the generator
3331 3309 """
3332 3310 def __init__(self, gen, iterasc=None):
3333 3311 """
3334 3312 gen: a generator producing the values for the generatorset.
3335 3313 """
3336 3314 self._gen = gen
3337 3315 self._asclist = None
3338 3316 self._cache = {}
3339 3317 self._genlist = []
3340 3318 self._finished = False
3341 3319 self._ascending = True
3342 3320 if iterasc is not None:
3343 3321 if iterasc:
3344 3322 self.fastasc = self._iterator
3345 3323 self.__contains__ = self._asccontains
3346 3324 else:
3347 3325 self.fastdesc = self._iterator
3348 3326 self.__contains__ = self._desccontains
3349 3327
3350 3328 def __nonzero__(self):
3351 3329 # Do not use 'for r in self' because it will enforce the iteration
3352 3330 # order (default ascending), possibly unrolling a whole descending
3353 3331 # iterator.
3354 3332 if self._genlist:
3355 3333 return True
3356 3334 for r in self._consumegen():
3357 3335 return True
3358 3336 return False
3359 3337
3360 3338 def __contains__(self, x):
3361 3339 if x in self._cache:
3362 3340 return self._cache[x]
3363 3341
3364 3342 # Use new values only, as existing values would be cached.
3365 3343 for l in self._consumegen():
3366 3344 if l == x:
3367 3345 return True
3368 3346
3369 3347 self._cache[x] = False
3370 3348 return False
3371 3349
3372 3350 def _asccontains(self, x):
3373 3351 """version of contains optimised for ascending generator"""
3374 3352 if x in self._cache:
3375 3353 return self._cache[x]
3376 3354
3377 3355 # Use new values only, as existing values would be cached.
3378 3356 for l in self._consumegen():
3379 3357 if l == x:
3380 3358 return True
3381 3359 if l > x:
3382 3360 break
3383 3361
3384 3362 self._cache[x] = False
3385 3363 return False
3386 3364
3387 3365 def _desccontains(self, x):
3388 3366 """version of contains optimised for descending generator"""
3389 3367 if x in self._cache:
3390 3368 return self._cache[x]
3391 3369
3392 3370 # Use new values only, as existing values would be cached.
3393 3371 for l in self._consumegen():
3394 3372 if l == x:
3395 3373 return True
3396 3374 if l < x:
3397 3375 break
3398 3376
3399 3377 self._cache[x] = False
3400 3378 return False
3401 3379
3402 3380 def __iter__(self):
3403 3381 if self._ascending:
3404 3382 it = self.fastasc
3405 3383 else:
3406 3384 it = self.fastdesc
3407 3385 if it is not None:
3408 3386 return it()
3409 3387 # we need to consume the iterator
3410 3388 for x in self._consumegen():
3411 3389 pass
3412 3390 # recall the same code
3413 3391 return iter(self)
3414 3392
3415 3393 def _iterator(self):
3416 3394 if self._finished:
3417 3395 return iter(self._genlist)
3418 3396
3419 3397 # We have to use this complex iteration strategy to allow multiple
3420 3398         # iterations at the same time. We need to be able to catch revisions
3421 3399         # removed from _consumegen and added to genlist in another instance.
3422 3400 #
3423 3401         # Getting rid of it would provide about a 15% speedup on this
3424 3402 # iteration.
3425 3403 genlist = self._genlist
3426 3404 nextrev = self._consumegen().next
3427 3405 _len = len # cache global lookup
3428 3406 def gen():
3429 3407 i = 0
3430 3408 while True:
3431 3409 if i < _len(genlist):
3432 3410 yield genlist[i]
3433 3411 else:
3434 3412 yield nextrev()
3435 3413 i += 1
3436 3414 return gen()
3437 3415
3438 3416 def _consumegen(self):
3439 3417 cache = self._cache
3440 3418 genlist = self._genlist.append
3441 3419 for item in self._gen:
3442 3420 cache[item] = True
3443 3421 genlist(item)
3444 3422 yield item
3445 3423 if not self._finished:
3446 3424 self._finished = True
3447 3425 asc = self._genlist[:]
3448 3426 asc.sort()
3449 3427 self._asclist = asc
3450 3428 self.fastasc = asc.__iter__
3451 3429 self.fastdesc = asc.__reversed__
3452 3430
3453 3431 def __len__(self):
3454 3432 for x in self._consumegen():
3455 3433 pass
3456 3434 return len(self._genlist)
3457 3435
3458 3436 def sort(self, reverse=False):
3459 3437 self._ascending = not reverse
3460 3438
3461 3439 def reverse(self):
3462 3440 self._ascending = not self._ascending
3463 3441
3464 3442 def isascending(self):
3465 3443 return self._ascending
3466 3444
3467 3445 def isdescending(self):
3468 3446 return not self._ascending
3469 3447
3470 3448 def first(self):
3471 3449 if self._ascending:
3472 3450 it = self.fastasc
3473 3451 else:
3474 3452 it = self.fastdesc
3475 3453 if it is None:
3476 3454 # we need to consume all and try again
3477 3455 for x in self._consumegen():
3478 3456 pass
3479 3457 return self.first()
3480 3458 return next(it(), None)
3481 3459
3482 3460 def last(self):
3483 3461 if self._ascending:
3484 3462 it = self.fastdesc
3485 3463 else:
3486 3464 it = self.fastasc
3487 3465 if it is None:
3488 3466 # we need to consume all and try again
3489 3467 for x in self._consumegen():
3490 3468 pass
3491 3469 return self.first()
3492 3470 return next(it(), None)
3493 3471
3494 3472 def __repr__(self):
3495 3473 d = {False: '-', True: '+'}[self._ascending]
3496 3474 return '<%s%s>' % (type(self).__name__, d)
3497 3475
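# A short sketch of generatorset's lazy behaviour (helper name illustrative):
# membership tests consume the wrapped generator only as far as needed, and
# consumed values are cached so the set can be iterated repeatedly.
def _generatorsetsketch():
    """
    >>> gs = generatorset(iter([0, 2, 5]), iterasc=True)
    >>> 2 in gs                 # stops consuming once 2 has been produced
    True
    >>> list(gs), list(gs)      # cached values allow repeated iteration
    ([0, 2, 5], [0, 2, 5])
    """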
3498 3476 class spanset(abstractsmartset):
3499 3477 """Duck type for baseset class which represents a range of revisions and
3500 3478 can work lazily and without having all the range in memory
3501 3479
3502 3480     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3503 3481     notable points:
3504 3482       - when x > y it will be automatically descending,
3505 3483       - revisions filtered by this repoview will be skipped.
3506 3484
3507 3485 """
3508 3486 def __init__(self, repo, start=0, end=None):
3509 3487 """
3510 3488         start: first revision included in the set
3511 3489             (defaults to 0)
3512 3490         end: first revision excluded (last+1)
3513 3491             (defaults to len(repo))
3514 3492
3515 3493 Spanset will be descending if `end` < `start`.
3516 3494 """
3517 3495 if end is None:
3518 3496 end = len(repo)
3519 3497 self._ascending = start <= end
3520 3498 if not self._ascending:
3521 3499             start, end = end + 1, start + 1
3522 3500 self._start = start
3523 3501 self._end = end
3524 3502 self._hiddenrevs = repo.changelog.filteredrevs
3525 3503
3526 3504 def sort(self, reverse=False):
3527 3505 self._ascending = not reverse
3528 3506
3529 3507 def reverse(self):
3530 3508 self._ascending = not self._ascending
3531 3509
3532 3510 def _iterfilter(self, iterrange):
3533 3511 s = self._hiddenrevs
3534 3512 for r in iterrange:
3535 3513 if r not in s:
3536 3514 yield r
3537 3515
3538 3516 def __iter__(self):
3539 3517 if self._ascending:
3540 3518 return self.fastasc()
3541 3519 else:
3542 3520 return self.fastdesc()
3543 3521
3544 3522 def fastasc(self):
3545 3523 iterrange = xrange(self._start, self._end)
3546 3524 if self._hiddenrevs:
3547 3525 return self._iterfilter(iterrange)
3548 3526 return iter(iterrange)
3549 3527
3550 3528 def fastdesc(self):
3551 3529 iterrange = xrange(self._end - 1, self._start - 1, -1)
3552 3530 if self._hiddenrevs:
3553 3531 return self._iterfilter(iterrange)
3554 3532 return iter(iterrange)
3555 3533
3556 3534 def __contains__(self, rev):
3557 3535 hidden = self._hiddenrevs
3558 3536 return ((self._start <= rev < self._end)
3559 3537 and not (hidden and rev in hidden))
3560 3538
3561 3539 def __nonzero__(self):
3562 3540 for r in self:
3563 3541 return True
3564 3542 return False
3565 3543
3566 3544 def __len__(self):
3567 3545 if not self._hiddenrevs:
3568 3546 return abs(self._end - self._start)
3569 3547 else:
3570 3548 count = 0
3571 3549 start = self._start
3572 3550 end = self._end
3573 3551 for rev in self._hiddenrevs:
3574 3552 if (end < rev <= start) or (start <= rev < end):
3575 3553 count += 1
3576 3554 return abs(self._end - self._start) - count
3577 3555
3578 3556 def isascending(self):
3579 3557 return self._ascending
3580 3558
3581 3559 def isdescending(self):
3582 3560 return not self._ascending
3583 3561
3584 3562 def first(self):
3585 3563 if self._ascending:
3586 3564 it = self.fastasc
3587 3565 else:
3588 3566 it = self.fastdesc
3589 3567 for x in it():
3590 3568 return x
3591 3569 return None
3592 3570
3593 3571 def last(self):
3594 3572 if self._ascending:
3595 3573 it = self.fastdesc
3596 3574 else:
3597 3575 it = self.fastasc
3598 3576 for x in it():
3599 3577 return x
3600 3578 return None
3601 3579
3602 3580 def __repr__(self):
3603 3581 d = {False: '-', True: '+'}[self._ascending]
3604 3582 return '<%s%s %d:%d>' % (type(self).__name__, d,
3605 3583 self._start, self._end - 1)
3606 3584
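# A hedged sketch of spanset, assuming `repo` is an existing repository with
# at least 8 revisions and no hidden ones:
#
#   spanset(repo, 2, 7)   # iterates 2, 3, 4, 5, 6 (ascending)
#   spanset(repo, 7, 2)   # start > end, so it iterates 7, 6, 5, 4, 3
#   spanset(repo)         # the whole repo, 0 .. len(repo)-1, ascending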
3607 3585 class fullreposet(spanset):
3608 3586 """a set containing all revisions in the repo
3609 3587
3610 3588 This class exists to host special optimization and magic to handle virtual
3611 3589 revisions such as "null".
3612 3590 """
3613 3591
3614 3592 def __init__(self, repo):
3615 3593 super(fullreposet, self).__init__(repo)
3616 3594
3617 3595 def __and__(self, other):
3618 3596         """As self contains the whole repo, every revision in the other set is
3619 3597         also in self. Therefore `self & other = other`.
3620 3598
3621 3599 This boldly assumes the other contains valid revs only.
3622 3600 """
3623 3601         # other is not a smartset, make it so
3624 3602         if not util.safehasattr(other, 'isascending'):
3625 3603             # filter out hidden revisions
3626 3604             # (this boldly assumes all smartsets are pure)
3627 3605 #
3628 3606 # `other` was used with "&", let's assume this is a set like
3629 3607 # object.
3630 3608 other = baseset(other - self._hiddenrevs)
3631 3609
3632 3610 # XXX As fullreposet is also used as bootstrap, this is wrong.
3633 3611 #
3634 3612 # With a giveme312() revset returning [3,1,2], this makes
3635 3613 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3636 3614         # We cannot just drop it because other usages still need to sort it:
3637 3615 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3638 3616 #
3639 3617         # There are also some faulty revset implementations that rely on it
3640 3618 # (eg: children as of its state in e8075329c5fb)
3641 3619 #
3642 3620 # When we fix the two points above we can move this into the if clause
3643 3621 other.sort(reverse=self.isdescending())
3644 3622 return other
3645 3623
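# A hedged sketch of the fullreposet.__and__ shortcut, assuming `repo` is an
# existing repository object:
#
#   rs = fullreposet(repo) & baseset([3, 1, 2])
#   # -> the same baseset, re-sorted to match fullreposet's ascending
#   #    order: 1, 2, 3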
3646 3624 def prettyformatset(revs):
3647 3625 lines = []
3648 3626 rs = repr(revs)
3649 3627 p = 0
3650 3628 while p < len(rs):
3651 3629 q = rs.find('<', p + 1)
3652 3630 if q < 0:
3653 3631 q = len(rs)
3654 3632 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3655 3633 assert l >= 0
3656 3634 lines.append((l, rs[p:q].rstrip()))
3657 3635 p = q
3658 3636 return '\n'.join(' ' * l + s for l, s in lines)
3659 3637
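# A small sketch of prettyformatset on a nested smartset (helper name
# illustrative): each line is indented by one space per unclosed '<'.
def _prettyformatsetsketch():
    """
    >>> rs = addset(generatorset(iter([])), generatorset(iter([])))
    >>> print(prettyformatset(rs))
    <addset
     <generatorset+>,
     <generatorset+>>
    """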
3660 3638 def loadpredicate(ui, extname, registrarobj):
3661 3639 """Load revset predicates from specified registrarobj
3662 3640 """
3663 3641 for name, func in registrarobj._table.iteritems():
3664 3642 symbols[name] = func
3665 3643 if func._safe:
3666 3644 safesymbols.add(name)
3667 3645
3668 3646 # load built-in predicates explicitly to setup safesymbols
3669 3647 loadpredicate(None, None, predicate)
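# A hedged sketch of how an extension would feed loadpredicate, assuming the
# registrar-based API suggested by the `registrar` import and the built-in
# `predicate` table loaded above (the predicate name and function below are
# purely illustrative):
#
#   # in the extension module:
#   from mercurial import registrar
#   revsetpredicate = registrar.revsetpredicate()
#
#   @revsetpredicate('fictional()', safe=True)
#   def fictional(repo, subset, x):
#       """changesets matched by the illustrative predicate"""
#       return subset
#
#   # at load time Mercurial is expected to call
#   #   loadpredicate(ui, extname, revsetpredicate)
#   # copying the table into `symbols` and recording safe predicates in
#   # `safesymbols`.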
3670 3648
3671 3649 # tell hggettext to extract docstrings from these functions:
3672 3650 i18nfunctions = symbols.values()