revset: inline isvalidsymbol() and getsymbol() into _parsealiasdecl()...
Yuya Nishihara
r28706:b33ca687 default
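Before the hunk itself, here is a minimal standalone sketch (not part of the diff) of the refactoring this changeset performs: the one-line helpers isvalidsymbol() and getsymbol() are removed and their bodies are inlined at the call sites, as the hunk below shows for isvalidfunc() and getfuncname() (and, per the commit message, in _parsealiasdecl(), which is outside this section). The parse tree shape ('func', ('symbol', name), args) matches what func() and getset() in this file assume; the example tree value is hypothetical.

# sketch only -- not Mercurial code; illustrates the inlining shown in the hunk below
def isvalidfunc_old(tree):
    """Old style: delegates the check to the removed isvalidsymbol() helper."""
    def isvalidsymbol(t):          # helper deleted by this changeset
        return t[0] == 'symbol'
    return tree[0] == 'func' and isvalidsymbol(tree[1])

def isvalidfunc_new(tree):
    """New style: the symbol check is inlined, matching the updated source."""
    return tree[0] == 'func' and tree[1][0] == 'symbol'

if __name__ == '__main__':
    # hypothetical parsed revset node for "ancestors(tip)"
    tree = ('func', ('symbol', 'ancestors'), ('symbol', 'tip'))
    assert isvalidfunc_old(tree) == isvalidfunc_new(tree) == True

Both variants return the same result; the change only removes an indirection through trivially small helpers.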
@@ -1,3617 +1,3605
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 def parseerrordetail(inst):
304 304 """Compose error message from specified ParseError object
305 305 """
306 306 if len(inst.args) > 1:
307 307 return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308 else:
309 309 return inst.args[0]
310 310
311 311 # helpers
312 312
313 313 def getstring(x, err):
314 314 if x and (x[0] == 'string' or x[0] == 'symbol'):
315 315 return x[1]
316 316 raise error.ParseError(err)
317 317
318 318 def getlist(x):
319 319 if not x:
320 320 return []
321 321 if x[0] == 'list':
322 322 return list(x[1:])
323 323 return [x]
324 324
325 325 def getargs(x, min, max, err):
326 326 l = getlist(x)
327 327 if len(l) < min or (max >= 0 and len(l) > max):
328 328 raise error.ParseError(err)
329 329 return l
330 330
331 331 def getargsdict(x, funcname, keys):
332 332 return parser.buildargsdict(getlist(x), funcname, keys.split(),
333 333 keyvaluenode='keyvalue', keynode='symbol')
334 334
335 def isvalidsymbol(tree):
336 """Examine whether specified ``tree`` is valid ``symbol`` or not
337 """
338 return tree[0] == 'symbol'
339
340 def getsymbol(tree):
341 """Get symbol name from valid ``symbol`` in ``tree``
342
343 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
344 """
345 return tree[1]
346
347 335 def isvalidfunc(tree):
348 336 """Examine whether specified ``tree`` is valid ``func`` or not
349 337 """
350 return tree[0] == 'func' and isvalidsymbol(tree[1])
338 return tree[0] == 'func' and tree[1][0] == 'symbol'
351 339
352 340 def getfuncname(tree):
353 341 """Get function name from valid ``func`` in ``tree``
354 342
355 343 This assumes that ``tree`` is already examined by ``isvalidfunc``.
356 344 """
357 return getsymbol(tree[1])
345 return tree[1][1]
358 346
359 347 def getfuncargs(tree):
360 348 """Get list of function arguments from valid ``func`` in ``tree``
361 349
362 350 This assumes that ``tree`` is already examined by ``isvalidfunc``.
363 351 """
364 352 return getlist(tree[2])
365 353
366 354 def getset(repo, subset, x):
367 355 if not x:
368 356 raise error.ParseError(_("missing argument"))
369 357 s = methods[x[0]](repo, subset, *x[1:])
370 358 if util.safehasattr(s, 'isascending'):
371 359 return s
372 360 if (repo.ui.configbool('devel', 'all-warnings')
373 361 or repo.ui.configbool('devel', 'old-revset')):
374 362 # else case should not happen, because all non-func are internal,
375 363 # ignoring for now.
376 364 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
377 365 repo.ui.develwarn('revset "%s" use list instead of smartset, '
378 366 '(upgrade your code)' % x[1][1])
379 367 return baseset(s)
380 368
381 369 def _getrevsource(repo, r):
382 370 extra = repo[r].extra()
383 371 for label in ('source', 'transplant_source', 'rebase_source'):
384 372 if label in extra:
385 373 try:
386 374 return repo[extra[label]].rev()
387 375 except error.RepoLookupError:
388 376 pass
389 377 return None
390 378
391 379 # operator methods
392 380
393 381 def stringset(repo, subset, x):
394 382 x = repo[x].rev()
395 383 if (x in subset
396 384 or x == node.nullrev and isinstance(subset, fullreposet)):
397 385 return baseset([x])
398 386 return baseset()
399 387
400 388 def rangeset(repo, subset, x, y):
401 389 m = getset(repo, fullreposet(repo), x)
402 390 n = getset(repo, fullreposet(repo), y)
403 391
404 392 if not m or not n:
405 393 return baseset()
406 394 m, n = m.first(), n.last()
407 395
408 396 if m == n:
409 397 r = baseset([m])
410 398 elif n == node.wdirrev:
411 399 r = spanset(repo, m, len(repo)) + baseset([n])
412 400 elif m == node.wdirrev:
413 401 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
414 402 elif m < n:
415 403 r = spanset(repo, m, n + 1)
416 404 else:
417 405 r = spanset(repo, m, n - 1)
418 406 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
419 407 # necessary to ensure we preserve the order in subset.
420 408 #
421 409 # This has performance implication, carrying the sorting over when possible
422 410 # would be more efficient.
423 411 return r & subset
424 412
425 413 def dagrange(repo, subset, x, y):
426 414 r = fullreposet(repo)
427 415 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
428 416 includepath=True)
429 417 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
430 418 # necessary to ensure we preserve the order in subset.
431 419 return xs & subset
432 420
433 421 def andset(repo, subset, x, y):
434 422 return getset(repo, getset(repo, subset, x), y)
435 423
436 424 def differenceset(repo, subset, x, y):
437 425 return getset(repo, subset, x) - getset(repo, subset, y)
438 426
439 427 def orset(repo, subset, *xs):
440 428 assert xs
441 429 if len(xs) == 1:
442 430 return getset(repo, subset, xs[0])
443 431 p = len(xs) // 2
444 432 a = orset(repo, subset, *xs[:p])
445 433 b = orset(repo, subset, *xs[p:])
446 434 return a + b
447 435
448 436 def notset(repo, subset, x):
449 437 return subset - getset(repo, subset, x)
450 438
451 439 def listset(repo, subset, *xs):
452 440 raise error.ParseError(_("can't use a list in this context"),
453 441 hint=_('see hg help "revsets.x or y"'))
454 442
455 443 def keyvaluepair(repo, subset, k, v):
456 444 raise error.ParseError(_("can't use a key-value pair in this context"))
457 445
458 446 def func(repo, subset, a, b):
459 447 if a[0] == 'symbol' and a[1] in symbols:
460 448 return symbols[a[1]](repo, subset, b)
461 449
462 450 keep = lambda fn: getattr(fn, '__doc__', None) is not None
463 451
464 452 syms = [s for (s, fn) in symbols.items() if keep(fn)]
465 453 raise error.UnknownIdentifier(a[1], syms)
466 454
467 455 # functions
468 456
469 457 # symbols are callables like:
470 458 # fn(repo, subset, x)
471 459 # with:
472 460 # repo - current repository instance
473 461 # subset - of revisions to be examined
474 462 # x - argument in tree form
475 463 symbols = {}
476 464
477 465 # symbols which can't be used for a DoS attack for any given input
478 466 # (e.g. those which accept regexes as plain strings shouldn't be included)
479 467 # functions that just return a lot of changesets (like all) don't count here
480 468 safesymbols = set()
481 469
482 470 predicate = registrar.revsetpredicate()
483 471
484 472 @predicate('_destupdate')
485 473 def _destupdate(repo, subset, x):
486 474 # experimental revset for update destination
487 475 args = getargsdict(x, 'limit', 'clean check')
488 476 return subset & baseset([destutil.destupdate(repo, **args)[0]])
489 477
490 478 @predicate('_destmerge')
491 479 def _destmerge(repo, subset, x):
492 480 # experimental revset for merge destination
493 481 sourceset = None
494 482 if x is not None:
495 483 sourceset = getset(repo, fullreposet(repo), x)
496 484 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
497 485
498 486 @predicate('adds(pattern)', safe=True)
499 487 def adds(repo, subset, x):
500 488 """Changesets that add a file matching pattern.
501 489
502 490 The pattern without explicit kind like ``glob:`` is expected to be
503 491 relative to the current directory and match against a file or a
504 492 directory.
505 493 """
506 494 # i18n: "adds" is a keyword
507 495 pat = getstring(x, _("adds requires a pattern"))
508 496 return checkstatus(repo, subset, pat, 1)
509 497
510 498 @predicate('ancestor(*changeset)', safe=True)
511 499 def ancestor(repo, subset, x):
512 500 """A greatest common ancestor of the changesets.
513 501
514 502 Accepts 0 or more changesets.
515 503 Will return empty list when passed no args.
516 504 Greatest common ancestor of a single changeset is that changeset.
517 505 """
518 506 # i18n: "ancestor" is a keyword
519 507 l = getlist(x)
520 508 rl = fullreposet(repo)
521 509 anc = None
522 510
523 511 # (getset(repo, rl, i) for i in l) generates a list of lists
524 512 for revs in (getset(repo, rl, i) for i in l):
525 513 for r in revs:
526 514 if anc is None:
527 515 anc = repo[r]
528 516 else:
529 517 anc = anc.ancestor(repo[r])
530 518
531 519 if anc is not None and anc.rev() in subset:
532 520 return baseset([anc.rev()])
533 521 return baseset()
534 522
535 523 def _ancestors(repo, subset, x, followfirst=False):
536 524 heads = getset(repo, fullreposet(repo), x)
537 525 if not heads:
538 526 return baseset()
539 527 s = _revancestors(repo, heads, followfirst)
540 528 return subset & s
541 529
542 530 @predicate('ancestors(set)', safe=True)
543 531 def ancestors(repo, subset, x):
544 532 """Changesets that are ancestors of a changeset in set.
545 533 """
546 534 return _ancestors(repo, subset, x)
547 535
548 536 @predicate('_firstancestors', safe=True)
549 537 def _firstancestors(repo, subset, x):
550 538 # ``_firstancestors(set)``
551 539 # Like ``ancestors(set)`` but follows only the first parents.
552 540 return _ancestors(repo, subset, x, followfirst=True)
553 541
554 542 def ancestorspec(repo, subset, x, n):
555 543 """``set~n``
556 544 Changesets that are the Nth ancestor (first parents only) of a changeset
557 545 in set.
558 546 """
559 547 try:
560 548 n = int(n[1])
561 549 except (TypeError, ValueError):
562 550 raise error.ParseError(_("~ expects a number"))
563 551 ps = set()
564 552 cl = repo.changelog
565 553 for r in getset(repo, fullreposet(repo), x):
566 554 for i in range(n):
567 555 r = cl.parentrevs(r)[0]
568 556 ps.add(r)
569 557 return subset & ps
570 558
571 559 @predicate('author(string)', safe=True)
572 560 def author(repo, subset, x):
573 561 """Alias for ``user(string)``.
574 562 """
575 563 # i18n: "author" is a keyword
576 564 n = encoding.lower(getstring(x, _("author requires a string")))
577 565 kind, pattern, matcher = _substringmatcher(n)
578 566 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
579 567 condrepr=('<user %r>', n))
580 568
581 569 @predicate('bisect(string)', safe=True)
582 570 def bisect(repo, subset, x):
583 571 """Changesets marked in the specified bisect status:
584 572
585 573 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
586 574 - ``goods``, ``bads`` : csets topologically good/bad
587 575 - ``range`` : csets taking part in the bisection
588 576 - ``pruned`` : csets that are goods, bads or skipped
589 577 - ``untested`` : csets whose fate is yet unknown
590 578 - ``ignored`` : csets ignored due to DAG topology
591 579 - ``current`` : the cset currently being bisected
592 580 """
593 581 # i18n: "bisect" is a keyword
594 582 status = getstring(x, _("bisect requires a string")).lower()
595 583 state = set(hbisect.get(repo, status))
596 584 return subset & state
597 585
598 586 # Backward-compatibility
599 587 # - no help entry so that we do not advertise it any more
600 588 @predicate('bisected', safe=True)
601 589 def bisected(repo, subset, x):
602 590 return bisect(repo, subset, x)
603 591
604 592 @predicate('bookmark([name])', safe=True)
605 593 def bookmark(repo, subset, x):
606 594 """The named bookmark or all bookmarks.
607 595
608 596 If `name` starts with `re:`, the remainder of the name is treated as
609 597 a regular expression. To match a bookmark that actually starts with `re:`,
610 598 use the prefix `literal:`.
611 599 """
612 600 # i18n: "bookmark" is a keyword
613 601 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
614 602 if args:
615 603 bm = getstring(args[0],
616 604 # i18n: "bookmark" is a keyword
617 605 _('the argument to bookmark must be a string'))
618 606 kind, pattern, matcher = util.stringmatcher(bm)
619 607 bms = set()
620 608 if kind == 'literal':
621 609 bmrev = repo._bookmarks.get(pattern, None)
622 610 if not bmrev:
623 611 raise error.RepoLookupError(_("bookmark '%s' does not exist")
624 612 % pattern)
625 613 bms.add(repo[bmrev].rev())
626 614 else:
627 615 matchrevs = set()
628 616 for name, bmrev in repo._bookmarks.iteritems():
629 617 if matcher(name):
630 618 matchrevs.add(bmrev)
631 619 if not matchrevs:
632 620 raise error.RepoLookupError(_("no bookmarks exist"
633 621 " that match '%s'") % pattern)
634 622 for bmrev in matchrevs:
635 623 bms.add(repo[bmrev].rev())
636 624 else:
637 625 bms = set([repo[r].rev()
638 626 for r in repo._bookmarks.values()])
639 627 bms -= set([node.nullrev])
640 628 return subset & bms
641 629
642 630 @predicate('branch(string or set)', safe=True)
643 631 def branch(repo, subset, x):
644 632 """
645 633 All changesets belonging to the given branch or the branches of the given
646 634 changesets.
647 635
648 636 If `string` starts with `re:`, the remainder of the name is treated as
649 637 a regular expression. To match a branch that actually starts with `re:`,
650 638 use the prefix `literal:`.
651 639 """
652 640 getbi = repo.revbranchcache().branchinfo
653 641
654 642 try:
655 643 b = getstring(x, '')
656 644 except error.ParseError:
657 645 # not a string, but another revspec, e.g. tip()
658 646 pass
659 647 else:
660 648 kind, pattern, matcher = util.stringmatcher(b)
661 649 if kind == 'literal':
662 650 # note: falls through to the revspec case if no branch with
663 651 # this name exists and pattern kind is not specified explicitly
664 652 if pattern in repo.branchmap():
665 653 return subset.filter(lambda r: matcher(getbi(r)[0]),
666 654 condrepr=('<branch %r>', b))
667 655 if b.startswith('literal:'):
668 656 raise error.RepoLookupError(_("branch '%s' does not exist")
669 657 % pattern)
670 658 else:
671 659 return subset.filter(lambda r: matcher(getbi(r)[0]),
672 660 condrepr=('<branch %r>', b))
673 661
674 662 s = getset(repo, fullreposet(repo), x)
675 663 b = set()
676 664 for r in s:
677 665 b.add(getbi(r)[0])
678 666 c = s.__contains__
679 667 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
680 668 condrepr=lambda: '<branch %r>' % sorted(b))
681 669
682 670 @predicate('bumped()', safe=True)
683 671 def bumped(repo, subset, x):
684 672 """Mutable changesets marked as successors of public changesets.
685 673
686 674 Only non-public and non-obsolete changesets can be `bumped`.
687 675 """
688 676 # i18n: "bumped" is a keyword
689 677 getargs(x, 0, 0, _("bumped takes no arguments"))
690 678 bumped = obsmod.getrevs(repo, 'bumped')
691 679 return subset & bumped
692 680
693 681 @predicate('bundle()', safe=True)
694 682 def bundle(repo, subset, x):
695 683 """Changesets in the bundle.
696 684
697 685 Bundle must be specified by the -R option."""
698 686
699 687 try:
700 688 bundlerevs = repo.changelog.bundlerevs
701 689 except AttributeError:
702 690 raise error.Abort(_("no bundle provided - specify with -R"))
703 691 return subset & bundlerevs
704 692
705 693 def checkstatus(repo, subset, pat, field):
706 694 hasset = matchmod.patkind(pat) == 'set'
707 695
708 696 mcache = [None]
709 697 def matches(x):
710 698 c = repo[x]
711 699 if not mcache[0] or hasset:
712 700 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
713 701 m = mcache[0]
714 702 fname = None
715 703 if not m.anypats() and len(m.files()) == 1:
716 704 fname = m.files()[0]
717 705 if fname is not None:
718 706 if fname not in c.files():
719 707 return False
720 708 else:
721 709 for f in c.files():
722 710 if m(f):
723 711 break
724 712 else:
725 713 return False
726 714 files = repo.status(c.p1().node(), c.node())[field]
727 715 if fname is not None:
728 716 if fname in files:
729 717 return True
730 718 else:
731 719 for f in files:
732 720 if m(f):
733 721 return True
734 722
735 723 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
736 724
737 725 def _children(repo, narrow, parentset):
738 726 if not parentset:
739 727 return baseset()
740 728 cs = set()
741 729 pr = repo.changelog.parentrevs
742 730 minrev = parentset.min()
743 731 for r in narrow:
744 732 if r <= minrev:
745 733 continue
746 734 for p in pr(r):
747 735 if p in parentset:
748 736 cs.add(r)
749 737 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
750 738 # This does not break because of other fullreposet misbehavior.
751 739 return baseset(cs)
752 740
753 741 @predicate('children(set)', safe=True)
754 742 def children(repo, subset, x):
755 743 """Child changesets of changesets in set.
756 744 """
757 745 s = getset(repo, fullreposet(repo), x)
758 746 cs = _children(repo, subset, s)
759 747 return subset & cs
760 748
761 749 @predicate('closed()', safe=True)
762 750 def closed(repo, subset, x):
763 751 """Changeset is closed.
764 752 """
765 753 # i18n: "closed" is a keyword
766 754 getargs(x, 0, 0, _("closed takes no arguments"))
767 755 return subset.filter(lambda r: repo[r].closesbranch(),
768 756 condrepr='<branch closed>')
769 757
770 758 @predicate('contains(pattern)')
771 759 def contains(repo, subset, x):
772 760 """The revision's manifest contains a file matching pattern (but might not
773 761 modify it). See :hg:`help patterns` for information about file patterns.
774 762
775 763 The pattern without explicit kind like ``glob:`` is expected to be
776 764 relative to the current directory and match against a file exactly
777 765 for efficiency.
778 766 """
779 767 # i18n: "contains" is a keyword
780 768 pat = getstring(x, _("contains requires a pattern"))
781 769
782 770 def matches(x):
783 771 if not matchmod.patkind(pat):
784 772 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
785 773 if pats in repo[x]:
786 774 return True
787 775 else:
788 776 c = repo[x]
789 777 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
790 778 for f in c.manifest():
791 779 if m(f):
792 780 return True
793 781 return False
794 782
795 783 return subset.filter(matches, condrepr=('<contains %r>', pat))
796 784
797 785 @predicate('converted([id])', safe=True)
798 786 def converted(repo, subset, x):
799 787 """Changesets converted from the given identifier in the old repository if
800 788 present, or all converted changesets if no identifier is specified.
801 789 """
802 790
803 791 # There is exactly no chance of resolving the revision, so do a simple
804 792 # string compare and hope for the best
805 793
806 794 rev = None
807 795 # i18n: "converted" is a keyword
808 796 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
809 797 if l:
810 798 # i18n: "converted" is a keyword
811 799 rev = getstring(l[0], _('converted requires a revision'))
812 800
813 801 def _matchvalue(r):
814 802 source = repo[r].extra().get('convert_revision', None)
815 803 return source is not None and (rev is None or source.startswith(rev))
816 804
817 805 return subset.filter(lambda r: _matchvalue(r),
818 806 condrepr=('<converted %r>', rev))
819 807
820 808 @predicate('date(interval)', safe=True)
821 809 def date(repo, subset, x):
822 810 """Changesets within the interval, see :hg:`help dates`.
823 811 """
824 812 # i18n: "date" is a keyword
825 813 ds = getstring(x, _("date requires a string"))
826 814 dm = util.matchdate(ds)
827 815 return subset.filter(lambda x: dm(repo[x].date()[0]),
828 816 condrepr=('<date %r>', ds))
829 817
830 818 @predicate('desc(string)', safe=True)
831 819 def desc(repo, subset, x):
832 820 """Search commit message for string. The match is case-insensitive.
833 821 """
834 822 # i18n: "desc" is a keyword
835 823 ds = encoding.lower(getstring(x, _("desc requires a string")))
836 824
837 825 def matches(x):
838 826 c = repo[x]
839 827 return ds in encoding.lower(c.description())
840 828
841 829 return subset.filter(matches, condrepr=('<desc %r>', ds))
842 830
843 831 def _descendants(repo, subset, x, followfirst=False):
844 832 roots = getset(repo, fullreposet(repo), x)
845 833 if not roots:
846 834 return baseset()
847 835 s = _revdescendants(repo, roots, followfirst)
848 836
849 837 # Both sets need to be ascending in order to lazily return the union
850 838 # in the correct order.
851 839 base = subset & roots
852 840 desc = subset & s
853 841 result = base + desc
854 842 if subset.isascending():
855 843 result.sort()
856 844 elif subset.isdescending():
857 845 result.sort(reverse=True)
858 846 else:
859 847 result = subset & result
860 848 return result
861 849
862 850 @predicate('descendants(set)', safe=True)
863 851 def descendants(repo, subset, x):
864 852 """Changesets which are descendants of changesets in set.
865 853 """
866 854 return _descendants(repo, subset, x)
867 855
868 856 @predicate('_firstdescendants', safe=True)
869 857 def _firstdescendants(repo, subset, x):
870 858 # ``_firstdescendants(set)``
871 859 # Like ``descendants(set)`` but follows only the first parents.
872 860 return _descendants(repo, subset, x, followfirst=True)
873 861
874 862 @predicate('destination([set])', safe=True)
875 863 def destination(repo, subset, x):
876 864 """Changesets that were created by a graft, transplant or rebase operation,
877 865 with the given revisions specified as the source. Omitting the optional set
878 866 is the same as passing all().
879 867 """
880 868 if x is not None:
881 869 sources = getset(repo, fullreposet(repo), x)
882 870 else:
883 871 sources = fullreposet(repo)
884 872
885 873 dests = set()
886 874
887 875 # subset contains all of the possible destinations that can be returned, so
888 876 # iterate over them and see if their source(s) were provided in the arg set.
889 877 # Even if the immediate src of r is not in the arg set, src's source (or
890 878 # further back) may be. Scanning back further than the immediate src allows
891 879 # transitive transplants and rebases to yield the same results as transitive
892 880 # grafts.
893 881 for r in subset:
894 882 src = _getrevsource(repo, r)
895 883 lineage = None
896 884
897 885 while src is not None:
898 886 if lineage is None:
899 887 lineage = list()
900 888
901 889 lineage.append(r)
902 890
903 891 # The visited lineage is a match if the current source is in the arg
904 892 # set. Since every candidate dest is visited by way of iterating
905 893 # subset, any dests further back in the lineage will be tested by a
906 894 # different iteration over subset. Likewise, if the src was already
907 895 # selected, the current lineage can be selected without going back
908 896 # further.
909 897 if src in sources or src in dests:
910 898 dests.update(lineage)
911 899 break
912 900
913 901 r = src
914 902 src = _getrevsource(repo, r)
915 903
916 904 return subset.filter(dests.__contains__,
917 905 condrepr=lambda: '<destination %r>' % sorted(dests))
918 906
919 907 @predicate('divergent()', safe=True)
920 908 def divergent(repo, subset, x):
921 909 """
922 910 Final successors of changesets with an alternative set of final successors.
923 911 """
924 912 # i18n: "divergent" is a keyword
925 913 getargs(x, 0, 0, _("divergent takes no arguments"))
926 914 divergent = obsmod.getrevs(repo, 'divergent')
927 915 return subset & divergent
928 916
929 917 @predicate('extinct()', safe=True)
930 918 def extinct(repo, subset, x):
931 919 """Obsolete changesets with obsolete descendants only.
932 920 """
933 921 # i18n: "extinct" is a keyword
934 922 getargs(x, 0, 0, _("extinct takes no arguments"))
935 923 extincts = obsmod.getrevs(repo, 'extinct')
936 924 return subset & extincts
937 925
938 926 @predicate('extra(label, [value])', safe=True)
939 927 def extra(repo, subset, x):
940 928 """Changesets with the given label in the extra metadata, with the given
941 929 optional value.
942 930
943 931 If `value` starts with `re:`, the remainder of the value is treated as
944 932 a regular expression. To match a value that actually starts with `re:`,
945 933 use the prefix `literal:`.
946 934 """
947 935 args = getargsdict(x, 'extra', 'label value')
948 936 if 'label' not in args:
949 937 # i18n: "extra" is a keyword
950 938 raise error.ParseError(_('extra takes at least 1 argument'))
951 939 # i18n: "extra" is a keyword
952 940 label = getstring(args['label'], _('first argument to extra must be '
953 941 'a string'))
954 942 value = None
955 943
956 944 if 'value' in args:
957 945 # i18n: "extra" is a keyword
958 946 value = getstring(args['value'], _('second argument to extra must be '
959 947 'a string'))
960 948 kind, value, matcher = util.stringmatcher(value)
961 949
962 950 def _matchvalue(r):
963 951 extra = repo[r].extra()
964 952 return label in extra and (value is None or matcher(extra[label]))
965 953
966 954 return subset.filter(lambda r: _matchvalue(r),
967 955 condrepr=('<extra[%r] %r>', label, value))
968 956
969 957 @predicate('filelog(pattern)', safe=True)
970 958 def filelog(repo, subset, x):
971 959 """Changesets connected to the specified filelog.
972 960
973 961 For performance reasons, visits only revisions mentioned in the file-level
974 962 filelog, rather than filtering through all changesets (much faster, but
975 963 doesn't include deletes or duplicate changes). For a slower, more accurate
976 964 result, use ``file()``.
977 965
978 966 The pattern without explicit kind like ``glob:`` is expected to be
979 967 relative to the current directory and match against a file exactly
980 968 for efficiency.
981 969
982 970 If some linkrev points to revisions filtered by the current repoview, we'll
983 971 work around it to return a non-filtered value.
984 972 """
985 973
986 974 # i18n: "filelog" is a keyword
987 975 pat = getstring(x, _("filelog requires a pattern"))
988 976 s = set()
989 977 cl = repo.changelog
990 978
991 979 if not matchmod.patkind(pat):
992 980 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
993 981 files = [f]
994 982 else:
995 983 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
996 984 files = (f for f in repo[None] if m(f))
997 985
998 986 for f in files:
999 987 fl = repo.file(f)
1000 988 known = {}
1001 989 scanpos = 0
1002 990 for fr in list(fl):
1003 991 fn = fl.node(fr)
1004 992 if fn in known:
1005 993 s.add(known[fn])
1006 994 continue
1007 995
1008 996 lr = fl.linkrev(fr)
1009 997 if lr in cl:
1010 998 s.add(lr)
1011 999 elif scanpos is not None:
1012 1000 # lowest matching changeset is filtered, scan further
1013 1001 # ahead in changelog
1014 1002 start = max(lr, scanpos) + 1
1015 1003 scanpos = None
1016 1004 for r in cl.revs(start):
1017 1005 # minimize parsing of non-matching entries
1018 1006 if f in cl.revision(r) and f in cl.readfiles(r):
1019 1007 try:
1020 1008 # try to use manifest delta fastpath
1021 1009 n = repo[r].filenode(f)
1022 1010 if n not in known:
1023 1011 if n == fn:
1024 1012 s.add(r)
1025 1013 scanpos = r
1026 1014 break
1027 1015 else:
1028 1016 known[n] = r
1029 1017 except error.ManifestLookupError:
1030 1018 # deletion in changelog
1031 1019 continue
1032 1020
1033 1021 return subset & s
1034 1022
1035 1023 @predicate('first(set, [n])', safe=True)
1036 1024 def first(repo, subset, x):
1037 1025 """An alias for limit().
1038 1026 """
1039 1027 return limit(repo, subset, x)
1040 1028
1041 1029 def _follow(repo, subset, x, name, followfirst=False):
1042 1030 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1043 1031 c = repo['.']
1044 1032 if l:
1045 1033 x = getstring(l[0], _("%s expected a pattern") % name)
1046 1034 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1047 1035 ctx=repo[None], default='path')
1048 1036
1049 1037 files = c.manifest().walk(matcher)
1050 1038
1051 1039 s = set()
1052 1040 for fname in files:
1053 1041 fctx = c[fname]
1054 1042 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1055 1043 # include the revision responsible for the most recent version
1056 1044 s.add(fctx.introrev())
1057 1045 else:
1058 1046 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1059 1047
1060 1048 return subset & s
1061 1049
1062 1050 @predicate('follow([pattern])', safe=True)
1063 1051 def follow(repo, subset, x):
1064 1052 """
1065 1053 An alias for ``::.`` (ancestors of the working directory's first parent).
1066 1054 If pattern is specified, the histories of files matching given
1067 1055 pattern is followed, including copies.
1068 1056 """
1069 1057 return _follow(repo, subset, x, 'follow')
1070 1058
1071 1059 @predicate('_followfirst', safe=True)
1072 1060 def _followfirst(repo, subset, x):
1073 1061 # ``followfirst([pattern])``
1074 1062 # Like ``follow([pattern])`` but follows only the first parent of
1075 1063 # every revisions or files revisions.
1076 1064 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1077 1065
1078 1066 @predicate('all()', safe=True)
1079 1067 def getall(repo, subset, x):
1080 1068 """All changesets, the same as ``0:tip``.
1081 1069 """
1082 1070 # i18n: "all" is a keyword
1083 1071 getargs(x, 0, 0, _("all takes no arguments"))
1084 1072 return subset & spanset(repo) # drop "null" if any
1085 1073
1086 1074 @predicate('grep(regex)')
1087 1075 def grep(repo, subset, x):
1088 1076 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1089 1077 to ensure special escape characters are handled correctly. Unlike
1090 1078 ``keyword(string)``, the match is case-sensitive.
1091 1079 """
1092 1080 try:
1093 1081 # i18n: "grep" is a keyword
1094 1082 gr = re.compile(getstring(x, _("grep requires a string")))
1095 1083 except re.error as e:
1096 1084 raise error.ParseError(_('invalid match pattern: %s') % e)
1097 1085
1098 1086 def matches(x):
1099 1087 c = repo[x]
1100 1088 for e in c.files() + [c.user(), c.description()]:
1101 1089 if gr.search(e):
1102 1090 return True
1103 1091 return False
1104 1092
1105 1093 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1106 1094
1107 1095 @predicate('_matchfiles', safe=True)
1108 1096 def _matchfiles(repo, subset, x):
1109 1097 # _matchfiles takes a revset list of prefixed arguments:
1110 1098 #
1111 1099 # [p:foo, i:bar, x:baz]
1112 1100 #
1113 1101 # builds a match object from them and filters subset. Allowed
1114 1102 # prefixes are 'p:' for regular patterns, 'i:' for include
1115 1103 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1116 1104 # a revision identifier, or the empty string to reference the
1117 1105 # working directory, from which the match object is
1118 1106 # initialized. Use 'd:' to set the default matching mode, default
1119 1107 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1120 1108
1121 1109 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1122 1110 pats, inc, exc = [], [], []
1123 1111 rev, default = None, None
1124 1112 for arg in l:
1125 1113 s = getstring(arg, "_matchfiles requires string arguments")
1126 1114 prefix, value = s[:2], s[2:]
1127 1115 if prefix == 'p:':
1128 1116 pats.append(value)
1129 1117 elif prefix == 'i:':
1130 1118 inc.append(value)
1131 1119 elif prefix == 'x:':
1132 1120 exc.append(value)
1133 1121 elif prefix == 'r:':
1134 1122 if rev is not None:
1135 1123 raise error.ParseError('_matchfiles expected at most one '
1136 1124 'revision')
1137 1125 if value != '': # empty means working directory; leave rev as None
1138 1126 rev = value
1139 1127 elif prefix == 'd:':
1140 1128 if default is not None:
1141 1129 raise error.ParseError('_matchfiles expected at most one '
1142 1130 'default mode')
1143 1131 default = value
1144 1132 else:
1145 1133 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1146 1134 if not default:
1147 1135 default = 'glob'
1148 1136
1149 1137 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1150 1138 exclude=exc, ctx=repo[rev], default=default)
1151 1139
1152 1140 # This directly read the changelog data as creating changectx for all
1153 1141 # revisions is quite expensive.
1154 1142 getfiles = repo.changelog.readfiles
1155 1143 wdirrev = node.wdirrev
1156 1144 def matches(x):
1157 1145 if x == wdirrev:
1158 1146 files = repo[x].files()
1159 1147 else:
1160 1148 files = getfiles(x)
1161 1149 for f in files:
1162 1150 if m(f):
1163 1151 return True
1164 1152 return False
1165 1153
1166 1154 return subset.filter(matches,
1167 1155 condrepr=('<matchfiles patterns=%r, include=%r '
1168 1156 'exclude=%r, default=%r, rev=%r>',
1169 1157 pats, inc, exc, default, rev))
1170 1158
1171 1159 @predicate('file(pattern)', safe=True)
1172 1160 def hasfile(repo, subset, x):
1173 1161 """Changesets affecting files matched by pattern.
1174 1162
1175 1163 For a faster but less accurate result, consider using ``filelog()``
1176 1164 instead.
1177 1165
1178 1166 This predicate uses ``glob:`` as the default kind of pattern.
1179 1167 """
1180 1168 # i18n: "file" is a keyword
1181 1169 pat = getstring(x, _("file requires a pattern"))
1182 1170 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1183 1171
1184 1172 @predicate('head()', safe=True)
1185 1173 def head(repo, subset, x):
1186 1174 """Changeset is a named branch head.
1187 1175 """
1188 1176 # i18n: "head" is a keyword
1189 1177 getargs(x, 0, 0, _("head takes no arguments"))
1190 1178 hs = set()
1191 1179 cl = repo.changelog
1192 1180 for b, ls in repo.branchmap().iteritems():
1193 1181 hs.update(cl.rev(h) for h in ls)
1194 1182 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1195 1183 # This does not break because of other fullreposet misbehavior.
1196 1184 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1197 1185 # necessary to ensure we preserve the order in subset.
1198 1186 return baseset(hs) & subset
1199 1187
1200 1188 @predicate('heads(set)', safe=True)
1201 1189 def heads(repo, subset, x):
1202 1190 """Members of set with no children in set.
1203 1191 """
1204 1192 s = getset(repo, subset, x)
1205 1193 ps = parents(repo, subset, x)
1206 1194 return s - ps
1207 1195
1208 1196 @predicate('hidden()', safe=True)
1209 1197 def hidden(repo, subset, x):
1210 1198 """Hidden changesets.
1211 1199 """
1212 1200 # i18n: "hidden" is a keyword
1213 1201 getargs(x, 0, 0, _("hidden takes no arguments"))
1214 1202 hiddenrevs = repoview.filterrevs(repo, 'visible')
1215 1203 return subset & hiddenrevs
1216 1204
1217 1205 @predicate('keyword(string)', safe=True)
1218 1206 def keyword(repo, subset, x):
1219 1207 """Search commit message, user name, and names of changed files for
1220 1208 string. The match is case-insensitive.
1221 1209 """
1222 1210 # i18n: "keyword" is a keyword
1223 1211 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1224 1212
1225 1213 def matches(r):
1226 1214 c = repo[r]
1227 1215 return any(kw in encoding.lower(t)
1228 1216 for t in c.files() + [c.user(), c.description()])
1229 1217
1230 1218 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1231 1219
1232 1220 @predicate('limit(set[, n[, offset]])', safe=True)
1233 1221 def limit(repo, subset, x):
1234 1222 """First n members of set, defaulting to 1, starting from offset.
1235 1223 """
1236 1224 args = getargsdict(x, 'limit', 'set n offset')
1237 1225 if 'set' not in args:
1238 1226 # i18n: "limit" is a keyword
1239 1227 raise error.ParseError(_("limit requires one to three arguments"))
1240 1228 try:
1241 1229 lim, ofs = 1, 0
1242 1230 if 'n' in args:
1243 1231 # i18n: "limit" is a keyword
1244 1232 lim = int(getstring(args['n'], _("limit requires a number")))
1245 1233 if 'offset' in args:
1246 1234 # i18n: "limit" is a keyword
1247 1235 ofs = int(getstring(args['offset'], _("limit requires a number")))
1248 1236 if ofs < 0:
1249 1237 raise error.ParseError(_("negative offset"))
1250 1238 except (TypeError, ValueError):
1251 1239 # i18n: "limit" is a keyword
1252 1240 raise error.ParseError(_("limit expects a number"))
1253 1241 os = getset(repo, fullreposet(repo), args['set'])
1254 1242 result = []
1255 1243 it = iter(os)
1256 1244 for x in xrange(ofs):
1257 1245 y = next(it, None)
1258 1246 if y is None:
1259 1247 break
1260 1248 for x in xrange(lim):
1261 1249 y = next(it, None)
1262 1250 if y is None:
1263 1251 break
1264 1252 elif y in subset:
1265 1253 result.append(y)
1266 1254 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1267 1255 lim, ofs, subset, os))
1268 1256
1269 1257 @predicate('last(set, [n])', safe=True)
1270 1258 def last(repo, subset, x):
1271 1259 """Last n members of set, defaulting to 1.
1272 1260 """
1273 1261 # i18n: "last" is a keyword
1274 1262 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1275 1263 try:
1276 1264 lim = 1
1277 1265 if len(l) == 2:
1278 1266 # i18n: "last" is a keyword
1279 1267 lim = int(getstring(l[1], _("last requires a number")))
1280 1268 except (TypeError, ValueError):
1281 1269 # i18n: "last" is a keyword
1282 1270 raise error.ParseError(_("last expects a number"))
1283 1271 os = getset(repo, fullreposet(repo), l[0])
1284 1272 os.reverse()
1285 1273 result = []
1286 1274 it = iter(os)
1287 1275 for x in xrange(lim):
1288 1276 y = next(it, None)
1289 1277 if y is None:
1290 1278 break
1291 1279 elif y in subset:
1292 1280 result.append(y)
1293 1281 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1294 1282
1295 1283 @predicate('max(set)', safe=True)
1296 1284 def maxrev(repo, subset, x):
1297 1285 """Changeset with highest revision number in set.
1298 1286 """
1299 1287 os = getset(repo, fullreposet(repo), x)
1300 1288 try:
1301 1289 m = os.max()
1302 1290 if m in subset:
1303 1291 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1304 1292 except ValueError:
1305 1293 # os.max() throws a ValueError when the collection is empty.
1306 1294 # Same as python's max().
1307 1295 pass
1308 1296 return baseset(datarepr=('<max %r, %r>', subset, os))
1309 1297
1310 1298 @predicate('merge()', safe=True)
1311 1299 def merge(repo, subset, x):
1312 1300 """Changeset is a merge changeset.
1313 1301 """
1314 1302 # i18n: "merge" is a keyword
1315 1303 getargs(x, 0, 0, _("merge takes no arguments"))
1316 1304 cl = repo.changelog
1317 1305 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1318 1306 condrepr='<merge>')
1319 1307
1320 1308 @predicate('branchpoint()', safe=True)
1321 1309 def branchpoint(repo, subset, x):
1322 1310 """Changesets with more than one child.
1323 1311 """
1324 1312 # i18n: "branchpoint" is a keyword
1325 1313 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1326 1314 cl = repo.changelog
1327 1315 if not subset:
1328 1316 return baseset()
1329 1317 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1330 1318 # (and if it is not, it should.)
1331 1319 baserev = min(subset)
1332 1320 parentscount = [0]*(len(repo) - baserev)
1333 1321 for r in cl.revs(start=baserev + 1):
1334 1322 for p in cl.parentrevs(r):
1335 1323 if p >= baserev:
1336 1324 parentscount[p - baserev] += 1
1337 1325 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1338 1326 condrepr='<branchpoint>')
1339 1327
1340 1328 @predicate('min(set)', safe=True)
1341 1329 def minrev(repo, subset, x):
1342 1330 """Changeset with lowest revision number in set.
1343 1331 """
1344 1332 os = getset(repo, fullreposet(repo), x)
1345 1333 try:
1346 1334 m = os.min()
1347 1335 if m in subset:
1348 1336 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1349 1337 except ValueError:
1350 1338 # os.min() throws a ValueError when the collection is empty.
1351 1339 # Same as python's min().
1352 1340 pass
1353 1341 return baseset(datarepr=('<min %r, %r>', subset, os))
1354 1342
1355 1343 @predicate('modifies(pattern)', safe=True)
1356 1344 def modifies(repo, subset, x):
1357 1345 """Changesets modifying files matched by pattern.
1358 1346
1359 1347 The pattern without explicit kind like ``glob:`` is expected to be
1360 1348 relative to the current directory and match against a file or a
1361 1349 directory.
1362 1350 """
1363 1351 # i18n: "modifies" is a keyword
1364 1352 pat = getstring(x, _("modifies requires a pattern"))
1365 1353 return checkstatus(repo, subset, pat, 0)
1366 1354
1367 1355 @predicate('named(namespace)')
1368 1356 def named(repo, subset, x):
1369 1357 """The changesets in a given namespace.
1370 1358
1371 1359 If `namespace` starts with `re:`, the remainder of the string is treated as
1372 1360 a regular expression. To match a namespace that actually starts with `re:`,
1373 1361 use the prefix `literal:`.
1374 1362 """
1375 1363 # i18n: "named" is a keyword
1376 1364 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1377 1365
1378 1366 ns = getstring(args[0],
1379 1367 # i18n: "named" is a keyword
1380 1368 _('the argument to named must be a string'))
1381 1369 kind, pattern, matcher = util.stringmatcher(ns)
1382 1370 namespaces = set()
1383 1371 if kind == 'literal':
1384 1372 if pattern not in repo.names:
1385 1373 raise error.RepoLookupError(_("namespace '%s' does not exist")
1386 1374 % ns)
1387 1375 namespaces.add(repo.names[pattern])
1388 1376 else:
1389 1377 for name, ns in repo.names.iteritems():
1390 1378 if matcher(name):
1391 1379 namespaces.add(ns)
1392 1380 if not namespaces:
1393 1381 raise error.RepoLookupError(_("no namespace exists"
1394 1382 " that match '%s'") % pattern)
1395 1383
1396 1384 names = set()
1397 1385 for ns in namespaces:
1398 1386 for name in ns.listnames(repo):
1399 1387 if name not in ns.deprecated:
1400 1388 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1401 1389
1402 1390 names -= set([node.nullrev])
1403 1391 return subset & names
1404 1392
1405 1393 @predicate('id(string)', safe=True)
1406 1394 def node_(repo, subset, x):
1407 1395 """Revision non-ambiguously specified by the given hex string prefix.
1408 1396 """
1409 1397 # i18n: "id" is a keyword
1410 1398 l = getargs(x, 1, 1, _("id requires one argument"))
1411 1399 # i18n: "id" is a keyword
1412 1400 n = getstring(l[0], _("id requires a string"))
1413 1401 if len(n) == 40:
1414 1402 try:
1415 1403 rn = repo.changelog.rev(node.bin(n))
1416 1404 except (LookupError, TypeError):
1417 1405 rn = None
1418 1406 else:
1419 1407 rn = None
1420 1408 pm = repo.changelog._partialmatch(n)
1421 1409 if pm is not None:
1422 1410 rn = repo.changelog.rev(pm)
1423 1411
1424 1412 if rn is None:
1425 1413 return baseset()
1426 1414 result = baseset([rn])
1427 1415 return result & subset
1428 1416
1429 1417 @predicate('obsolete()', safe=True)
1430 1418 def obsolete(repo, subset, x):
1431 1419 """Mutable changeset with a newer version."""
1432 1420 # i18n: "obsolete" is a keyword
1433 1421 getargs(x, 0, 0, _("obsolete takes no arguments"))
1434 1422 obsoletes = obsmod.getrevs(repo, 'obsolete')
1435 1423 return subset & obsoletes
1436 1424
1437 1425 @predicate('only(set, [set])', safe=True)
1438 1426 def only(repo, subset, x):
1439 1427 """Changesets that are ancestors of the first set that are not ancestors
1440 1428 of any other head in the repo. If a second set is specified, the result
1441 1429 is ancestors of the first set that are not ancestors of the second set
1442 1430 (i.e. ::<set1> - ::<set2>).
1443 1431 """
1444 1432 cl = repo.changelog
1445 1433 # i18n: "only" is a keyword
1446 1434 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1447 1435 include = getset(repo, fullreposet(repo), args[0])
1448 1436 if len(args) == 1:
1449 1437 if not include:
1450 1438 return baseset()
1451 1439
1452 1440 descendants = set(_revdescendants(repo, include, False))
1453 1441 exclude = [rev for rev in cl.headrevs()
1454 1442 if not rev in descendants and not rev in include]
1455 1443 else:
1456 1444 exclude = getset(repo, fullreposet(repo), args[1])
1457 1445
1458 1446 results = set(cl.findmissingrevs(common=exclude, heads=include))
1459 1447 # XXX we should turn this into a baseset instead of a set, smartset may do
1460 1448 # some optimisations from the fact this is a baseset.
1461 1449 return subset & results
1462 1450
1463 1451 @predicate('origin([set])', safe=True)
1464 1452 def origin(repo, subset, x):
1465 1453 """
1466 1454 Changesets that were specified as a source for the grafts, transplants or
1467 1455 rebases that created the given revisions. Omitting the optional set is the
1468 1456 same as passing all(). If a changeset created by these operations is itself
1469 1457 specified as a source for one of these operations, only the source changeset
1470 1458 for the first operation is selected.
1471 1459 """
1472 1460 if x is not None:
1473 1461 dests = getset(repo, fullreposet(repo), x)
1474 1462 else:
1475 1463 dests = fullreposet(repo)
1476 1464
1477 1465 def _firstsrc(rev):
1478 1466 src = _getrevsource(repo, rev)
1479 1467 if src is None:
1480 1468 return None
1481 1469
1482 1470 while True:
1483 1471 prev = _getrevsource(repo, src)
1484 1472
1485 1473 if prev is None:
1486 1474 return src
1487 1475 src = prev
1488 1476
1489 1477 o = set([_firstsrc(r) for r in dests])
1490 1478 o -= set([None])
1491 1479 # XXX we should turn this into a baseset instead of a set, smartset may do
1492 1480 # some optimisations from the fact this is a baseset.
1493 1481 return subset & o
1494 1482
1495 1483 @predicate('outgoing([path])', safe=True)
1496 1484 def outgoing(repo, subset, x):
1497 1485 """Changesets not found in the specified destination repository, or the
1498 1486 default push location.
1499 1487 """
1500 1488 # Avoid cycles.
1501 1489 from . import (
1502 1490 discovery,
1503 1491 hg,
1504 1492 )
1505 1493 # i18n: "outgoing" is a keyword
1506 1494 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1507 1495 # i18n: "outgoing" is a keyword
1508 1496 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1509 1497 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1510 1498 dest, branches = hg.parseurl(dest)
1511 1499 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1512 1500 if revs:
1513 1501 revs = [repo.lookup(rev) for rev in revs]
1514 1502 other = hg.peer(repo, {}, dest)
1515 1503 repo.ui.pushbuffer()
1516 1504 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1517 1505 repo.ui.popbuffer()
1518 1506 cl = repo.changelog
1519 1507 o = set([cl.rev(r) for r in outgoing.missing])
1520 1508 return subset & o
1521 1509
1522 1510 @predicate('p1([set])', safe=True)
1523 1511 def p1(repo, subset, x):
1524 1512 """First parent of changesets in set, or the working directory.
1525 1513 """
1526 1514 if x is None:
1527 1515 p = repo[x].p1().rev()
1528 1516 if p >= 0:
1529 1517 return subset & baseset([p])
1530 1518 return baseset()
1531 1519
1532 1520 ps = set()
1533 1521 cl = repo.changelog
1534 1522 for r in getset(repo, fullreposet(repo), x):
1535 1523 ps.add(cl.parentrevs(r)[0])
1536 1524 ps -= set([node.nullrev])
1537 1525 # XXX we should turn this into a baseset instead of a set, smartset may do
1538 1526 # some optimisations from the fact this is a baseset.
1539 1527 return subset & ps
1540 1528
1541 1529 @predicate('p2([set])', safe=True)
1542 1530 def p2(repo, subset, x):
1543 1531 """Second parent of changesets in set, or the working directory.
1544 1532 """
1545 1533 if x is None:
1546 1534 ps = repo[x].parents()
1547 1535 try:
1548 1536 p = ps[1].rev()
1549 1537 if p >= 0:
1550 1538 return subset & baseset([p])
1551 1539 return baseset()
1552 1540 except IndexError:
1553 1541 return baseset()
1554 1542
1555 1543 ps = set()
1556 1544 cl = repo.changelog
1557 1545 for r in getset(repo, fullreposet(repo), x):
1558 1546 ps.add(cl.parentrevs(r)[1])
1559 1547 ps -= set([node.nullrev])
1560 1548 # XXX we should turn this into a baseset instead of a plain set; smartset
1561 1549 # may be able to do some optimisations based on the fact that it is a baseset.
1562 1550 return subset & ps
1563 1551
1564 1552 @predicate('parents([set])', safe=True)
1565 1553 def parents(repo, subset, x):
1566 1554 """
1567 1555 The set of all parents for all changesets in set, or the working directory.
1568 1556 """
1569 1557 if x is None:
1570 1558 ps = set(p.rev() for p in repo[x].parents())
1571 1559 else:
1572 1560 ps = set()
1573 1561 cl = repo.changelog
1574 1562 up = ps.update
1575 1563 parentrevs = cl.parentrevs
1576 1564 for r in getset(repo, fullreposet(repo), x):
1577 1565 if r == node.wdirrev:
1578 1566 up(p.rev() for p in repo[r].parents())
1579 1567 else:
1580 1568 up(parentrevs(r))
1581 1569 ps -= set([node.nullrev])
1582 1570 return subset & ps
1583 1571
1584 1572 def _phase(repo, subset, target):
1585 1573 """helper to select all rev in phase <target>"""
1586 1574 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1587 1575 if repo._phasecache._phasesets:
1588 1576 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1589 1577 s = baseset(s)
1590 1578 s.sort() # sets are unordered, so we enforce ascending order
1591 1579 return subset & s
1592 1580 else:
1593 1581 phase = repo._phasecache.phase
1594 1582 condition = lambda r: phase(repo, r) == target
1595 1583 return subset.filter(condition, condrepr=('<phase %r>', target),
1596 1584 cache=False)
1597 1585
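# Illustrative usage sketch (assuming a loaded localrepository ``repo``): the
# draft()/secret() predicates below delegate to _phase(), which prefers the
# precomputed phase sets and otherwise falls back to filtering rev by rev:
#
#     >>> m = match(repo.ui, 'draft()', repo)
#     >>> draftrevs = m(repo)        # smartset of draft-phase revisions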
1598 1586 @predicate('draft()', safe=True)
1599 1587 def draft(repo, subset, x):
1600 1588 """Changeset in draft phase."""
1601 1589 # i18n: "draft" is a keyword
1602 1590 getargs(x, 0, 0, _("draft takes no arguments"))
1603 1591 target = phases.draft
1604 1592 return _phase(repo, subset, target)
1605 1593
1606 1594 @predicate('secret()', safe=True)
1607 1595 def secret(repo, subset, x):
1608 1596 """Changeset in secret phase."""
1609 1597 # i18n: "secret" is a keyword
1610 1598 getargs(x, 0, 0, _("secret takes no arguments"))
1611 1599 target = phases.secret
1612 1600 return _phase(repo, subset, target)
1613 1601
1614 1602 def parentspec(repo, subset, x, n):
1615 1603 """``set^0``
1616 1604 The set.
1617 1605 ``set^1`` (or ``set^``), ``set^2``
1618 1606 First or second parent, respectively, of all changesets in set.
1619 1607 """
1620 1608 try:
1621 1609 n = int(n[1])
1622 1610 if n not in (0, 1, 2):
1623 1611 raise ValueError
1624 1612 except (TypeError, ValueError):
1625 1613 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1626 1614 ps = set()
1627 1615 cl = repo.changelog
1628 1616 for r in getset(repo, fullreposet(repo), x):
1629 1617 if n == 0:
1630 1618 ps.add(r)
1631 1619 elif n == 1:
1632 1620 ps.add(cl.parentrevs(r)[0])
1633 1621 elif n == 2:
1634 1622 parents = cl.parentrevs(r)
1635 1623 if len(parents) > 1:
1636 1624 ps.add(parents[1])
1637 1625 return subset & ps
1638 1626
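# Worked example for parentspec(): for a merge revision M with parents P1 and
# P2, the '^' suffix selects
#
#     M^0 -> {M}      M^1 (or M^) -> {P1}      M^2 -> {P2}
#
# and M^2 is effectively empty for non-merge revisions, since the null second
# parent is filtered out by 'subset & ps'.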
1639 1627 @predicate('present(set)', safe=True)
1640 1628 def present(repo, subset, x):
1641 1629 """An empty set, if any revision in set isn't found; otherwise,
1642 1630 all revisions in set.
1643 1631
1644 1632 If any of the specified revisions is not present in the local repository,
1645 1633 the query is normally aborted. But this predicate allows the query
1646 1634 to continue even in such cases.
1647 1635 """
1648 1636 try:
1649 1637 return getset(repo, subset, x)
1650 1638 except error.RepoLookupError:
1651 1639 return baseset()
1652 1640
1653 1641 # for internal use
1654 1642 @predicate('_notpublic', safe=True)
1655 1643 def _notpublic(repo, subset, x):
1656 1644 getargs(x, 0, 0, "_notpublic takes no arguments")
1657 1645 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1658 1646 if repo._phasecache._phasesets:
1659 1647 s = set()
1660 1648 for u in repo._phasecache._phasesets[1:]:
1661 1649 s.update(u)
1662 1650 s = baseset(s - repo.changelog.filteredrevs)
1663 1651 s.sort()
1664 1652 return subset & s
1665 1653 else:
1666 1654 phase = repo._phasecache.phase
1667 1655 target = phases.public
1668 1656 condition = lambda r: phase(repo, r) != target
1669 1657 return subset.filter(condition, condrepr=('<phase %r>', target),
1670 1658 cache=False)
1671 1659
1672 1660 @predicate('public()', safe=True)
1673 1661 def public(repo, subset, x):
1674 1662 """Changeset in public phase."""
1675 1663 # i18n: "public" is a keyword
1676 1664 getargs(x, 0, 0, _("public takes no arguments"))
1677 1665 phase = repo._phasecache.phase
1678 1666 target = phases.public
1679 1667 condition = lambda r: phase(repo, r) == target
1680 1668 return subset.filter(condition, condrepr=('<phase %r>', target),
1681 1669 cache=False)
1682 1670
1683 1671 @predicate('remote([id [,path]])', safe=True)
1684 1672 def remote(repo, subset, x):
1685 1673 """Local revision that corresponds to the given identifier in a
1686 1674 remote repository, if present. Here, the '.' identifier is a
1687 1675 synonym for the current local branch.
1688 1676 """
1689 1677
1690 1678 from . import hg # avoid start-up nasties
1691 1679 # i18n: "remote" is a keyword
1692 1680 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1693 1681
1694 1682 q = '.'
1695 1683 if len(l) > 0:
1696 1684 # i18n: "remote" is a keyword
1697 1685 q = getstring(l[0], _("remote requires a string id"))
1698 1686 if q == '.':
1699 1687 q = repo['.'].branch()
1700 1688
1701 1689 dest = ''
1702 1690 if len(l) > 1:
1703 1691 # i18n: "remote" is a keyword
1704 1692 dest = getstring(l[1], _("remote requires a repository path"))
1705 1693 dest = repo.ui.expandpath(dest or 'default')
1706 1694 dest, branches = hg.parseurl(dest)
1707 1695 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1708 1696 if revs:
1709 1697 revs = [repo.lookup(rev) for rev in revs]
1710 1698 other = hg.peer(repo, {}, dest)
1711 1699 n = other.lookup(q)
1712 1700 if n in repo:
1713 1701 r = repo[n].rev()
1714 1702 if r in subset:
1715 1703 return baseset([r])
1716 1704 return baseset()
1717 1705
1718 1706 @predicate('removes(pattern)', safe=True)
1719 1707 def removes(repo, subset, x):
1720 1708 """Changesets which remove files matching pattern.
1721 1709
1722 1710 The pattern without explicit kind like ``glob:`` is expected to be
1723 1711 relative to the current directory and match against a file or a
1724 1712 directory.
1725 1713 """
1726 1714 # i18n: "removes" is a keyword
1727 1715 pat = getstring(x, _("removes requires a pattern"))
1728 1716 return checkstatus(repo, subset, pat, 2)
1729 1717
1730 1718 @predicate('rev(number)', safe=True)
1731 1719 def rev(repo, subset, x):
1732 1720 """Revision with the given numeric identifier.
1733 1721 """
1734 1722 # i18n: "rev" is a keyword
1735 1723 l = getargs(x, 1, 1, _("rev requires one argument"))
1736 1724 try:
1737 1725 # i18n: "rev" is a keyword
1738 1726 l = int(getstring(l[0], _("rev requires a number")))
1739 1727 except (TypeError, ValueError):
1740 1728 # i18n: "rev" is a keyword
1741 1729 raise error.ParseError(_("rev expects a number"))
1742 1730 if l not in repo.changelog and l != node.nullrev:
1743 1731 return baseset()
1744 1732 return subset & baseset([l])
1745 1733
1746 1734 @predicate('matching(revision [, field])', safe=True)
1747 1735 def matching(repo, subset, x):
1748 1736 """Changesets in which a given set of fields match the set of fields in the
1749 1737 selected revision or set.
1750 1738
1751 1739 To match more than one field, pass the list of fields to match separated
1752 1740 by spaces (e.g. ``author description``).
1753 1741
1754 1742 Valid fields are most regular revision fields and some special fields.
1755 1743
1756 1744 Regular revision fields are ``description``, ``author``, ``branch``,
1757 1745 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1758 1746 and ``diff``.
1759 1747 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1760 1748 contents of the revision. Two revisions matching their ``diff`` will
1761 1749 also match their ``files``.
1762 1750
1763 1751 Special fields are ``summary`` and ``metadata``:
1764 1752 ``summary`` matches the first line of the description.
1765 1753 ``metadata`` is equivalent to matching ``description user date``
1766 1754 (i.e. it matches the main metadata fields).
1767 1755
1768 1756 ``metadata`` is the default field which is used when no fields are
1769 1757 specified. You can match more than one field at a time.
1770 1758 """
1771 1759 # i18n: "matching" is a keyword
1772 1760 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1773 1761
1774 1762 revs = getset(repo, fullreposet(repo), l[0])
1775 1763
1776 1764 fieldlist = ['metadata']
1777 1765 if len(l) > 1:
1778 1766 fieldlist = getstring(l[1],
1779 1767 # i18n: "matching" is a keyword
1780 1768 _("matching requires a string "
1781 1769 "as its second argument")).split()
1782 1770
1783 1771 # Make sure that there are no repeated fields,
1784 1772 # expand the 'special' 'metadata' field type
1785 1773 # and check the 'files' whenever we check the 'diff'
1786 1774 fields = []
1787 1775 for field in fieldlist:
1788 1776 if field == 'metadata':
1789 1777 fields += ['user', 'description', 'date']
1790 1778 elif field == 'diff':
1791 1779 # a revision matching the diff must also match the files
1792 1780 # since matching the diff is very costly, make sure to
1793 1781 # also match the files first
1794 1782 fields += ['files', 'diff']
1795 1783 else:
1796 1784 if field == 'author':
1797 1785 field = 'user'
1798 1786 fields.append(field)
1799 1787 fields = set(fields)
1800 1788 if 'summary' in fields and 'description' in fields:
1801 1789 # If a revision matches its description it also matches its summary
1802 1790 fields.discard('summary')
1803 1791
1804 1792 # We may want to match more than one field
1805 1793 # Not all fields take the same amount of time to be matched
1806 1794 # Sort the selected fields in order of increasing matching cost
1807 1795 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1808 1796 'files', 'description', 'substate', 'diff']
1809 1797 def fieldkeyfunc(f):
1810 1798 try:
1811 1799 return fieldorder.index(f)
1812 1800 except ValueError:
1813 1801 # assume an unknown field is very costly
1814 1802 return len(fieldorder)
1815 1803 fields = list(fields)
1816 1804 fields.sort(key=fieldkeyfunc)
1817 1805
1818 1806 # Each field will be matched with its own "getfield" function
1819 1807 # which will be added to the getfieldfuncs array of functions
1820 1808 getfieldfuncs = []
1821 1809 _funcs = {
1822 1810 'user': lambda r: repo[r].user(),
1823 1811 'branch': lambda r: repo[r].branch(),
1824 1812 'date': lambda r: repo[r].date(),
1825 1813 'description': lambda r: repo[r].description(),
1826 1814 'files': lambda r: repo[r].files(),
1827 1815 'parents': lambda r: repo[r].parents(),
1828 1816 'phase': lambda r: repo[r].phase(),
1829 1817 'substate': lambda r: repo[r].substate,
1830 1818 'summary': lambda r: repo[r].description().splitlines()[0],
1831 1819 'diff': lambda r: list(repo[r].diff(git=True)),
1832 1820 }
1833 1821 for info in fields:
1834 1822 getfield = _funcs.get(info, None)
1835 1823 if getfield is None:
1836 1824 raise error.ParseError(
1837 1825 # i18n: "matching" is a keyword
1838 1826 _("unexpected field name passed to matching: %s") % info)
1839 1827 getfieldfuncs.append(getfield)
1840 1828 # convert the getfield array of functions into a "getinfo" function
1841 1829 # which returns an array of field values (or a single value if there
1842 1830 # is only one field to match)
1843 1831 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1844 1832
1845 1833 def matches(x):
1846 1834 for rev in revs:
1847 1835 target = getinfo(rev)
1848 1836 match = True
1849 1837 for n, f in enumerate(getfieldfuncs):
1850 1838 if target[n] != f(x):
1851 1839 match = False
1852 1840 if match:
1853 1841 return True
1854 1842 return False
1855 1843
1856 1844 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1857 1845
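# Illustrative query (assuming a loaded ``repo``): select revisions that share
# tip's author and commit date; the selected fields are compared in the cost
# order computed above:
#
#     >>> m = match(repo.ui, 'matching(tip, "author date")', repo)
#     >>> list(m(repo))      # revisions whose user and date equal tip's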
1858 1846 @predicate('reverse(set)', safe=True)
1859 1847 def reverse(repo, subset, x):
1860 1848 """Reverse order of set.
1861 1849 """
1862 1850 l = getset(repo, subset, x)
1863 1851 l.reverse()
1864 1852 return l
1865 1853
1866 1854 @predicate('roots(set)', safe=True)
1867 1855 def roots(repo, subset, x):
1868 1856 """Changesets in set with no parent changeset in set.
1869 1857 """
1870 1858 s = getset(repo, fullreposet(repo), x)
1871 1859 parents = repo.changelog.parentrevs
1872 1860 def filter(r):
1873 1861 for p in parents(r):
1874 1862 if 0 <= p and p in s:
1875 1863 return False
1876 1864 return True
1877 1865 return subset & s.filter(filter, condrepr='<roots>')
1878 1866
1879 1867 @predicate('sort(set[, [-]key...])', safe=True)
1880 1868 def sort(repo, subset, x):
1881 1869 """Sort set by keys. The default sort order is ascending, specify a key
1882 1870 as ``-key`` to sort in descending order.
1883 1871
1884 1872 The keys can be:
1885 1873
1886 1874 - ``rev`` for the revision number,
1887 1875 - ``branch`` for the branch name,
1888 1876 - ``desc`` for the commit message (description),
1889 1877 - ``user`` for user name (``author`` can be used as an alias),
1890 1878 - ``date`` for the commit date
1891 1879 """
1892 1880 # i18n: "sort" is a keyword
1893 1881 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1894 1882 keys = "rev"
1895 1883 if len(l) == 2:
1896 1884 # i18n: "sort" is a keyword
1897 1885 keys = getstring(l[1], _("sort spec must be a string"))
1898 1886
1899 1887 s = l[0]
1900 1888 keys = keys.split()
1901 1889 l = []
1902 1890 def invert(s):
1903 1891 return "".join(chr(255 - ord(c)) for c in s)
1904 1892 revs = getset(repo, subset, s)
1905 1893 if keys == ["rev"]:
1906 1894 revs.sort()
1907 1895 return revs
1908 1896 elif keys == ["-rev"]:
1909 1897 revs.sort(reverse=True)
1910 1898 return revs
1911 1899 for r in revs:
1912 1900 c = repo[r]
1913 1901 e = []
1914 1902 for k in keys:
1915 1903 if k == 'rev':
1916 1904 e.append(r)
1917 1905 elif k == '-rev':
1918 1906 e.append(-r)
1919 1907 elif k == 'branch':
1920 1908 e.append(c.branch())
1921 1909 elif k == '-branch':
1922 1910 e.append(invert(c.branch()))
1923 1911 elif k == 'desc':
1924 1912 e.append(c.description())
1925 1913 elif k == '-desc':
1926 1914 e.append(invert(c.description()))
1927 1915 elif k in 'user author':
1928 1916 e.append(c.user())
1929 1917 elif k in '-user -author':
1930 1918 e.append(invert(c.user()))
1931 1919 elif k == 'date':
1932 1920 e.append(c.date()[0])
1933 1921 elif k == '-date':
1934 1922 e.append(-c.date()[0])
1935 1923 else:
1936 1924 raise error.ParseError(_("unknown sort key %r") % k)
1937 1925 e.append(r)
1938 1926 l.append(e)
1939 1927 l.sort()
1940 1928 return baseset([e[-1] for e in l])
1941 1929
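# Illustration of the invert() trick used above for descending string keys:
# complementing every byte reverses lexicographic order, so an ascending sort
# of the inverted keys yields the original strings in descending order.
#
#     >>> invert = lambda s: "".join(chr(255 - ord(c)) for c in s)
#     >>> sorted(['alice', 'bob', 'carl'], key=invert)
#     ['carl', 'bob', 'alice']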
1942 1930 @predicate('subrepo([pattern])')
1943 1931 def subrepo(repo, subset, x):
1944 1932 """Changesets that add, modify or remove the given subrepo. If no subrepo
1945 1933 pattern is named, any subrepo changes are returned.
1946 1934 """
1947 1935 # i18n: "subrepo" is a keyword
1948 1936 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1949 1937 pat = None
1950 1938 if len(args) != 0:
1951 1939 pat = getstring(args[0], _("subrepo requires a pattern"))
1952 1940
1953 1941 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1954 1942
1955 1943 def submatches(names):
1956 1944 k, p, m = util.stringmatcher(pat)
1957 1945 for name in names:
1958 1946 if m(name):
1959 1947 yield name
1960 1948
1961 1949 def matches(x):
1962 1950 c = repo[x]
1963 1951 s = repo.status(c.p1().node(), c.node(), match=m)
1964 1952
1965 1953 if pat is None:
1966 1954 return s.added or s.modified or s.removed
1967 1955
1968 1956 if s.added:
1969 1957 return any(submatches(c.substate.keys()))
1970 1958
1971 1959 if s.modified:
1972 1960 subs = set(c.p1().substate.keys())
1973 1961 subs.update(c.substate.keys())
1974 1962
1975 1963 for path in submatches(subs):
1976 1964 if c.p1().substate.get(path) != c.substate.get(path):
1977 1965 return True
1978 1966
1979 1967 if s.removed:
1980 1968 return any(submatches(c.p1().substate.keys()))
1981 1969
1982 1970 return False
1983 1971
1984 1972 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1985 1973
1986 1974 def _substringmatcher(pattern):
1987 1975 kind, pattern, matcher = util.stringmatcher(pattern)
1988 1976 if kind == 'literal':
1989 1977 matcher = lambda s: pattern in s
1990 1978 return kind, pattern, matcher
1991 1979
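# Behaviour sketch for _substringmatcher(): plain strings become substring
# tests, while 're:' patterns keep the regex matcher from util.stringmatcher:
#
#     >>> kind, pat, m = _substringmatcher('bob@')
#     >>> kind, m('bob@example.com'), m('alice@example.com')
#     ('literal', True, False)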
1992 1980 @predicate('tag([name])', safe=True)
1993 1981 def tag(repo, subset, x):
1994 1982 """The specified tag by name, or all tagged revisions if no name is given.
1995 1983
1996 1984 If `name` starts with `re:`, the remainder of the name is treated as
1997 1985 a regular expression. To match a tag that actually starts with `re:`,
1998 1986 use the prefix `literal:`.
1999 1987 """
2000 1988 # i18n: "tag" is a keyword
2001 1989 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2002 1990 cl = repo.changelog
2003 1991 if args:
2004 1992 pattern = getstring(args[0],
2005 1993 # i18n: "tag" is a keyword
2006 1994 _('the argument to tag must be a string'))
2007 1995 kind, pattern, matcher = util.stringmatcher(pattern)
2008 1996 if kind == 'literal':
2009 1997 # avoid resolving all tags
2010 1998 tn = repo._tagscache.tags.get(pattern, None)
2011 1999 if tn is None:
2012 2000 raise error.RepoLookupError(_("tag '%s' does not exist")
2013 2001 % pattern)
2014 2002 s = set([repo[tn].rev()])
2015 2003 else:
2016 2004 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2017 2005 else:
2018 2006 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2019 2007 return subset & s
2020 2008
2021 2009 @predicate('tagged', safe=True)
2022 2010 def tagged(repo, subset, x):
2023 2011 return tag(repo, subset, x)
2024 2012
2025 2013 @predicate('unstable()', safe=True)
2026 2014 def unstable(repo, subset, x):
2027 2015 """Non-obsolete changesets with obsolete ancestors.
2028 2016 """
2029 2017 # i18n: "unstable" is a keyword
2030 2018 getargs(x, 0, 0, _("unstable takes no arguments"))
2031 2019 unstables = obsmod.getrevs(repo, 'unstable')
2032 2020 return subset & unstables
2033 2021
2034 2022
2035 2023 @predicate('user(string)', safe=True)
2036 2024 def user(repo, subset, x):
2037 2025 """User name contains string. The match is case-insensitive.
2038 2026
2039 2027 If `string` starts with `re:`, the remainder of the string is treated as
2040 2028 a regular expression. To match a user that actually contains `re:`, use
2041 2029 the prefix `literal:`.
2042 2030 """
2043 2031 return author(repo, subset, x)
2044 2032
2045 2033 # experimental
2046 2034 @predicate('wdir', safe=True)
2047 2035 def wdir(repo, subset, x):
2048 2036 # i18n: "wdir" is a keyword
2049 2037 getargs(x, 0, 0, _("wdir takes no arguments"))
2050 2038 if node.wdirrev in subset or isinstance(subset, fullreposet):
2051 2039 return baseset([node.wdirrev])
2052 2040 return baseset()
2053 2041
2054 2042 # for internal use
2055 2043 @predicate('_list', safe=True)
2056 2044 def _list(repo, subset, x):
2057 2045 s = getstring(x, "internal error")
2058 2046 if not s:
2059 2047 return baseset()
2060 2048 # remove duplicates here. it's difficult for the caller to deduplicate sets
2061 2049 # because different symbols can point to the same rev.
2062 2050 cl = repo.changelog
2063 2051 ls = []
2064 2052 seen = set()
2065 2053 for t in s.split('\0'):
2066 2054 try:
2067 2055 # fast path for integer revision
2068 2056 r = int(t)
2069 2057 if str(r) != t or r not in cl:
2070 2058 raise ValueError
2071 2059 revs = [r]
2072 2060 except ValueError:
2073 2061 revs = stringset(repo, subset, t)
2074 2062
2075 2063 for r in revs:
2076 2064 if r in seen:
2077 2065 continue
2078 2066 if (r in subset
2079 2067 or r == node.nullrev and isinstance(subset, fullreposet)):
2080 2068 ls.append(r)
2081 2069 seen.add(r)
2082 2070 return baseset(ls)
2083 2071
2084 2072 # for internal use
2085 2073 @predicate('_intlist', safe=True)
2086 2074 def _intlist(repo, subset, x):
2087 2075 s = getstring(x, "internal error")
2088 2076 if not s:
2089 2077 return baseset()
2090 2078 ls = [int(r) for r in s.split('\0')]
2091 2079 s = subset
2092 2080 return baseset([r for r in ls if r in s])
2093 2081
2094 2082 # for internal use
2095 2083 @predicate('_hexlist', safe=True)
2096 2084 def _hexlist(repo, subset, x):
2097 2085 s = getstring(x, "internal error")
2098 2086 if not s:
2099 2087 return baseset()
2100 2088 cl = repo.changelog
2101 2089 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2102 2090 s = subset
2103 2091 return baseset([r for r in ls if r in s])
2104 2092
2105 2093 methods = {
2106 2094 "range": rangeset,
2107 2095 "dagrange": dagrange,
2108 2096 "string": stringset,
2109 2097 "symbol": stringset,
2110 2098 "and": andset,
2111 2099 "or": orset,
2112 2100 "not": notset,
2113 2101 "difference": differenceset,
2114 2102 "list": listset,
2115 2103 "keyvalue": keyvaluepair,
2116 2104 "func": func,
2117 2105 "ancestor": ancestorspec,
2118 2106 "parent": parentspec,
2119 2107 "parentpost": p1,
2120 2108 }
2121 2109
2122 2110 def optimize(x, small):
2123 2111 if x is None:
2124 2112 return 0, x
2125 2113
2126 2114 smallbonus = 1
2127 2115 if small:
2128 2116 smallbonus = .5
2129 2117
2130 2118 op = x[0]
2131 2119 if op == 'minus':
2132 2120 return optimize(('and', x[1], ('not', x[2])), small)
2133 2121 elif op == 'only':
2134 2122 return optimize(('func', ('symbol', 'only'),
2135 2123 ('list', x[1], x[2])), small)
2136 2124 elif op == 'onlypost':
2137 2125 return optimize(('func', ('symbol', 'only'), x[1]), small)
2138 2126 elif op == 'dagrangepre':
2139 2127 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2140 2128 elif op == 'dagrangepost':
2141 2129 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2142 2130 elif op == 'rangeall':
2143 2131 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2144 2132 elif op == 'rangepre':
2145 2133 return optimize(('range', ('string', '0'), x[1]), small)
2146 2134 elif op == 'rangepost':
2147 2135 return optimize(('range', x[1], ('string', 'tip')), small)
2148 2136 elif op == 'negate':
2149 2137 return optimize(('string',
2150 2138 '-' + getstring(x[1], _("can't negate that"))), small)
2151 2139 elif op in 'string symbol negate':
2152 2140 return smallbonus, x # single revisions are small
2153 2141 elif op == 'and':
2154 2142 wa, ta = optimize(x[1], True)
2155 2143 wb, tb = optimize(x[2], True)
2156 2144
2157 2145 # (::x and not ::y)/(not ::y and ::x) have a fast path
2158 2146 def isonly(revs, bases):
2159 2147 return (
2160 2148 revs is not None
2161 2149 and revs[0] == 'func'
2162 2150 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2163 2151 and bases is not None
2164 2152 and bases[0] == 'not'
2165 2153 and bases[1][0] == 'func'
2166 2154 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2167 2155
2168 2156 w = min(wa, wb)
2169 2157 if isonly(ta, tb):
2170 2158 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2171 2159 if isonly(tb, ta):
2172 2160 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2173 2161
2174 2162 if tb is not None and tb[0] == 'not':
2175 2163 return wa, ('difference', ta, tb[1])
2176 2164
2177 2165 if wa > wb:
2178 2166 return w, (op, tb, ta)
2179 2167 return w, (op, ta, tb)
2180 2168 elif op == 'or':
2181 2169 # fast path for machine-generated expressions, which are likely to have
2182 2170 # lots of trivial revisions: 'a + b + c()' becomes '_list(a b) + c()'
2183 2171 ws, ts, ss = [], [], []
2184 2172 def flushss():
2185 2173 if not ss:
2186 2174 return
2187 2175 if len(ss) == 1:
2188 2176 w, t = ss[0]
2189 2177 else:
2190 2178 s = '\0'.join(t[1] for w, t in ss)
2191 2179 y = ('func', ('symbol', '_list'), ('string', s))
2192 2180 w, t = optimize(y, False)
2193 2181 ws.append(w)
2194 2182 ts.append(t)
2195 2183 del ss[:]
2196 2184 for y in x[1:]:
2197 2185 w, t = optimize(y, False)
2198 2186 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2199 2187 ss.append((w, t))
2200 2188 continue
2201 2189 flushss()
2202 2190 ws.append(w)
2203 2191 ts.append(t)
2204 2192 flushss()
2205 2193 if len(ts) == 1:
2206 2194 return ws[0], ts[0] # 'or' operation is fully optimized out
2207 2195 # we can't reorder trees by weight because it would change the order.
2208 2196 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2209 2197 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2210 2198 return max(ws), (op,) + tuple(ts)
2211 2199 elif op == 'not':
2212 2200 # Optimize not public() to _notpublic() because we have a fast version
2213 2201 if x[1] == ('func', ('symbol', 'public'), None):
2214 2202 newsym = ('func', ('symbol', '_notpublic'), None)
2215 2203 o = optimize(newsym, not small)
2216 2204 return o[0], o[1]
2217 2205 else:
2218 2206 o = optimize(x[1], not small)
2219 2207 return o[0], (op, o[1])
2220 2208 elif op == 'parentpost':
2221 2209 o = optimize(x[1], small)
2222 2210 return o[0], (op, o[1])
2223 2211 elif op == 'group':
2224 2212 return optimize(x[1], small)
2225 2213 elif op in 'dagrange range parent ancestorspec':
2226 2214 if op == 'parent':
2227 2215 # x^:y means (x^) : y, not x ^ (:y)
2228 2216 post = ('parentpost', x[1])
2229 2217 if x[2][0] == 'dagrangepre':
2230 2218 return optimize(('dagrange', post, x[2][1]), small)
2231 2219 elif x[2][0] == 'rangepre':
2232 2220 return optimize(('range', post, x[2][1]), small)
2233 2221
2234 2222 wa, ta = optimize(x[1], small)
2235 2223 wb, tb = optimize(x[2], small)
2236 2224 return wa + wb, (op, ta, tb)
2237 2225 elif op == 'list':
2238 2226 ws, ts = zip(*(optimize(y, small) for y in x[1:]))
2239 2227 return sum(ws), (op,) + ts
2240 2228 elif op == 'func':
2241 2229 f = getstring(x[1], _("not a symbol"))
2242 2230 wa, ta = optimize(x[2], small)
2243 2231 if f in ("author branch closed date desc file grep keyword "
2244 2232 "outgoing user"):
2245 2233 w = 10 # slow
2246 2234 elif f in "modifies adds removes":
2247 2235 w = 30 # slower
2248 2236 elif f == "contains":
2249 2237 w = 100 # very slow
2250 2238 elif f == "ancestor":
2251 2239 w = 1 * smallbonus
2252 2240 elif f in "reverse limit first _intlist":
2253 2241 w = 0
2254 2242 elif f in "sort":
2255 2243 w = 10 # assume most sorts look at changelog
2256 2244 else:
2257 2245 w = 1
2258 2246 return w + wa, (op, x[1], ta)
2259 2247 return 1, x
2260 2248
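# A few illustrative rewrites performed by optimize() (sketch; the tree shapes
# follow the branches above):
#
#     'a + b + c()'      ->  '_list(a b) + c()'   (trivial revisions folded)
#     '::x and not ::y'  ->  'only(x, y)'         (isonly() fast path)
#     'not public()'     ->  '_notpublic()'       (fast phase lookup)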
2261 2249 # the set of valid characters for the initial letter of symbols in
2262 2250 # alias declarations and definitions
2263 2251 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2264 2252 if c.isalnum() or c in '._@$' or ord(c) > 127)
2265 2253
2266 2254 def _tokenizealias(program, lookup=None):
2267 2255 """Parse alias declaration/definition into a stream of tokens
2268 2256
2269 2257 This also allows symbol names to use ``$`` as an initial letter
2270 2258 (for backward compatibility), so callers of this function should
2271 2259 examine whether ``$`` is also used for unexpected symbols.
2272 2260 """
2273 2261 return tokenize(program, lookup=lookup,
2274 2262 syminitletters=_aliassyminitletters)
2275 2263
2276 2264 def _parsealiasdecl(decl):
2277 2265 """Parse alias declaration ``decl``
2278 2266
2279 2267 This returns ``(name, tree, args, errorstr)`` tuple:
2280 2268
2281 2269 - ``name``: name of the declared alias (may be ``decl`` itself on error)
2282 2270 - ``tree``: parse result (or ``None`` on error)
2283 2271 - ``args``: list of alias argument names (or ``None`` for a symbol declaration)
2284 2272 - ``errorstr``: detail about the detected error (or ``None``)
2285 2273
2286 2274 >>> _parsealiasdecl('foo')
2287 2275 ('foo', ('symbol', 'foo'), None, None)
2288 2276 >>> _parsealiasdecl('$foo')
2289 2277 ('$foo', None, None, "'$' not for alias arguments")
2290 2278 >>> _parsealiasdecl('foo::bar')
2291 2279 ('foo::bar', None, None, 'invalid format')
2292 2280 >>> _parsealiasdecl('foo bar')
2293 2281 ('foo bar', None, None, 'at 4: invalid token')
2294 2282 >>> _parsealiasdecl('foo()')
2295 2283 ('foo', ('func', ('symbol', 'foo')), [], None)
2296 2284 >>> _parsealiasdecl('$foo()')
2297 2285 ('$foo()', None, None, "'$' not for alias arguments")
2298 2286 >>> _parsealiasdecl('foo($1, $2)')
2299 2287 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2300 2288 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2301 2289 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2302 2290 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2303 2291 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2304 2292 >>> _parsealiasdecl('foo(bar($1, $2))')
2305 2293 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2306 2294 >>> _parsealiasdecl('foo("string")')
2307 2295 ('foo("string")', None, None, 'invalid argument list')
2308 2296 >>> _parsealiasdecl('foo($1, $2')
2309 2297 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2310 2298 >>> _parsealiasdecl('foo("string')
2311 2299 ('foo("string', None, None, 'at 5: unterminated string')
2312 2300 >>> _parsealiasdecl('foo($1, $2, $1)')
2313 2301 ('foo', None, None, 'argument names collide with each other')
2314 2302 """
2315 2303 p = parser.parser(elements)
2316 2304 try:
2317 2305 tree, pos = p.parse(_tokenizealias(decl))
2318 2306 if (pos != len(decl)):
2319 2307 raise error.ParseError(_('invalid token'), pos)
2320 2308 tree = parser.simplifyinfixops(tree, ('list',))
2321 2309
2322 if isvalidsymbol(tree):
2310 if tree[0] == 'symbol':
2323 2311 # "name = ...." style
2324 name = getsymbol(tree)
2312 name = tree[1]
2325 2313 if name.startswith('$'):
2326 2314 return (decl, None, None, _("'$' not for alias arguments"))
2327 2315 return (name, ('symbol', name), None, None)
2328 2316
2329 2317 if isvalidfunc(tree):
2330 2318 # "name(arg, ....) = ...." style
2331 2319 name = getfuncname(tree)
2332 2320 if name.startswith('$'):
2333 2321 return (decl, None, None, _("'$' not for alias arguments"))
2334 2322 args = []
2335 2323 for arg in getfuncargs(tree):
2336 if not isvalidsymbol(arg):
2324 if arg[0] != 'symbol':
2337 2325 return (decl, None, None, _("invalid argument list"))
2338 args.append(getsymbol(arg))
2326 args.append(arg[1])
2339 2327 if len(args) != len(set(args)):
2340 2328 return (name, None, None,
2341 2329 _("argument names collide with each other"))
2342 2330 return (name, ('func', ('symbol', name)), args, None)
2343 2331
2344 2332 return (decl, None, None, _("invalid format"))
2345 2333 except error.ParseError as inst:
2346 2334 return (decl, None, None, parseerrordetail(inst))
2347 2335
2348 2336 def _relabelaliasargs(tree, args):
2349 2337 if not isinstance(tree, tuple):
2350 2338 return tree
2351 2339 op = tree[0]
2352 2340 if op != 'symbol':
2353 2341 return (op,) + tuple(_relabelaliasargs(x, args) for x in tree[1:])
2354 2342
2355 2343 assert len(tree) == 2
2356 2344 sym = tree[1]
2357 2345 if sym in args:
2358 2346 op = '_aliasarg'
2359 2347 elif sym.startswith('$'):
2360 2348 raise error.ParseError(_("'$' not for alias arguments"))
2361 2349 return (op, sym)
2362 2350
2363 2351 def _parsealiasdefn(defn, args):
2364 2352 """Parse alias definition ``defn``
2365 2353
2366 2354 This function marks alias argument references as ``_aliasarg``.
2367 2355
2368 2356 ``args`` is a list of alias argument names, or None if the alias
2369 2357 is declared as a symbol.
2370 2358
2371 2359 This returns "tree" as parsing result.
2372 2360
2373 2361 >>> def prettyformat(tree):
2374 2362 ... return parser.prettyformat(tree, ('_aliasarg', 'string', 'symbol'))
2375 2363 >>> args = ['$1', '$2', 'foo']
2376 2364 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2377 2365 (or
2378 2366 ('_aliasarg', '$1')
2379 2367 ('_aliasarg', 'foo'))
2380 2368 >>> try:
2381 2369 ... _parsealiasdefn('$1 or $bar', args)
2382 2370 ... except error.ParseError, inst:
2383 2371 ... print parseerrordetail(inst)
2384 2372 '$' not for alias arguments
2385 2373 >>> args = ['$1', '$10', 'foo']
2386 2374 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2387 2375 (or
2388 2376 ('_aliasarg', '$10')
2389 2377 ('symbol', 'foobar'))
2390 2378 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2391 2379 (or
2392 2380 ('string', '$1')
2393 2381 ('string', 'foo'))
2394 2382 """
2395 2383 if args:
2396 2384 args = set(args)
2397 2385 else:
2398 2386 args = set()
2399 2387
2400 2388 p = parser.parser(elements)
2401 2389 tree, pos = p.parse(_tokenizealias(defn))
2402 2390 if pos != len(defn):
2403 2391 raise error.ParseError(_('invalid token'), pos)
2404 2392 tree = parser.simplifyinfixops(tree, ('list', 'or'))
2405 2393 return _relabelaliasargs(tree, args)
2406 2394
2407 2395 class revsetalias(object):
2408 2396 # whether its own `error` information has already been shown or not.
2409 2397 # this avoids showing the same warning multiple times at each `findaliases`.
2410 2398 warned = False
2411 2399
2412 2400 def __init__(self, name, value):
2413 2401 '''Aliases like:
2414 2402
2415 2403 h = heads(default)
2416 2404 b($1) = ancestors($1) - ancestors(default)
2417 2405 '''
2418 2406 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2419 2407 if self.error:
2420 2408 self.error = _('failed to parse the declaration of revset alias'
2421 2409 ' "%s": %s') % (self.name, self.error)
2422 2410 return
2423 2411
2424 2412 try:
2425 2413 self.replacement = _parsealiasdefn(value, self.args)
2426 2414 except error.ParseError as inst:
2427 2415 self.error = _('failed to parse the definition of revset alias'
2428 2416 ' "%s": %s') % (self.name, parseerrordetail(inst))
2429 2417
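# Construction sketch (names/values as they would appear in the [revsetalias]
# config section):
#
#     >>> a = revsetalias('h', 'heads(default)')
#     >>> a.name, a.args, a.error
#     ('h', None, None)
#     >>> b = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
#     >>> b.name, b.args
#     ('b', ['$1'])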
2430 2418 def _getalias(aliases, tree):
2431 2419 """If tree looks like an unexpanded alias, return it. Return None
2432 2420 otherwise.
2433 2421 """
2434 2422 if isinstance(tree, tuple):
2435 2423 if tree[0] == 'symbol':
2436 2424 name = tree[1]
2437 2425 alias = aliases.get(name)
2438 2426 if alias and alias.args is None and alias.tree == tree:
2439 2427 return alias
2440 2428 if tree[0] == 'func':
2441 2429 if tree[1][0] == 'symbol':
2442 2430 name = tree[1][1]
2443 2431 alias = aliases.get(name)
2444 2432 if alias and alias.args is not None and alias.tree == tree[:2]:
2445 2433 return alias
2446 2434 return None
2447 2435
2448 2436 def _expandargs(tree, args):
2449 2437 """Replace _aliasarg instances with the substitution value of the
2450 2438 same name in args, recursively.
2451 2439 """
2452 2440 if not isinstance(tree, tuple):
2453 2441 return tree
2454 2442 if tree[0] == '_aliasarg':
2455 2443 sym = tree[1]
2456 2444 return args[sym]
2457 2445 return tuple(_expandargs(t, args) for t in tree)
2458 2446
2459 2447 def _expandaliases(aliases, tree, expanding, cache):
2460 2448 """Expand aliases in tree, recursively.
2461 2449
2462 2450 'aliases' is a dictionary mapping user defined aliases to
2463 2451 revsetalias objects.
2464 2452 """
2465 2453 if not isinstance(tree, tuple):
2466 2454 # Do not expand raw strings
2467 2455 return tree
2468 2456 alias = _getalias(aliases, tree)
2469 2457 if alias is not None:
2470 2458 if alias.error:
2471 2459 raise error.Abort(alias.error)
2472 2460 if alias in expanding:
2473 2461 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2474 2462 'detected') % alias.name)
2475 2463 expanding.append(alias)
2476 2464 if alias.name not in cache:
2477 2465 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2478 2466 expanding, cache)
2479 2467 result = cache[alias.name]
2480 2468 expanding.pop()
2481 2469 if alias.args is not None:
2482 2470 l = getlist(tree[2])
2483 2471 if len(l) != len(alias.args):
2484 2472 raise error.ParseError(
2485 2473 _('invalid number of arguments: %d') % len(l))
2486 2474 l = [_expandaliases(aliases, a, [], cache) for a in l]
2487 2475 result = _expandargs(result, dict(zip(alias.args, l)))
2488 2476 else:
2489 2477 result = tuple(_expandaliases(aliases, t, expanding, cache)
2490 2478 for t in tree)
2491 2479 return result
2492 2480
2493 2481 def findaliases(ui, tree, showwarning=None):
2494 2482 aliases = {}
2495 2483 for k, v in ui.configitems('revsetalias'):
2496 2484 alias = revsetalias(k, v)
2497 2485 aliases[alias.name] = alias
2498 2486 tree = _expandaliases(aliases, tree, [], {})
2499 2487 if showwarning:
2500 2488 # warn about problematic (but not referenced) aliases
2501 2489 for name, alias in sorted(aliases.iteritems()):
2502 2490 if alias.error and not alias.warned:
2503 2491 showwarning(_('warning: %s\n') % (alias.error))
2504 2492 alias.warned = True
2505 2493 return tree
2506 2494
2507 2495 def foldconcat(tree):
2508 2496 """Fold elements to be concatenated by `##`
2509 2497 """
2510 2498 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2511 2499 return tree
2512 2500 if tree[0] == '_concat':
2513 2501 pending = [tree]
2514 2502 l = []
2515 2503 while pending:
2516 2504 e = pending.pop()
2517 2505 if e[0] == '_concat':
2518 2506 pending.extend(reversed(e[1:]))
2519 2507 elif e[0] in ('string', 'symbol'):
2520 2508 l.append(e[1])
2521 2509 else:
2522 2510 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2523 2511 raise error.ParseError(msg)
2524 2512 return ('string', ''.join(l))
2525 2513 else:
2526 2514 return tuple(foldconcat(t) for t in tree)
2527 2515
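# Worked example: the '##' operator tokenizes to '_concat' nodes, which
# foldconcat() collapses into a single string:
#
#     >>> foldconcat(('_concat', ('string', 'issue'), ('symbol', '1234')))
#     ('string', 'issue1234')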
2528 2516 def parse(spec, lookup=None):
2529 2517 p = parser.parser(elements)
2530 2518 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2531 2519 if pos != len(spec):
2532 2520 raise error.ParseError(_("invalid token"), pos)
2533 2521 return parser.simplifyinfixops(tree, ('list', 'or'))
2534 2522
2535 2523 def posttreebuilthook(tree, repo):
2536 2524 # hook for extensions to execute code on the optimized tree
2537 2525 pass
2538 2526
2539 2527 def match(ui, spec, repo=None):
2540 2528 if not spec:
2541 2529 raise error.ParseError(_("empty query"))
2542 2530 lookup = None
2543 2531 if repo:
2544 2532 lookup = repo.__contains__
2545 2533 tree = parse(spec, lookup)
2546 2534 return _makematcher(ui, tree, repo)
2547 2535
2548 2536 def matchany(ui, specs, repo=None):
2549 2537 """Create a matcher that will include any revisions matching one of the
2550 2538 given specs"""
2551 2539 if not specs:
2552 2540 def mfunc(repo, subset=None):
2553 2541 return baseset()
2554 2542 return mfunc
2555 2543 if not all(specs):
2556 2544 raise error.ParseError(_("empty query"))
2557 2545 lookup = None
2558 2546 if repo:
2559 2547 lookup = repo.__contains__
2560 2548 if len(specs) == 1:
2561 2549 tree = parse(specs[0], lookup)
2562 2550 else:
2563 2551 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2564 2552 return _makematcher(ui, tree, repo)
2565 2553
2566 2554 def _makematcher(ui, tree, repo):
2567 2555 if ui:
2568 2556 tree = findaliases(ui, tree, showwarning=ui.warn)
2569 2557 tree = foldconcat(tree)
2570 2558 weight, tree = optimize(tree, True)
2571 2559 posttreebuilthook(tree, repo)
2572 2560 def mfunc(repo, subset=None):
2573 2561 if subset is None:
2574 2562 subset = fullreposet(repo)
2575 2563 if util.safehasattr(subset, 'isascending'):
2576 2564 result = getset(repo, subset, tree)
2577 2565 else:
2578 2566 result = getset(repo, baseset(subset), tree)
2579 2567 return result
2580 2568 return mfunc
2581 2569
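# End-to-end usage sketch (assuming a ui and localrepository): match() parses
# the spec, expands aliases, folds '##', optimizes the tree and returns a
# matcher mapping a repo (and an optional subset) to a smartset:
#
#     >>> m = match(repo.ui, 'sort(draft() and user("alice"), "-date")', repo)
#     >>> [r for r in m(repo)]       # revision numbers, newest first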
2582 2570 def formatspec(expr, *args):
2583 2571 '''
2584 2572 This is a convenience function for using revsets internally, and
2585 2573 escapes arguments appropriately. Aliases are intentionally ignored
2586 2574 so that intended expression behavior isn't accidentally subverted.
2587 2575
2588 2576 Supported arguments:
2589 2577
2590 2578 %r = revset expression, parenthesized
2591 2579 %d = int(arg), no quoting
2592 2580 %s = string(arg), escaped and single-quoted
2593 2581 %b = arg.branch(), escaped and single-quoted
2594 2582 %n = hex(arg), single-quoted
2595 2583 %% = a literal '%'
2596 2584
2597 2585 Prefixing the type with 'l' specifies a parenthesized list of that type.
2598 2586
2599 2587 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2600 2588 '(10 or 11):: and ((this()) or (that()))'
2601 2589 >>> formatspec('%d:: and not %d::', 10, 20)
2602 2590 '10:: and not 20::'
2603 2591 >>> formatspec('%ld or %ld', [], [1])
2604 2592 "_list('') or 1"
2605 2593 >>> formatspec('keyword(%s)', 'foo\\xe9')
2606 2594 "keyword('foo\\\\xe9')"
2607 2595 >>> b = lambda: 'default'
2608 2596 >>> b.branch = b
2609 2597 >>> formatspec('branch(%b)', b)
2610 2598 "branch('default')"
2611 2599 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2612 2600 "root(_list('a\\x00b\\x00c\\x00d'))"
2613 2601 '''
2614 2602
2615 2603 def quote(s):
2616 2604 return repr(str(s))
2617 2605
2618 2606 def argtype(c, arg):
2619 2607 if c == 'd':
2620 2608 return str(int(arg))
2621 2609 elif c == 's':
2622 2610 return quote(arg)
2623 2611 elif c == 'r':
2624 2612 parse(arg) # make sure syntax errors are confined
2625 2613 return '(%s)' % arg
2626 2614 elif c == 'n':
2627 2615 return quote(node.hex(arg))
2628 2616 elif c == 'b':
2629 2617 return quote(arg.branch())
2630 2618
2631 2619 def listexp(s, t):
2632 2620 l = len(s)
2633 2621 if l == 0:
2634 2622 return "_list('')"
2635 2623 elif l == 1:
2636 2624 return argtype(t, s[0])
2637 2625 elif t == 'd':
2638 2626 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2639 2627 elif t == 's':
2640 2628 return "_list('%s')" % "\0".join(s)
2641 2629 elif t == 'n':
2642 2630 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2643 2631 elif t == 'b':
2644 2632 return "_list('%s')" % "\0".join(a.branch() for a in s)
2645 2633
2646 2634 m = l // 2
2647 2635 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2648 2636
2649 2637 ret = ''
2650 2638 pos = 0
2651 2639 arg = 0
2652 2640 while pos < len(expr):
2653 2641 c = expr[pos]
2654 2642 if c == '%':
2655 2643 pos += 1
2656 2644 d = expr[pos]
2657 2645 if d == '%':
2658 2646 ret += d
2659 2647 elif d in 'dsnbr':
2660 2648 ret += argtype(d, args[arg])
2661 2649 arg += 1
2662 2650 elif d == 'l':
2663 2651 # a list of some type
2664 2652 pos += 1
2665 2653 d = expr[pos]
2666 2654 ret += listexp(list(args[arg]), d)
2667 2655 arg += 1
2668 2656 else:
2669 2657 raise error.Abort('unexpected revspec format character %s' % d)
2670 2658 else:
2671 2659 ret += c
2672 2660 pos += 1
2673 2661
2674 2662 return ret
2675 2663
2676 2664 def prettyformat(tree):
2677 2665 return parser.prettyformat(tree, ('string', 'symbol'))
2678 2666
2679 2667 def depth(tree):
2680 2668 if isinstance(tree, tuple):
2681 2669 return max(map(depth, tree)) + 1
2682 2670 else:
2683 2671 return 0
2684 2672
2685 2673 def funcsused(tree):
2686 2674 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2687 2675 return set()
2688 2676 else:
2689 2677 funcs = set()
2690 2678 for s in tree[1:]:
2691 2679 funcs |= funcsused(s)
2692 2680 if tree[0] == 'func':
2693 2681 funcs.add(tree[1][1])
2694 2682 return funcs
2695 2683
2696 2684 def _formatsetrepr(r):
2697 2685 """Format an optional printable representation of a set
2698 2686
2699 2687 ======== =================================
2700 2688 type(r) example
2701 2689 ======== =================================
2702 2690 tuple ('<not %r>', other)
2703 2691 str '<branch closed>'
2704 2692 callable lambda: '<branch %r>' % sorted(b)
2705 2693 object other
2706 2694 ======== =================================
2707 2695 """
2708 2696 if r is None:
2709 2697 return ''
2710 2698 elif isinstance(r, tuple):
2711 2699 return r[0] % r[1:]
2712 2700 elif isinstance(r, str):
2713 2701 return r
2714 2702 elif callable(r):
2715 2703 return r()
2716 2704 else:
2717 2705 return repr(r)
2718 2706
2719 2707 class abstractsmartset(object):
2720 2708
2721 2709 def __nonzero__(self):
2722 2710 """True if the smartset is not empty"""
2723 2711 raise NotImplementedError()
2724 2712
2725 2713 def __contains__(self, rev):
2726 2714 """provide fast membership testing"""
2727 2715 raise NotImplementedError()
2728 2716
2729 2717 def __iter__(self):
2730 2718 """iterate the set in the order it is supposed to be iterated"""
2731 2719 raise NotImplementedError()
2732 2720
2733 2721 # Attributes containing a function to perform a fast iteration in a given
2734 2722 # direction. A smartset can have none, one, or both defined.
2735 2723 #
2736 2724 # Default value is None instead of a function returning None to avoid
2737 2725 # initializing an iterator just for testing if a fast method exists.
2738 2726 fastasc = None
2739 2727 fastdesc = None
2740 2728
2741 2729 def isascending(self):
2742 2730 """True if the set will iterate in ascending order"""
2743 2731 raise NotImplementedError()
2744 2732
2745 2733 def isdescending(self):
2746 2734 """True if the set will iterate in descending order"""
2747 2735 raise NotImplementedError()
2748 2736
2749 2737 @util.cachefunc
2750 2738 def min(self):
2751 2739 """return the minimum element in the set"""
2752 2740 if self.fastasc is not None:
2753 2741 for r in self.fastasc():
2754 2742 return r
2755 2743 raise ValueError('arg is an empty sequence')
2756 2744 return min(self)
2757 2745
2758 2746 @util.cachefunc
2759 2747 def max(self):
2760 2748 """return the maximum element in the set"""
2761 2749 if self.fastdesc is not None:
2762 2750 for r in self.fastdesc():
2763 2751 return r
2764 2752 raise ValueError('arg is an empty sequence')
2765 2753 return max(self)
2766 2754
2767 2755 def first(self):
2768 2756 """return the first element in the set (user iteration perspective)
2769 2757
2770 2758 Return None if the set is empty"""
2771 2759 raise NotImplementedError()
2772 2760
2773 2761 def last(self):
2774 2762 """return the last element in the set (user iteration perspective)
2775 2763
2776 2764 Return None if the set is empty"""
2777 2765 raise NotImplementedError()
2778 2766
2779 2767 def __len__(self):
2780 2768 """return the length of the smartsets
2781 2769
2782 2770 This can be expensive on smartset that could be lazy otherwise."""
2783 2771 raise NotImplementedError()
2784 2772
2785 2773 def reverse(self):
2786 2774 """reverse the expected iteration order"""
2787 2775 raise NotImplementedError()
2788 2776
2789 2777 def sort(self, reverse=True):
2790 2778 """get the set to iterate in an ascending or descending order"""
2791 2779 raise NotImplementedError()
2792 2780
2793 2781 def __and__(self, other):
2794 2782 """Returns a new object with the intersection of the two collections.
2795 2783
2796 2784 This is part of the mandatory API for smartset."""
2797 2785 if isinstance(other, fullreposet):
2798 2786 return self
2799 2787 return self.filter(other.__contains__, condrepr=other, cache=False)
2800 2788
2801 2789 def __add__(self, other):
2802 2790 """Returns a new object with the union of the two collections.
2803 2791
2804 2792 This is part of the mandatory API for smartset."""
2805 2793 return addset(self, other)
2806 2794
2807 2795 def __sub__(self, other):
2808 2796 """Returns a new object with the substraction of the two collections.
2809 2797
2810 2798 This is part of the mandatory API for smartset."""
2811 2799 c = other.__contains__
2812 2800 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2813 2801 cache=False)
2814 2802
2815 2803 def filter(self, condition, condrepr=None, cache=True):
2816 2804 """Returns this smartset filtered by condition as a new smartset.
2817 2805
2818 2806 `condition` is a callable which takes a revision number and returns a
2819 2807 boolean. Optional `condrepr` provides a printable representation of
2820 2808 the given `condition`.
2821 2809
2822 2810 This is part of the mandatory API for smartset."""
2823 2811 # builtins cannot be cached. but they do not need to be
2824 2812 if cache and util.safehasattr(condition, 'func_code'):
2825 2813 condition = util.cachefunc(condition)
2826 2814 return filteredset(self, condition, condrepr)
2827 2815
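# Composition sketch: the default __and__/__sub__ above wrap the operands in
# lazy filteredsets, so results combine without materializing intermediates:
#
#     >>> xs = baseset([0, 1, 2, 3])
#     >>> ys = baseset([2, 3, 4])
#     >>> sorted(xs & ys), sorted(xs - ys)
#     ([2, 3], [0, 1])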
2828 2816 class baseset(abstractsmartset):
2829 2817 """Basic data structure that represents a revset and contains the basic
2830 2818 operation that it should be able to perform.
2831 2819
2832 2820 Every method in this class should be implemented by any smartset class.
2833 2821 """
2834 2822 def __init__(self, data=(), datarepr=None):
2835 2823 """
2836 2824 datarepr: a tuple of (format, obj, ...), a function or an object that
2837 2825 provides a printable representation of the given data.
2838 2826 """
2839 2827 if not isinstance(data, list):
2840 2828 if isinstance(data, set):
2841 2829 self._set = data
2842 2830 data = list(data)
2843 2831 self._list = data
2844 2832 self._datarepr = datarepr
2845 2833 self._ascending = None
2846 2834
2847 2835 @util.propertycache
2848 2836 def _set(self):
2849 2837 return set(self._list)
2850 2838
2851 2839 @util.propertycache
2852 2840 def _asclist(self):
2853 2841 asclist = self._list[:]
2854 2842 asclist.sort()
2855 2843 return asclist
2856 2844
2857 2845 def __iter__(self):
2858 2846 if self._ascending is None:
2859 2847 return iter(self._list)
2860 2848 elif self._ascending:
2861 2849 return iter(self._asclist)
2862 2850 else:
2863 2851 return reversed(self._asclist)
2864 2852
2865 2853 def fastasc(self):
2866 2854 return iter(self._asclist)
2867 2855
2868 2856 def fastdesc(self):
2869 2857 return reversed(self._asclist)
2870 2858
2871 2859 @util.propertycache
2872 2860 def __contains__(self):
2873 2861 return self._set.__contains__
2874 2862
2875 2863 def __nonzero__(self):
2876 2864 return bool(self._list)
2877 2865
2878 2866 def sort(self, reverse=False):
2879 2867 self._ascending = not bool(reverse)
2880 2868
2881 2869 def reverse(self):
2882 2870 if self._ascending is None:
2883 2871 self._list.reverse()
2884 2872 else:
2885 2873 self._ascending = not self._ascending
2886 2874
2887 2875 def __len__(self):
2888 2876 return len(self._list)
2889 2877
2890 2878 def isascending(self):
2891 2879 """Returns True if the collection is ascending order, False if not.
2892 2880
2893 2881 This is part of the mandatory API for smartset."""
2894 2882 if len(self) <= 1:
2895 2883 return True
2896 2884 return self._ascending is not None and self._ascending
2897 2885
2898 2886 def isdescending(self):
2899 2887 """Returns True if the collection is descending order, False if not.
2900 2888
2901 2889 This is part of the mandatory API for smartset."""
2902 2890 if len(self) <= 1:
2903 2891 return True
2904 2892 return self._ascending is not None and not self._ascending
2905 2893
2906 2894 def first(self):
2907 2895 if self:
2908 2896 if self._ascending is None:
2909 2897 return self._list[0]
2910 2898 elif self._ascending:
2911 2899 return self._asclist[0]
2912 2900 else:
2913 2901 return self._asclist[-1]
2914 2902 return None
2915 2903
2916 2904 def last(self):
2917 2905 if self:
2918 2906 if self._ascending is None:
2919 2907 return self._list[-1]
2920 2908 elif self._ascending:
2921 2909 return self._asclist[-1]
2922 2910 else:
2923 2911 return self._asclist[0]
2924 2912 return None
2925 2913
2926 2914 def __repr__(self):
2927 2915 d = {None: '', False: '-', True: '+'}[self._ascending]
2928 2916 s = _formatsetrepr(self._datarepr)
2929 2917 if not s:
2930 2918 s = repr(self._list)
2931 2919 return '<%s%s %s>' % (type(self).__name__, d, s)
2932 2920
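# Ordering sketch for baseset: insertion order is kept until sort()/reverse()
# is called, after which iteration follows the cached ascending list:
#
#     >>> b = baseset([3, 1, 2])
#     >>> list(b), b.first(), b.last()
#     ([3, 1, 2], 3, 2)
#     >>> b.sort(); list(b)
#     [1, 2, 3]
#     >>> b.reverse(); list(b)
#     [3, 2, 1]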
2933 2921 class filteredset(abstractsmartset):
2934 2922 """Duck type for baseset class which iterates lazily over the revisions in
2935 2923 the subset and contains a function which tests for membership in the
2936 2924 revset
2937 2925 """
2938 2926 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2939 2927 """
2940 2928 condition: a function that decides whether a revision in the subset
2941 2929 belongs to the revset or not.
2942 2930 condrepr: a tuple of (format, obj, ...), a function or an object that
2943 2931 provides a printable representation of the given condition.
2944 2932 """
2945 2933 self._subset = subset
2946 2934 self._condition = condition
2947 2935 self._condrepr = condrepr
2948 2936
2949 2937 def __contains__(self, x):
2950 2938 return x in self._subset and self._condition(x)
2951 2939
2952 2940 def __iter__(self):
2953 2941 return self._iterfilter(self._subset)
2954 2942
2955 2943 def _iterfilter(self, it):
2956 2944 cond = self._condition
2957 2945 for x in it:
2958 2946 if cond(x):
2959 2947 yield x
2960 2948
2961 2949 @property
2962 2950 def fastasc(self):
2963 2951 it = self._subset.fastasc
2964 2952 if it is None:
2965 2953 return None
2966 2954 return lambda: self._iterfilter(it())
2967 2955
2968 2956 @property
2969 2957 def fastdesc(self):
2970 2958 it = self._subset.fastdesc
2971 2959 if it is None:
2972 2960 return None
2973 2961 return lambda: self._iterfilter(it())
2974 2962
2975 2963 def __nonzero__(self):
2976 2964 fast = self.fastasc
2977 2965 if fast is None:
2978 2966 fast = self.fastdesc
2979 2967 if fast is not None:
2980 2968 it = fast()
2981 2969 else:
2982 2970 it = self
2983 2971
2984 2972 for r in it:
2985 2973 return True
2986 2974 return False
2987 2975
2988 2976 def __len__(self):
2989 2977 # Basic implementation to be changed in future patches.
2990 2978 l = baseset([r for r in self])
2991 2979 return len(l)
2992 2980
2993 2981 def sort(self, reverse=False):
2994 2982 self._subset.sort(reverse=reverse)
2995 2983
2996 2984 def reverse(self):
2997 2985 self._subset.reverse()
2998 2986
2999 2987 def isascending(self):
3000 2988 return self._subset.isascending()
3001 2989
3002 2990 def isdescending(self):
3003 2991 return self._subset.isdescending()
3004 2992
3005 2993 def first(self):
3006 2994 for x in self:
3007 2995 return x
3008 2996 return None
3009 2997
3010 2998 def last(self):
3011 2999 it = None
3012 3000 if self.isascending():
3013 3001 it = self.fastdesc
3014 3002 elif self.isdescending():
3015 3003 it = self.fastasc
3016 3004 if it is not None:
3017 3005 for x in it():
3018 3006 return x
3019 3007 return None #empty case
3020 3008 else:
3021 3009 x = None
3022 3010 for x in self:
3023 3011 pass
3024 3012 return x
3025 3013
3026 3014 def __repr__(self):
3027 3015 xs = [repr(self._subset)]
3028 3016 s = _formatsetrepr(self._condrepr)
3029 3017 if s:
3030 3018 xs.append(s)
3031 3019 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3032 3020
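# Lazy-filtering sketch: a filteredset evaluates its condition only while
# being iterated or probed for membership:
#
#     >>> fs = filteredset(baseset([1, 2, 3, 4]), lambda r: r % 2 == 0)
#     >>> list(fs), 2 in fs, 3 in fs, len(fs)
#     ([2, 4], True, False, 2)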
3033 3021 def _iterordered(ascending, iter1, iter2):
3034 3022 """produce an ordered iteration from two iterators with the same order
3035 3023
3036 3024 The ``ascending`` flag indicates the iteration direction.
3037 3025 """
3038 3026 choice = max
3039 3027 if ascending:
3040 3028 choice = min
3041 3029
3042 3030 val1 = None
3043 3031 val2 = None
3044 3032 try:
3045 3033 # Consume both iterators in an ordered way until one is empty
3046 3034 while True:
3047 3035 if val1 is None:
3048 3036 val1 = iter1.next()
3049 3037 if val2 is None:
3050 3038 val2 = iter2.next()
3051 3039 next = choice(val1, val2)
3052 3040 yield next
3053 3041 if val1 == next:
3054 3042 val1 = None
3055 3043 if val2 == next:
3056 3044 val2 = None
3057 3045 except StopIteration:
3058 3046 # Flush any remaining values and consume the other one
3059 3047 it = iter2
3060 3048 if val1 is not None:
3061 3049 yield val1
3062 3050 it = iter1
3063 3051 elif val2 is not None:
3064 3052 # the values might have been equal, in which case both are now empty
3065 3053 yield val2
3066 3054 for val in it:
3067 3055 yield val
3068 3056
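# Merge sketch: _iterordered() merges two iterators that already share the
# requested order, dropping duplicated values:
#
#     >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#     [1, 2, 3, 5, 6]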
3069 3057 class addset(abstractsmartset):
3070 3058 """Represent the addition of two sets
3071 3059
3072 3060 Wrapper structure for lazily adding two structures without losing much
3073 3061 performance on the __contains__ method
3074 3062
3075 3063 If the ascending attribute is set, that means the two structures are
3076 3064 ordered in either an ascending or descending way. Therefore, we can add
3077 3065 them maintaining the order by iterating over both at the same time
3078 3066
3079 3067 >>> xs = baseset([0, 3, 2])
3080 3068 >>> ys = baseset([5, 2, 4])
3081 3069
3082 3070 >>> rs = addset(xs, ys)
3083 3071 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3084 3072 (True, True, False, True, 0, 4)
3085 3073 >>> rs = addset(xs, baseset([]))
3086 3074 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3087 3075 (True, True, False, 0, 2)
3088 3076 >>> rs = addset(baseset([]), baseset([]))
3089 3077 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3090 3078 (False, False, None, None)
3091 3079
3092 3080 iterate unsorted:
3093 3081 >>> rs = addset(xs, ys)
3094 3082 >>> [x for x in rs] # without _genlist
3095 3083 [0, 3, 2, 5, 4]
3096 3084 >>> assert not rs._genlist
3097 3085 >>> len(rs)
3098 3086 5
3099 3087 >>> [x for x in rs] # with _genlist
3100 3088 [0, 3, 2, 5, 4]
3101 3089 >>> assert rs._genlist
3102 3090
3103 3091 iterate ascending:
3104 3092 >>> rs = addset(xs, ys, ascending=True)
3105 3093 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3106 3094 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3107 3095 >>> assert not rs._asclist
3108 3096 >>> len(rs)
3109 3097 5
3110 3098 >>> [x for x in rs], [x for x in rs.fastasc()]
3111 3099 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3112 3100 >>> assert rs._asclist
3113 3101
3114 3102 iterate descending:
3115 3103 >>> rs = addset(xs, ys, ascending=False)
3116 3104 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3117 3105 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3118 3106 >>> assert not rs._asclist
3119 3107 >>> len(rs)
3120 3108 5
3121 3109 >>> [x for x in rs], [x for x in rs.fastdesc()]
3122 3110 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3123 3111 >>> assert rs._asclist
3124 3112
3125 3113 iterate ascending without fastasc:
3126 3114 >>> rs = addset(xs, generatorset(ys), ascending=True)
3127 3115 >>> assert rs.fastasc is None
3128 3116 >>> [x for x in rs]
3129 3117 [0, 2, 3, 4, 5]
3130 3118
3131 3119 iterate descending without fastdesc:
3132 3120 >>> rs = addset(generatorset(xs), ys, ascending=False)
3133 3121 >>> assert rs.fastdesc is None
3134 3122 >>> [x for x in rs]
3135 3123 [5, 4, 3, 2, 0]
3136 3124 """
3137 3125 def __init__(self, revs1, revs2, ascending=None):
3138 3126 self._r1 = revs1
3139 3127 self._r2 = revs2
3140 3128 self._iter = None
3141 3129 self._ascending = ascending
3142 3130 self._genlist = None
3143 3131 self._asclist = None
3144 3132
3145 3133 def __len__(self):
3146 3134 return len(self._list)
3147 3135
3148 3136 def __nonzero__(self):
3149 3137 return bool(self._r1) or bool(self._r2)
3150 3138
3151 3139 @util.propertycache
3152 3140 def _list(self):
3153 3141 if not self._genlist:
3154 3142 self._genlist = baseset(iter(self))
3155 3143 return self._genlist
3156 3144
3157 3145 def __iter__(self):
3158 3146 """Iterate over both collections without repeating elements
3159 3147
3160 3148 If the ascending attribute is not set, iterate over the first one and
3161 3149         then over the second one, checking for membership in the first one so we
3162 3150         don't yield any duplicates.
3163 3151
3164 3152 If the ascending attribute is set, iterate over both collections at the
3165 3153 same time, yielding only one value at a time in the given order.
3166 3154 """
3167 3155 if self._ascending is None:
3168 3156 if self._genlist:
3169 3157 return iter(self._genlist)
3170 3158 def arbitraryordergen():
3171 3159 for r in self._r1:
3172 3160 yield r
3173 3161 inr1 = self._r1.__contains__
3174 3162 for r in self._r2:
3175 3163 if not inr1(r):
3176 3164 yield r
3177 3165 return arbitraryordergen()
3178 3166 # try to use our own fast iterator if it exists
3179 3167 self._trysetasclist()
3180 3168 if self._ascending:
3181 3169 attr = 'fastasc'
3182 3170 else:
3183 3171 attr = 'fastdesc'
3184 3172 it = getattr(self, attr)
3185 3173 if it is not None:
3186 3174 return it()
3187 3175         # maybe half of the components support a fast iterator
3188 3176 # get iterator for _r1
3189 3177 iter1 = getattr(self._r1, attr)
3190 3178 if iter1 is None:
3191 3179             # let's avoid side effects (not sure it matters)
3192 3180 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3193 3181 else:
3194 3182 iter1 = iter1()
3195 3183 # get iterator for _r2
3196 3184 iter2 = getattr(self._r2, attr)
3197 3185 if iter2 is None:
3198 3186             # let's avoid side effects (not sure it matters)
3199 3187 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3200 3188 else:
3201 3189 iter2 = iter2()
3202 3190 return _iterordered(self._ascending, iter1, iter2)
3203 3191
3204 3192 def _trysetasclist(self):
3205 3193 """populate the _asclist attribute if possible and necessary"""
3206 3194 if self._genlist is not None and self._asclist is None:
3207 3195 self._asclist = sorted(self._genlist)
3208 3196
3209 3197 @property
3210 3198 def fastasc(self):
3211 3199 self._trysetasclist()
3212 3200 if self._asclist is not None:
3213 3201 return self._asclist.__iter__
3214 3202 iter1 = self._r1.fastasc
3215 3203 iter2 = self._r2.fastasc
3216 3204 if None in (iter1, iter2):
3217 3205 return None
3218 3206 return lambda: _iterordered(True, iter1(), iter2())
3219 3207
3220 3208 @property
3221 3209 def fastdesc(self):
3222 3210 self._trysetasclist()
3223 3211 if self._asclist is not None:
3224 3212 return self._asclist.__reversed__
3225 3213 iter1 = self._r1.fastdesc
3226 3214 iter2 = self._r2.fastdesc
3227 3215 if None in (iter1, iter2):
3228 3216 return None
3229 3217 return lambda: _iterordered(False, iter1(), iter2())
3230 3218
3231 3219 def __contains__(self, x):
3232 3220 return x in self._r1 or x in self._r2
3233 3221
3234 3222 def sort(self, reverse=False):
3235 3223 """Sort the added set
3236 3224
3237 3225 For this we use the cached list with all the generated values and if we
3238 3226 know they are ascending or descending we can sort them in a smart way.
3239 3227 """
3240 3228 self._ascending = not reverse
3241 3229
3242 3230 def isascending(self):
3243 3231 return self._ascending is not None and self._ascending
3244 3232
3245 3233 def isdescending(self):
3246 3234 return self._ascending is not None and not self._ascending
3247 3235
3248 3236 def reverse(self):
3249 3237 if self._ascending is None:
3250 3238 self._list.reverse()
3251 3239 else:
3252 3240 self._ascending = not self._ascending
3253 3241
3254 3242 def first(self):
3255 3243 for x in self:
3256 3244 return x
3257 3245 return None
3258 3246
3259 3247 def last(self):
3260 3248 self.reverse()
3261 3249 val = self.first()
3262 3250 self.reverse()
3263 3251 return val
3264 3252
3265 3253 def __repr__(self):
3266 3254 d = {None: '', False: '-', True: '+'}[self._ascending]
3267 3255 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3268 3256
3269 3257 class generatorset(abstractsmartset):
3270 3258 """Wrap a generator for lazy iteration
3271 3259
3272 3260 Wrapper structure for generators that provides lazy membership and can
3273 3261 be iterated more than once.
3274 3262     When asked for membership, it generates values until either it finds the
3275 3263     requested one or has gone through all the elements in the generator.
3276 3264 """
3277 3265 def __init__(self, gen, iterasc=None):
3278 3266 """
3279 3267 gen: a generator producing the values for the generatorset.
3280 3268 """
3281 3269 self._gen = gen
3282 3270 self._asclist = None
3283 3271 self._cache = {}
3284 3272 self._genlist = []
3285 3273 self._finished = False
3286 3274 self._ascending = True
3287 3275 if iterasc is not None:
3288 3276 if iterasc:
3289 3277 self.fastasc = self._iterator
3290 3278 self.__contains__ = self._asccontains
3291 3279 else:
3292 3280 self.fastdesc = self._iterator
3293 3281 self.__contains__ = self._desccontains
3294 3282
3295 3283 def __nonzero__(self):
3296 3284 # Do not use 'for r in self' because it will enforce the iteration
3297 3285 # order (default ascending), possibly unrolling a whole descending
3298 3286 # iterator.
3299 3287 if self._genlist:
3300 3288 return True
3301 3289 for r in self._consumegen():
3302 3290 return True
3303 3291 return False
3304 3292
3305 3293 def __contains__(self, x):
3306 3294 if x in self._cache:
3307 3295 return self._cache[x]
3308 3296
3309 3297 # Use new values only, as existing values would be cached.
3310 3298 for l in self._consumegen():
3311 3299 if l == x:
3312 3300 return True
3313 3301
3314 3302 self._cache[x] = False
3315 3303 return False
3316 3304
3317 3305 def _asccontains(self, x):
3318 3306 """version of contains optimised for ascending generator"""
3319 3307 if x in self._cache:
3320 3308 return self._cache[x]
3321 3309
3322 3310 # Use new values only, as existing values would be cached.
3323 3311 for l in self._consumegen():
3324 3312 if l == x:
3325 3313 return True
3326 3314 if l > x:
3327 3315 break
3328 3316
3329 3317 self._cache[x] = False
3330 3318 return False
3331 3319
3332 3320 def _desccontains(self, x):
3333 3321 """version of contains optimised for descending generator"""
3334 3322 if x in self._cache:
3335 3323 return self._cache[x]
3336 3324
3337 3325 # Use new values only, as existing values would be cached.
3338 3326 for l in self._consumegen():
3339 3327 if l == x:
3340 3328 return True
3341 3329 if l < x:
3342 3330 break
3343 3331
3344 3332 self._cache[x] = False
3345 3333 return False
3346 3334
3347 3335 def __iter__(self):
3348 3336 if self._ascending:
3349 3337 it = self.fastasc
3350 3338 else:
3351 3339 it = self.fastdesc
3352 3340 if it is not None:
3353 3341 return it()
3354 3342 # we need to consume the iterator
3355 3343 for x in self._consumegen():
3356 3344 pass
3357 3345 # recall the same code
3358 3346 return iter(self)
3359 3347
3360 3348 def _iterator(self):
3361 3349 if self._finished:
3362 3350 return iter(self._genlist)
3363 3351
3364 3352 # We have to use this complex iteration strategy to allow multiple
3365 3353         # iterations at the same time. We need to be able to catch revisions
3366 3354         # removed from _consumegen and added to genlist by another instance.
3367 3355 #
3368 3356         # Getting rid of it would provide about a 15% speedup on this
3369 3357 # iteration.
3370 3358 genlist = self._genlist
3371 3359 nextrev = self._consumegen().next
3372 3360 _len = len # cache global lookup
3373 3361 def gen():
3374 3362 i = 0
3375 3363 while True:
3376 3364 if i < _len(genlist):
3377 3365 yield genlist[i]
3378 3366 else:
3379 3367 yield nextrev()
3380 3368 i += 1
3381 3369 return gen()
3382 3370
3383 3371 def _consumegen(self):
3384 3372 cache = self._cache
3385 3373 genlist = self._genlist.append
3386 3374 for item in self._gen:
3387 3375 cache[item] = True
3388 3376 genlist(item)
3389 3377 yield item
3390 3378 if not self._finished:
3391 3379 self._finished = True
3392 3380 asc = self._genlist[:]
3393 3381 asc.sort()
3394 3382 self._asclist = asc
3395 3383 self.fastasc = asc.__iter__
3396 3384 self.fastdesc = asc.__reversed__
3397 3385
3398 3386 def __len__(self):
3399 3387 for x in self._consumegen():
3400 3388 pass
3401 3389 return len(self._genlist)
3402 3390
3403 3391 def sort(self, reverse=False):
3404 3392 self._ascending = not reverse
3405 3393
3406 3394 def reverse(self):
3407 3395 self._ascending = not self._ascending
3408 3396
3409 3397 def isascending(self):
3410 3398 return self._ascending
3411 3399
3412 3400 def isdescending(self):
3413 3401 return not self._ascending
3414 3402
3415 3403 def first(self):
3416 3404 if self._ascending:
3417 3405 it = self.fastasc
3418 3406 else:
3419 3407 it = self.fastdesc
3420 3408 if it is None:
3421 3409 # we need to consume all and try again
3422 3410 for x in self._consumegen():
3423 3411 pass
3424 3412 return self.first()
3425 3413 return next(it(), None)
3426 3414
3427 3415 def last(self):
3428 3416 if self._ascending:
3429 3417 it = self.fastdesc
3430 3418 else:
3431 3419 it = self.fastasc
3432 3420 if it is None:
3433 3421 # we need to consume all and try again
3434 3422 for x in self._consumegen():
3435 3423 pass
3436 3424             return self.last()
3437 3425 return next(it(), None)
3438 3426
3439 3427 def __repr__(self):
3440 3428 d = {False: '-', True: '+'}[self._ascending]
3441 3429 return '<%s%s>' % (type(self).__name__, d)
3442 3430
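# --- illustrative sketch (standalone, not part of revset.py) ------------------
# generatorset consumes its generator lazily and caches what it has seen; with
# iterasc=True a membership test can stop early.  Assumes Python 2 and an
# importable mercurial.revset; the revision numbers are made up.
from mercurial.revset import generatorset

def _revs():
    for r in [2, 5, 7, 11]:
        print 'producing %d' % r
        yield r

gs = generatorset(_revs(), iterasc=True)
print 7 in gs       # True -- produces 2, 5, 7 and stops (ascending shortcut)
print 5 in gs       # True -- answered from the cache, nothing new is produced
print list(gs)      # [2, 5, 7, 11] -- the remaining value is pulled on demand
# ------------------------------------------------------------------------------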
3443 3431 class spanset(abstractsmartset):
3444 3432 """Duck type for baseset class which represents a range of revisions and
3445 3433 can work lazily and without having all the range in memory
3446 3434
3447 3435     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3448 3436     notable points:
3449 3437     - when y < x it will be automatically descending,
3450 3438     - revisions filtered with this repoview will be skipped.
3451 3439
3452 3440 """
3453 3441 def __init__(self, repo, start=0, end=None):
3454 3442 """
3455 3443         start: first revision included in the set
3456 3444           (defaults to 0)
3457 3445         end: first revision excluded (last + 1)
3458 3446           (defaults to len(repo))
3459 3447
3460 3448 Spanset will be descending if `end` < `start`.
3461 3449 """
3462 3450 if end is None:
3463 3451 end = len(repo)
3464 3452 self._ascending = start <= end
3465 3453 if not self._ascending:
3466 3454             start, end = end + 1, start + 1
3467 3455 self._start = start
3468 3456 self._end = end
3469 3457 self._hiddenrevs = repo.changelog.filteredrevs
3470 3458
3471 3459 def sort(self, reverse=False):
3472 3460 self._ascending = not reverse
3473 3461
3474 3462 def reverse(self):
3475 3463 self._ascending = not self._ascending
3476 3464
3477 3465 def _iterfilter(self, iterrange):
3478 3466 s = self._hiddenrevs
3479 3467 for r in iterrange:
3480 3468 if r not in s:
3481 3469 yield r
3482 3470
3483 3471 def __iter__(self):
3484 3472 if self._ascending:
3485 3473 return self.fastasc()
3486 3474 else:
3487 3475 return self.fastdesc()
3488 3476
3489 3477 def fastasc(self):
3490 3478 iterrange = xrange(self._start, self._end)
3491 3479 if self._hiddenrevs:
3492 3480 return self._iterfilter(iterrange)
3493 3481 return iter(iterrange)
3494 3482
3495 3483 def fastdesc(self):
3496 3484 iterrange = xrange(self._end - 1, self._start - 1, -1)
3497 3485 if self._hiddenrevs:
3498 3486 return self._iterfilter(iterrange)
3499 3487 return iter(iterrange)
3500 3488
3501 3489 def __contains__(self, rev):
3502 3490 hidden = self._hiddenrevs
3503 3491 return ((self._start <= rev < self._end)
3504 3492 and not (hidden and rev in hidden))
3505 3493
3506 3494 def __nonzero__(self):
3507 3495 for r in self:
3508 3496 return True
3509 3497 return False
3510 3498
3511 3499 def __len__(self):
3512 3500 if not self._hiddenrevs:
3513 3501 return abs(self._end - self._start)
3514 3502 else:
3515 3503 count = 0
3516 3504 start = self._start
3517 3505 end = self._end
3518 3506 for rev in self._hiddenrevs:
3519 3507 if (end < rev <= start) or (start <= rev < end):
3520 3508 count += 1
3521 3509 return abs(self._end - self._start) - count
3522 3510
3523 3511 def isascending(self):
3524 3512 return self._ascending
3525 3513
3526 3514 def isdescending(self):
3527 3515 return not self._ascending
3528 3516
3529 3517 def first(self):
3530 3518 if self._ascending:
3531 3519 it = self.fastasc
3532 3520 else:
3533 3521 it = self.fastdesc
3534 3522 for x in it():
3535 3523 return x
3536 3524 return None
3537 3525
3538 3526 def last(self):
3539 3527 if self._ascending:
3540 3528 it = self.fastdesc
3541 3529 else:
3542 3530 it = self.fastasc
3543 3531 for x in it():
3544 3532 return x
3545 3533 return None
3546 3534
3547 3535 def __repr__(self):
3548 3536 d = {False: '-', True: '+'}[self._ascending]
3549 3537 return '<%s%s %d:%d>' % (type(self).__name__, d,
3550 3538 self._start, self._end - 1)
3551 3539
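# --- illustrative sketch (standalone, not part of revset.py) ------------------
# spanset only needs len(repo) and repo.changelog.filteredrevs, so a couple of
# hypothetical stub objects are enough to show the range and filtering
# behaviour described above.  Assumes Python 2 and an importable
# mercurial.revset.
from mercurial.revset import spanset

class _fakechangelog(object):
    filteredrevs = frozenset([3])       # pretend revision 3 is hidden

class _fakerepo(object):
    changelog = _fakechangelog()
    def __len__(self):
        return 6                        # revisions 0..5 exist

repo = _fakerepo()
print list(spanset(repo, 0, 6))         # [0, 1, 2, 4, 5] -- 3 is filtered out
print list(spanset(repo, 5, 1))         # [5, 4, 2] -- end < start, descending
# ------------------------------------------------------------------------------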
3552 3540 class fullreposet(spanset):
3553 3541 """a set containing all revisions in the repo
3554 3542
3555 3543 This class exists to host special optimization and magic to handle virtual
3556 3544 revisions such as "null".
3557 3545 """
3558 3546
3559 3547 def __init__(self, repo):
3560 3548 super(fullreposet, self).__init__(repo)
3561 3549
3562 3550 def __and__(self, other):
3563 3551 """As self contains the whole repo, all of the other set should also be
3564 3552 in self. Therefore `self & other = other`.
3565 3553
3566 3554 This boldly assumes the other contains valid revs only.
3567 3555 """
3568 3556         # other is not a smartset, make it so
3569 3557 if not util.safehasattr(other, 'isascending'):
3570 3558             # filter out hidden revisions
3571 3559             # (this boldly assumes all smartsets are pure)
3572 3560 #
3573 3561             # `other` was used with "&", let's assume this is a set-like
3574 3562             # object.
3575 3563 other = baseset(other - self._hiddenrevs)
3576 3564
3577 3565 # XXX As fullreposet is also used as bootstrap, this is wrong.
3578 3566 #
3579 3567 # With a giveme312() revset returning [3,1,2], this makes
3580 3568 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3581 3569         # We cannot just drop it because other usages still need to sort it:
3582 3570 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3583 3571 #
3584 3572         # There are also some faulty revset implementations that rely on it
3585 3573 # (eg: children as of its state in e8075329c5fb)
3586 3574 #
3587 3575 # When we fix the two points above we can move this into the if clause
3588 3576 other.sort(reverse=self.isdescending())
3589 3577 return other
3590 3578
3591 3579 def prettyformatset(revs):
3592 3580 lines = []
3593 3581 rs = repr(revs)
3594 3582 p = 0
3595 3583 while p < len(rs):
3596 3584 q = rs.find('<', p + 1)
3597 3585 if q < 0:
3598 3586 q = len(rs)
3599 3587 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3600 3588 assert l >= 0
3601 3589 lines.append((l, rs[p:q].rstrip()))
3602 3590 p = q
3603 3591 return '\n'.join(' ' * l + s for l, s in lines)
3604 3592
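# --- illustrative sketch (standalone, not part of revset.py) ------------------
# prettyformatset() indents the repr of a nested smartset one space per
# nesting level, which makes large combined sets easier to read when
# debugging.  Assumes Python 2 and an importable mercurial.revset; the exact
# repr text depends on the classes above, so the expected output is only
# approximate.
from mercurial.revset import addset, baseset, prettyformatset

rs = addset(baseset([1, 2]), baseset([4, 3]))
print prettyformatset(rs)
# roughly:
# <addset
#  <baseset [1, 2]>,
#  <baseset [4, 3]>>
# ------------------------------------------------------------------------------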
3605 3593 def loadpredicate(ui, extname, registrarobj):
3606 3594 """Load revset predicates from specified registrarobj
3607 3595 """
3608 3596 for name, func in registrarobj._table.iteritems():
3609 3597 symbols[name] = func
3610 3598 if func._safe:
3611 3599 safesymbols.add(name)
3612 3600
3613 3601 # load built-in predicates explicitly to set up safesymbols
3614 3602 loadpredicate(None, None, predicate)
3615 3603
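# --- illustrative sketch (standalone, not part of revset.py) ------------------
# This is roughly how an extension would expose a new predicate so that
# loadpredicate() above registers it.  'manychildren' is a made-up name, the
# body is only a sketch, and registrar.revsetpredicate, getargs() and
# changelog.children() are used here as they exist elsewhere in Mercurial.
from mercurial import registrar, revset

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('manychildren()', safe=True)
def manychildren(repo, subset, x):
    """Changesets with more than one child."""
    revset.getargs(x, 0, 0, 'manychildren takes no arguments')
    cl = repo.changelog
    return subset.filter(lambda r: len(cl.children(cl.node(r))) > 1)

# hg's extension loader typically calls loadpredicate() for each extension
# that defines a 'revsetpredicate' table like the one above.
# ------------------------------------------------------------------------------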
3616 3604 # tell hggettext to extract docstrings from these functions:
3617 3605 i18nfunctions = symbols.values()