##// END OF EJS Templates
revset: refactor to make xgettext put i18n comments into hg.pot file...
FUJIWARA Katsunori -
r29646:a8a5dd89 stable
parent child Browse files
Show More
@@ -1,3667 +1,3666
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    When followfirst is true, only the first parent of each revision is
    followed (cut == 1 slices parentrevs() down to the first entry).
    Returns a descending generatorset of the ancestors of ``revs``,
    including ``revs`` themselves.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk from highest to lowest revision.  heapq is a min-heap, so
        # a max-heap is emulated by pushing negated revision numbers.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # merge further input revisions into the heap as soon as the
            # descending walk reaches them
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
62 62
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    When followfirst is true, only first-parent links are considered
    when deciding whether a revision descends from ``revs``.  Returns an
    ascending generatorset.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending pass: a revision is a descendant when one
            # of its (considered) parents is already in 'seen'
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
91 91
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used when the changelog has no C
    ``reachableroots`` implementation.  ``minroot`` bounds the walk:
    parents below it are never visited.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; no need to
                # expand this revision's parents
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending rev order): pull in every visited revision
    # that has a reachable parent, i.e. lies on a roots::heads path
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # prefer the native implementation when the changelog offers one
        found = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                              includepath)
    except AttributeError:
        found = _reachablerootspure(repo, minroot, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
146 146
# Grammar table consumed by the generic parser: each entry maps a
# token-type to its parsing behavior.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# operator words the tokenizer must emit as operators, not symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (Python 2 byte strings; bytes > 127 admit UTF-8 continuation bytes)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # main scanner loop; 'pos' indexes into 'program' and each token
    # carries the offset where it starts
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings keep backslashes verbatim; plain strings
            # go through the parser's escape decoding
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getsymbol(x):
    """Return the name carried by a ('symbol', name, ...) tree node."""
    if not x or x[0] != 'symbol':
        raise error.ParseError(_('not a symbol'))
    return x[1]
309 309
def getstring(x, err):
    """Return the payload of a string or symbol node.

    Raises ParseError(err) for any other node, including None."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
314 314
def getlist(x):
    """Flatten a parse node into a list of argument subtrees.

    None becomes [], a 'list' node yields its children, anything else
    is wrapped as a single-element list."""
    if not x:
        return []
    return list(x[1:]) if x[0] == 'list' else [x]
321 321
def getargs(x, min, max, err):
    """Return the argument list of ``x``, enforcing arity bounds.

    Raises ParseError(err) when fewer than ``min`` or (when max >= 0)
    more than ``max`` arguments are present; a negative ``max`` means
    unbounded."""
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if max >= 0 and len(args) > max:
        raise error.ParseError(err)
    return args
327 327
def getargsdict(x, funcname, keys):
    """Parse the argument tree ``x`` into a dict keyed by the names in
    ``keys`` (a space-separated string), honoring ``key=value`` pairs.

    ``funcname`` is presumably surfaced in parse-error messages by
    parser.buildargsdict — confirm against that helper.
    """
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
331 331
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` within ``subset`` and return a smartset.

    Non-smartset results (detected via the 'isascending' attribute) are
    wrapped in a baseset; predicates returning such values are deprecated.
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        # only registered (possibly third-party) predicates get the
        # deprecation warning
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(s)
345 345
def _getrevsource(repo, r):
    """Return the rev recorded as r's graft/transplant/rebase source.

    Returns None when no source is recorded or it cannot be resolved in
    this repository."""
    extradata = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key not in extradata:
            continue
        try:
            return repo[extradata[key]].rev()
        except error.RepoLookupError:
            # recorded source is unknown here; fall through to next key
            pass
    return None
355 355
356 356 # operator methods
357 357
def stringset(repo, subset, x):
    """Resolve the literal revision spec ``x`` within ``subset``."""
    rev = repo[x].rev()
    # nullrev is implicitly part of a fullreposet even though iterating
    # it would not produce nullrev
    if rev in subset or (rev == node.nullrev
                         and isinstance(subset, fullreposet)):
        return baseset([rev])
    return baseset()
364 364
def rangeset(repo, subset, x, y):
    """Evaluate the 'x:y' range operator.

    Endpoints are resolved against the whole repo; the first rev of x and
    the last rev of y delimit the span, which is then narrowed to subset.
    The working directory pseudo-rev (wdirrev) gets special handling at
    either end.
    """
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range, e.g. 'tip:0'
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
389 389
def dagrange(repo, subset, x, y):
    """Evaluate the 'x::y' DAG-range operator within subset."""
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    onpath = reachableroots(repo, roots, heads, includepath=True)
    return subset & onpath
395 395
def andset(repo, subset, x, y):
    """Intersect by evaluating y within the result of x."""
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
398 398
def differenceset(repo, subset, x, y):
    """Revisions of x (within subset) that are not in y."""
    left = getset(repo, subset, x)
    right = getset(repo, subset, y)
    return left - right
401 401
def orset(repo, subset, *xs):
    """Union of all expressions in xs, evaluated within subset."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    # split in half so the union tree stays balanced
    mid = len(xs) // 2
    return (orset(repo, subset, *xs[:mid])
            + orset(repo, subset, *xs[mid:]))
410 410
def notset(repo, subset, x):
    """Members of subset not matched by x."""
    excluded = getset(repo, subset, x)
    return subset - excluded
413 413
def listset(repo, subset, *xs):
    # A bare 'a, b' list is never a valid revset expression; point the
    # user at the 'or' operator instead.
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
417 417
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only meaningful as a function argument (handled by
    # getargsdict); reject it anywhere else in an expression
    raise error.ParseError(_("can't use a key-value pair in this context"))
420 420
def func(repo, subset, a, b):
    """Dispatch a function-call parse node to its registered predicate."""
    name = getsymbol(a)
    if name in symbols:
        return symbols[name](repo, subset, b)

    # Unknown function: suggest only predicates that carry a docstring,
    # i.e. the publicly documented ones.
    documented = [s for (s, fn) in symbols.items()
                  if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(name, documented)
430 430
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator registering a function into 'symbols' under its revset name
predicate = registrar.revsetpredicate()
447 447
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (deliberately no docstring: undocumented predicates are hidden
    # from help output and identifier suggestions)
    #
    # fix: pass this predicate's own name to getargsdict instead of the
    # copy-pasted 'limit', so argument errors are reported against
    # '_destupdate'
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
453 453
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (deliberately no docstring: keeps the predicate out of help)
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
461 461
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status-tuple field 1 holds the added files
    return checkstatus(repo, subset, pattern, 1)
473 473
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
498 498
def _ancestors(repo, subset, x, followfirst=False):
    """Common implementation of ancestors()/_firstancestors()."""
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
505 505
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # note: the revisions in set are themselves included
    return _ancestors(repo, subset, x)
511 511
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (internal: no docstring so it stays out of user-visible help)
    return _ancestors(repo, subset, x, followfirst=True)
517 517
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse-tree node, e.g. ('symbol', '3', pos)
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n first-parent links back from r
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
534 534
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    def byuser(r):
        return matcher(encoding.lower(repo[r].user()))
    return subset.filter(byuser, condrepr=('<user %r>', needle))
544 544
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
561 561
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # plain alias of bisect(); left without a docstring on purpose so it
    # is omitted from help output
    return bisect(repo, subset, x)
567 567
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dict lookup
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: scan all bookmark names
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
605 605
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                # user explicitly asked for a literal match; do not fall
                # back to revspec interpretation
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # treat the argument as a revset and collect the branches of its revs
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
645 645
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
656 656
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    cl = repo.changelog
    try:
        # only a bundle repository's changelog carries 'bundlerevs'
        bundlerevs = cl.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
668 668
def checkstatus(repo, subset, pat, field):
    """Filter subset to changesets whose status tuple entry ``field``
    (an index into the repo.status() result; e.g. adds() passes 1)
    contains a file matching ``pat``.
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure can rebuild the matcher; fileset
    # patterns depend on the changectx and cannot be cached
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: cheap membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
700 700
def _children(repo, subset, parentset):
    """Return the members of subset having a parent in parentset."""
    if not parentset:
        return baseset()
    found = set()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    for rev in subset:
        # a child's rev is always greater than any of its parents'
        if rev <= minrev:
            continue
        for p in parentrevs(rev):
            if p in parentset:
                found.add(rev)
    return baseset(found)
714 714
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parentset)
722 722
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(r):
        return repo[r].closesbranch()
    return subset.filter(isclosed, condrepr='<branch closed>')
731 731
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test is enough
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan every file in the manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches, condrepr=('<contains %r>', pat))
758 758
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    rev = None
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def wasconverted(r):
        origin = repo[r].extra().get('convert_revision', None)
        if origin is None:
            return False
        return rev is None or origin.startswith(rev)

    return subset.filter(wasconverted, condrepr=('<converted %r>', rev))
781 781
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    inrange = util.matchdate(spec)
    def matches(r):
        # ctx.date() is (unixtime, tzoffset); only the timestamp matters
        return inrange(repo[r].date()[0])
    return subset.filter(matches, condrepr=('<date %r>', spec))
791 791
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def containsneedle(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(containsneedle, condrepr=('<desc %r>', needle))
804 804
def _descendants(repo, subset, x, followfirst=False):
    """Common implementation of descendants()/_firstdescendants().

    The roots themselves are part of the result; the union is assembled
    lazily when subset's order allows it.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # arbitrary subset order: re-intersect so subset's order wins
        result = subset & result
    return result
823 823
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # note: the revisions in set are themselves included
    return _descendants(repo, subset, x)
829 829
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (internal: no docstring so it stays out of user-visible help)
    return _descendants(repo, subset, x, followfirst=True)
835 835
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
880 880
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
890 890
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
899 899
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # value is None when only the label was given: any value matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r),
                         condrepr=('<extra[%r] %r>', label, value))
930 930
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: canonicalize it and look at that single file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against files tracked in the working directory
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known maps filenode -> changelog rev found by the scan below, so
        # a filenode seen again can be resolved without rescanning
        known = {}
        # scanpos: changelog position already covered by a previous scan;
        # None while no further scanning is useful (a scan came up empty)
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                # linkrev is visible under the current repoview: use it
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
996 996
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # delegate straight to limit(); both share argument handling there
    return limit(repo, subset, x)
1002 1002
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst(): ancestors of the
    # working directory's first parent, or the histories of files matching
    # an optional pattern. 'name' is only used in error messages.
    args = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    ctx = repo['.']
    if not args:
        # no pattern: every ancestor of the working directory's first parent
        revs = _revancestors(repo, baseset([ctx.rev()]), followfirst)
    else:
        pat = getstring(args[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [pat],
                                 ctx=repo[None], default='path')

        revs = set()
        for fname in ctx.manifest().walk(matcher):
            fctx = ctx[fname]
            revs.update(c.rev() for c in fctx.ancestors(followfirst))
            # include the revision responsible for the most recent version
            revs.add(fctx.introrev())

    return subset & revs
1023 1023
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # shared implementation; 'follow' is only used for error messages
    return _follow(repo, subset, x, 'follow')
1032 1032
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # Shares its implementation with follow(); only followfirst differs.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1039 1039
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with the full repo span drops "null" if subset had it
    return subset & spanset(repo)
1047 1047
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    # search changed files, user name and description of each candidate
    def matches(rev):
        ctx = repo[rev]
        texts = ctx.files() + [ctx.user(), ctx.description()]
        return any(gr.search(t) for t in texts)

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1068 1068
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    args = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev = default = None
    for arg in args:
        spec = getstring(arg, "_matchfiles requires string arguments")
        prefix, value = spec[:2], spec[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # Reading the changelog data directly is much cheaper than creating a
    # changectx for every revision.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the changelog has no entry for the working directory
            files = repo[x].files()
        else:
            files = getfiles(x)
        return any(m(f) for f in files)

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1132 1132
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # build a _matchfiles call with a single plain ('p:') pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1145 1145
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the revision of every head node of every named branch
    for branchheads in repo.branchmap().itervalues():
        for h in branchheads:
            hs.add(cl.rev(h))
    return subset & baseset(hs)
1157 1157
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    # parents() of the set covers exactly those members that have a child
    # in the set; removing them leaves the heads
    return s - parents(repo, subset, x)
1165 1165
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1174 1174
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # search the changed files, the user name and the description,
        # lower-cased for case-insensitive matching
        for t in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189 1189
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # skip the first 'ofs' members of the evaluated set
    for i in xrange(ofs):
        if next(it, None) is None:
            break
    # then collect up to 'lim' members that also belong to 'subset'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1226 1226
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # walk the set from its end by reversing it first
    os.reverse()
    result = []
    it = iter(os)
    # collect up to 'lim' members that also belong to 'subset'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252 1252
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty,
        # same as python's max(); fall through to the empty result
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1267 1267
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    # a merge has a real (non -1) second parent
    parentrevs = repo.changelog.parentrevs
    return subset.filter(lambda r: parentrevs(r)[1] != -1,
                         condrepr='<merge>')
1277 1277
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many revisions have r as a parent
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1297 1297
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty,
        # same as python's min(); fall through to the empty result
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1312 1312
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # NOTE(review): index 0 presumably selects the "modified" status field
    # in checkstatus() (removes() passes 2) -- confirm at its definition
    return checkstatus(repo, subset, pat, 0)
1324 1324
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name, possibly written with a 'literal:' prefix
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        namespaces.update(n for name, n in repo.names.iteritems()
                          if matcher(name))
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions of every non-deprecated name in each namespace
    names = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name not in namespace.deprecated:
                names.update(repo[n].rev()
                             for n in namespace.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1362 1362
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex nodeid: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node, or string is not valid hex: no match
            rn = None
    else:
        # shorter string: rely on the changelog's unambiguous-prefix lookup
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1386 1386
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the precomputed set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1394 1394
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every repo head that is neither in the first set nor
        # one of its descendants
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1420 1420
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to its earliest entry;
        # None when 'rev' has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1452 1452
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured 'default-push' path, then 'default'
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer the ui so discovery's status output is not shown to the user
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    # translate the missing nodes back into local revision numbers
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1479 1479
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: its first parent, unless that is null
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1498 1498
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: its second parent, if there is one
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1521 1521
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1541 1541
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: the per-phase revision sets are precomputed
        revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: query the phase of each candidate revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1555 1555
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1563 1563
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1571 1571
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    revs = getset(repo, fullreposet(repo), x)
    # dispatch on 'n' once rather than per revision
    if n == 0:
        # ^0 is the set itself
        ps.update(revs)
    elif n == 1:
        for r in revs:
            ps.add(cl.parentrevs(r)[0])
    else:
        for r in revs:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1596 1596
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # an unknown revision yields an empty set instead of aborting
        return baseset()
1610 1610
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: union of every non-public phase set, minus the
        # revisions filtered out of the current view
        revs = set()
        for phaseset in phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: query the phase of each candidate revision
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) != target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1629 1629
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # checked per revision; NOTE(review): unlike draft()/secret() this does
    # not go through _phase() -- confirm whether the fast path applies here
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target,
                         condrepr=('<phase %r>', target),
                         cache=False)
1640 1640
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    # fall back to the configured 'default' path when no path was given
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map it back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1675 1675
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # NOTE(review): index 2 presumably selects the "removed" status field
    # in checkstatus() (modifies() passes 0) -- confirm at its definition
    return checkstatus(repo, subset, pat, 2)
1687 1687
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is accepted even though it is not in the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1703 1703
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # the reference revision(s) to compare every candidate against
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' and 'user' are synonyms; normalize to 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x is kept if it agrees with ANY reference rev on ALL fields
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815 1815
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    # resolve the argument within the current subset, then flip in place
    resolved = getset(repo, subset, x)
    resolved.reverse()
    return resolved
1823 1823
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs

    def isroot(rev):
        # a root has no (non-null) parent contained in the set itself
        return not any(p >= 0 and p in candidates for p in parentrevs(rev))

    return subset & candidates.filter(isroot, condrepr='<roots>')
1836 1836
# sort key name -> function extracting the comparison key from a change
# context (see sort(), which applies these to repo[r] objects); 'user'
# and 'author' are aliases for the same field
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1845 1845
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for spec in keys.split():
        descending = spec.startswith('-')
        key = spec[1:] if descending else spec
        if key != 'topo' and key not in _sortkeyfuncs:
            raise error.ParseError(_("unknown sort key %r") % spec)
        keyflags.append((key, descending))

    if len(keyflags) > 1 and any(key == 'topo' for key, d in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if not any(key == 'topo' for key, d in keyflags):
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))
        opts['topo.firstbranch'] = args['topo.firstbranch']

    return args['set'], keyflags, opts
1883 1882
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    if not keyflags:
        # no keys: keep the set's natural order
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: sort by revision number directly on the smartset
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            # '-topo' simply reverses the topological ordering
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable: apply keys from least to most
    # significant so that earlier keys dominate the final ordering
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1926 1925
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    'revs' is sorted in place (highest revision first); 'parentsfunc'
    maps a revision to its parent revisions; 'firstbranch' pre-seeds the
    set of unblocked revisions (see below) to prioritize one branch.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or if we emitted a whole disconnected set of the graph (reached
            # a root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2124 2123
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if args:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes can indicate subrepo modifications
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        kind, realpat, matchfn = util.stringmatcher(pat)
        return (name for name in names if matchfn(name))

    def matches(rev):
        ctx = repo[rev]
        st = repo.status(ctx.p1().node(), ctx.node(), match=m)

        if pat is None:
            # no pattern: any subrepo state change qualifies
            return st.added or st.modified or st.removed

        if st.added:
            return any(submatches(ctx.substate.keys()))

        if st.modified:
            subs = set(ctx.p1().substate.keys())
            subs.update(ctx.substate.keys())
            for path in submatches(subs):
                if ctx.p1().substate.get(path) != ctx.substate.get(path):
                    return True

        if st.removed:
            return any(submatches(ctx.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2168 2167
def _substringmatcher(pattern):
    # like util.stringmatcher(), but a 'literal' pattern matches as a
    # substring instead of requiring exact equality
    kind, normpat, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return normpat in s
    return kind, normpat, matcher
2174 2173
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision, excluding the implicit 'tip' tag
        revs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & revs

    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        revs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
        return subset & revs

    # literal name: look it up directly to avoid resolving all tags
    tn = repo._tagscache.tags.get(pattern, None)
    if tn is None:
        raise error.RepoLookupError(_("tag '%s' does not exist")
                                    % pattern)
    return subset & set([repo[tn].rev()])
2203 2202
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias: forwards unchanged arguments to tag()
    return tag(repo, subset, x)
2207 2206
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2216 2215
2217 2216
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' and 'author' are synonyms; reuse author()'s implementation
    return author(repo, subset, x)
2227 2226
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    wdirrev = node.wdirrev
    if wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([wdirrev])
    return baseset()
2236 2235
# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    seen = set()
    for tok in spec.split('\0'):
        try:
            # fast path for integer revision
            rev = int(tok)
            if str(rev) != tok or rev not in cl:
                raise ValueError
            revs = [rev]
        except ValueError:
            revs = stringset(repo, subset, tok)

        for rev in revs:
            if rev in seen:
                continue
            if (rev in subset
                    or (rev == node.nullrev
                        and isinstance(subset, fullreposet))):
                result.append(rev)
            seen.add(rev)
    return baseset(result)
2266 2265
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    revs = [int(tok) for tok in spec.split('\0')]
    return baseset([rev for rev in revs if rev in subset])
2276 2275
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([rev for rev in revs if rev in subset])
2287 2286
# dispatch table mapping each parse-tree node type to the function that
# evaluates it
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2304 2303
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # recognize 'ancestors(X) and not ancestors(Y)' and return the
    # argument list for the equivalent only(X, Y) call; None otherwise
    if revs is None or revs[0] != 'func':
        return None
    if getsymbol(revs[1]) != 'ancestors':
        return None
    if bases is None or bases[0] != 'not':
        return None
    notarg = bases[1]
    if notarg[0] != 'func' or getsymbol(notarg[1]) != 'ancestors':
        return None
    return ('list', revs[2], notarg[2])
2319 2318
def _optimize(x, small):
    """Return (weight, tree) for the rewritten parse tree x.

    'weight' is a rough evaluation-cost estimate used to reorder 'and'
    operands (cheaper side first); 'small' grants a weight bonus to
    single-revision expressions.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is rewritten as 'a and not b'
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        # NOTE: this is a substring containment test against the literal,
        # not a sequence membership test; safe for the op names used here
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            # put the cheaper operand first
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial revisions into one _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        # NOTE: substring containment test against the literal, as above
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        # rough per-predicate cost estimates (substring containment
        # tests against the literals, as above)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2447 2446
def optimize(tree):
    # the weight computed by _optimize() is only meaningful internally;
    # callers just want the rewritten tree
    return _optimize(tree, small=True)[1]
2451 2450
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (note: 'ord(c) > 127' admits every non-ASCII byte, so bytes of encoded
# multibyte characters may start a symbol name)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2456 2455
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    tokens = tokenize(spec, lookup=lookup, syminitletters=syminitletters)
    tree, pos = parser.parser(elements).parse(tokens)
    if pos != len(spec):
        # parsing stopped before consuming the whole input
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('list', 'or'))
2477 2476
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # return (name, args) for a func('symbol', ...) node, else None
        if tree[0] != 'func' or tree[1][0] != 'symbol':
            return None
        return tree[1][1], getlist(tree[2])
2496 2495
def expandaliases(ui, tree, showwarning=None):
    """Expand 'revsetalias' config definitions found in tree."""
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    expanded = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2507 2506
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: fold children recursively
        return tuple(foldconcat(t) for t in tree)
    # flatten nested _concat nodes depth-first and join their leaves
    pieces = []
    stack = [tree]
    while stack:
        e = stack.pop()
        if e[0] == '_concat':
            stack.extend(reversed(e[1:]))
        elif e[0] in ('string', 'symbol'):
            pieces.append(e[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2528 2527
def parse(spec, lookup=None):
    # parse with the default tokenizer configuration; see _parsewith
    return _parsewith(spec, lookup=lookup)
2531 2530
def posttreebuilthook(tree, repo):
    """No-op by default; called with the final optimized tree."""
    # hook for extensions to execute code on the optimized tree
    pass
2535 2534
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec."""
    # single-spec convenience wrapper around matchany()
    return matchany(ui, [spec], repo=repo)
2539 2538
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher selects nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))

    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # several specs: union them with a single top-level 'or' node
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2557 2556
def _makematcher(ui, tree, repo):
    """Turn an already-parsed tree into a matcher function."""
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # wrap plain collections so getset() always receives a smartset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2573 2572
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str doubles as revset single-quoted string quoting
        return repr(str(s))

    def argtype(c, arg):
        # format a single argument according to its type character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # format a list of arguments of type t, using the internal
        # _list()/_intlist()/_hexlist() revset functions where possible
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # other types: split recursively into a balanced 'or' tree
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            if pos == len(expr):
                # a trailing '%' previously escaped as an IndexError;
                # report it as a usage error instead
                raise error.Abort(_('incomplete revspec format character'))
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                if pos == len(expr):
                    # '%l' with no type character following
                    raise error.Abort(_('incomplete revspec format '
                                        'character'))
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            ret += c
        pos += 1

    return ret
2668 2667
def prettyformat(tree):
    """Pretty-print a parse tree for debug output."""
    leaves = ('string', 'symbol')
    return parser.prettyformat(tree, leaves)
2671 2670
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuple leaves are 0."""
    if not isinstance(tree, tuple):
        return 0
    # a tuple node is one level deeper than its deepest child
    return 1 + max(depth(t) for t in tree)
2677 2676
def funcsused(tree):
    """Return the set of function names used anywhere in the parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves use no functions
        return set()
    funcs = set()
    # union the functions used by every subtree
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2688 2687
2689 2688 def _formatsetrepr(r):
2690 2689 """Format an optional printable representation of a set
2691 2690
2692 2691 ======== =================================
2693 2692 type(r) example
2694 2693 ======== =================================
2695 2694 tuple ('<not %r>', other)
2696 2695 str '<branch closed>'
2697 2696 callable lambda: '<branch %r>' % sorted(b)
2698 2697 object other
2699 2698 ======== =================================
2700 2699 """
2701 2700 if r is None:
2702 2701 return ''
2703 2702 elif isinstance(r, tuple):
2704 2703 return r[0] % r[1:]
2705 2704 elif isinstance(r, str):
2706 2705 return r
2707 2706 elif callable(r):
2708 2707 return r()
2709 2708 else:
2710 2709 return repr(r)
2711 2710
class abstractsmartset(object):
    """Base class defining the interface shared by all smartset classes.

    A smartset represents an ordered set of revision numbers and supports
    lazy iteration in both directions plus the basic set operations used
    during revset evaluation.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # the first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # the first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=False):
        """get the set to iterate in an ascending or descending order"""
        # the default is 'reverse=False' (ascending), matching every
        # concrete implementation of this interface
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository is a no-op
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2824 2823
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending is tri-state: None (raw insertion order), True
        # (ascending) or False (descending)
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # reuse the caller's set directly as the membership cache
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily-built membership set over the list
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily-built sorted copy used for ordered iteration
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        # pick the iterator matching the requested order (see __init__
        # for the meaning of the tri-state _ascending)
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # caches the bound method of the underlying set, so membership
        # tests resolve to a direct set lookup after the first use
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        # an explicit sort discards any topological ordering claim
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # no sort requested yet: reverse the raw list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # direction marker: '' unordered, '+' ascending, '-' descending
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2948 2947
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of 'it' accepted by the condition,
        # preserving the iteration order of 'it'
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # only fast if the underlying subset provides a fast iterator
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # only fast if the underlying subset provides a fast iterator
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer an iterator matching the current direction, then any
        # fast iterator; emptiness testing needs no particular order
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # non-empty iff at least one revision passes the condition
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        # ordering is fully delegated to the wrapped subset
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            # a fast reversed iterator exists: its first hit is our last
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iterator: walk the whole set and keep the
            # last value seen
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3061 3060
3062 3061 def _iterordered(ascending, iter1, iter2):
3063 3062 """produce an ordered iteration from two iterators with the same order
3064 3063
3065 3064 The ascending is used to indicated the iteration direction.
3066 3065 """
3067 3066 choice = max
3068 3067 if ascending:
3069 3068 choice = min
3070 3069
3071 3070 val1 = None
3072 3071 val2 = None
3073 3072 try:
3074 3073 # Consume both iterators in an ordered way until one is empty
3075 3074 while True:
3076 3075 if val1 is None:
3077 3076 val1 = next(iter1)
3078 3077 if val2 is None:
3079 3078 val2 = next(iter2)
3080 3079 n = choice(val1, val2)
3081 3080 yield n
3082 3081 if val1 == n:
3083 3082 val1 = None
3084 3083 if val2 == n:
3085 3084 val2 = None
3086 3085 except StopIteration:
3087 3086 # Flush any remaining values and consume the other one
3088 3087 it = iter2
3089 3088 if val1 is not None:
3090 3089 yield val1
3091 3090 it = iter1
3092 3091 elif val2 is not None:
3093 3092 # might have been equality and both are empty
3094 3093 yield val2
3095 3094 for val in it:
3096 3095 yield val
3097 3096
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # ascending: None means no known order; True/False request a
        # merged ascending/descending iteration of both sets
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        # caches filled lazily: _genlist holds all generated values in
        # first-iteration order, _asclist a sorted copy of them
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize and cache the full iteration on first use
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # first set in full, then the second minus duplicates
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # everything already generated: iterate the sorted cache
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # a merged fast iteration needs both sides to support it
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # everything already generated: iterate the sorted cache
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # a merged fast iteration needs both sides to support it
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            # no known order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reverse temporarily, take the new first element, then restore
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        # direction marker: '' unordered, '+' ascending, '-' descending
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3306 3305
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        # _asclist: sorted list of all values, set once _gen is exhausted
        self._asclist = None
        # _cache: memoized membership results
        self._cache = {}
        # _genlist: values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # when the generation order is known, expose a fast iterator
            # and a membership test that can stop early; these instance
            # attributes shadow the class-level defaults
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    # value already produced by a concurrent iteration
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # pull values from the wrapped generator, recording membership
        # and generation order as we go
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: build the sorted cache and switch to
            # direct list iteration for both directions
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # force full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def __repr__(self):
        # direction marker: '+' ascending, '-' descending
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3486 3485
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revisions filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            start, end = end + 1, start + 1
        # invariant: after the swap above, _start <= _end always holds,
        # whatever the iteration direction
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        """Sort ascending (default) or descending (reverse=True)."""
        self._ascending = not reverse

    def reverse(self):
        """Flip the current iteration direction."""
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        """Yield revisions from iterrange, skipping hidden ones."""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        """Iterate the span in ascending order, skipping hidden revs."""
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        """Iterate the span in descending order, skipping hidden revs."""
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # note: __init__ guarantees start <= end, so the former
                # extra test 'end < rev <= start' could never match and
                # has been dropped as dead code
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """Return the first revision in iteration order, or None."""
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        """Return the last revision in iteration order, or None."""
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3601 3600
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # span over the whole repo: start=0, end=len(repo)
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3640 3639
def prettyformatset(revs):
    """Return a multi-line, indented rendering of a smartset's repr().

    The repr is split at each '<'; every chunk is indented by its nesting
    depth, computed as the number of unbalanced '<' seen so far.
    """
    rs = repr(revs)
    chunks = []
    pos = 0
    total = len(rs)
    while pos < total:
        cut = rs.find('<', pos + 1)
        if cut < 0:
            cut = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        chunks.append(' ' * depth + rs[pos:cut].rstrip())
        pos = cut
    return '\n'.join(chunks)
3654 3653
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    for sym, fn in registrarobj._table.iteritems():
        symbols[sym] = fn
        # remember which predicates were declared safe (func._safe flag)
        if fn._safe:
            safesymbols.add(sym)
3662 3661
# load built-in predicates explicitly to setup safesymbols
# (ui and extname are unused by loadpredicate, hence None)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now