##// END OF EJS Templates
revset: build list of (key, reverse) pairs before sorting...
Yuya Nishihara -
r29363:2d18c611 default
parent child Browse files
Show More
@@ -1,3665 +1,3664 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazy generatorset yielding the ancestors of ``revs``
    (including the revs themselves) in descending revision order.
    When ``followfirst`` is true, only first parents are followed.
    """
    if followfirst:
        cut = 1  # parentrevs(rev)[:1] -> first parent only
    else:
        cut = None  # parentrevs(rev)[:None] -> both parents
    cl = repo.changelog

    def iterate():
        # NOTE(review): sorts the caller-supplied revs in place
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []  # max-heap emulated by pushing negated revs onto a heapq

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # interleave remaining input revs so they are visited in
            # globally descending order along with discovered parents
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
62 62
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a lazy generatorset, in ascending revision order, of the
    revisions above min(revs) that have an ancestor in ``revs`` via the
    followed parent links (first parent only when ``followfirst``).
    """
    if followfirst:
        cut = 1  # consider only the first parent
    else:
        cut = None  # consider both parents

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # single ascending sweep: a rev is a descendant iff one of
            # its (followed) parents has already been marked as seen
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
91 91
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-python fallback for the C index's reachableroots (see
    reachableroots()).  NOTE(review): the return type is inconsistent —
    [] / plain set / baseset depending on the path; the sole caller
    normalizes via baseset(), so this is tolerated here.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            # without includepath we only need the roots themselves
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass, ascending, so reachability propagates from each root
    # down through every visited child
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # fast path: C implementation on the changelog index
        found = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                              includepath)
    except AttributeError:
        # pure-python fallback
        found = _reachablerootspure(repo, minroot, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
146 146
# Parsing-rule table for the revset grammar, consumed by parser.parser.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that tokenize() treats as operators rather than symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Tokens are ``(type, value, position)`` triples; the stream always
    ends with an ``('end', None, pos)`` token.

    ``lookup`` is an optional callable returning a truthy value for
    names known to the repository; it is used to disambiguate old-style
    ``a:b`` ranges and symbols containing ``-``.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # r-prefixed string: taken literally, no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the pieces as
                    # alternating symbol and '-' (minus) tokens
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
302 302
303 303 # helpers
304 304
def getstring(x, err):
    """Unwrap a parsed 'string' or 'symbol' node; raise ParseError(err)
    for anything else."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
309 309
def getlist(x):
    """Flatten a parsed argument tree into a Python list of subtrees."""
    if not x:
        return []
    if x[0] != 'list':
        # a single expression stands for a one-element list
        return [x]
    return list(x[1:])
316 316
def getargs(x, min, max, err):
    """Return the argument list of x, enforcing min <= len <= max.

    A negative ``max`` means "unbounded".  Raises ParseError(err) when
    the count is out of range.
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if max >= 0 and nargs > max:
        raise error.ParseError(err)
    return args
322 322
def getargsdict(x, funcname, keys):
    """Map the (possibly keyword) arguments of x onto the names in the
    space-separated ``keys`` string; ``funcname`` is used in error messages."""
    arglist = getlist(x)
    keylist = keys.split()
    return parser.buildargsdict(arglist, funcname, keylist,
                                keyvaluenode='keyvalue', keynode='symbol')
326 326
def getset(repo, subset, x):
    """Evaluate the parsed tree ``x`` within ``subset``, always returning
    a smartset (wrapping — with a deprecation warning — predicates that
    still return plain lists)."""
    if not x:
        raise error.ParseError(_("missing argument"))
    result = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(result, 'isascending'):
        # already a smartset
        return result
    # else case should not happen, because all non-func are internal,
    # ignoring for now.
    if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
        repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
                           % x[1][1],
                           '3.9')
    return baseset(result)
340 340
def _getrevsource(repo, r):
    """Return the rev that r was grafted/transplanted/rebased from,
    or None when no (resolvable) source is recorded."""
    metadata = repo[r].extra()
    for key in ('source', 'transplant_source', 'rebase_source'):
        if key in metadata:
            try:
                return repo[metadata[key]].rev()
            except error.RepoLookupError:
                # recorded source is unknown here; try the next key
                pass
    return None
350 350
351 351 # operator methods
352 352
def stringset(repo, subset, x):
    """Resolve a plain revision identifier string against subset."""
    r = repo[x].rev()
    # nullrev is implicitly part of a fullreposet even though iterating
    # the subset would not produce it
    if r in subset or (r == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([r])
    return baseset()
359 359
def rangeset(repo, subset, x, y):
    """Handle 'x:y' — the revisions between x and y, inclusive."""
    xs = getset(repo, fullreposet(repo), x)
    ys = getset(repo, fullreposet(repo), y)
    if not xs or not ys:
        return baseset()

    start, stop = xs.first(), ys.last()
    if start == stop:
        span = baseset([start])
    elif stop == node.wdirrev:
        # range ending at the working-directory pseudo revision
        span = spanset(repo, start, len(repo)) + baseset([stop])
    elif start == node.wdirrev:
        # range starting at the working-directory pseudo revision
        span = baseset([start]) + spanset(repo, len(repo) - 1, stop - 1)
    elif start < stop:
        span = spanset(repo, start, stop + 1)
    else:
        # descending range
        span = spanset(repo, start, stop - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return span & subset
384 384
def dagrange(repo, subset, x, y):
    """Handle 'x::y' — revisions on a DAG path from x down to y."""
    everything = fullreposet(repo)
    sources = getset(repo, everything, x)
    sinks = getset(repo, everything, y)
    xs = reachableroots(repo, sources, sinks, includepath=True)
    return subset & xs
390 390
def andset(repo, subset, x, y):
    """'x and y': evaluate y within the result of evaluating x."""
    left = getset(repo, subset, x)
    return getset(repo, left, y)
393 393
def differenceset(repo, subset, x, y):
    """'x - y': revisions matching x but not y."""
    included = getset(repo, subset, x)
    excluded = getset(repo, subset, y)
    return included - excluded
396 396
def orset(repo, subset, *xs):
    """'a or b or ...': the union, combined by halving the operand list
    to keep the recursion depth logarithmic."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
405 405
def notset(repo, subset, x):
    """'not x': everything in subset that x does not select."""
    excluded = getset(repo, subset, x)
    return subset - excluded
408 408
def listset(repo, subset, *xs):
    """Reject a bare 'a, b' list used outside of a function call."""
    raise error.ParseError(_("can't use a list in this context"),
                           hint=_('see hg help "revsets.x or y"'))
412 412
def keyvaluepair(repo, subset, k, v):
    """Reject 'key=value' outside the function calls that accept it."""
    raise error.ParseError(_("can't use a key-value pair in this context"))
415 415
def func(repo, subset, a, b):
    """Dispatch a parsed function call to its registered predicate."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only documented (public) predicates
    syms = [s for (s, fn) in symbols.items()
            if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], syms)
424 424
# functions

# symbols are callables like:
#   fn(repo, subset, x)
# with:
#   repo - current repository instance
#   subset - of revisions to be examined
#   x - argument in tree form
symbols = {}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set()

# decorator registering a function into 'symbols' (and, when declared
# safe, into 'safesymbols'); a predicate's docstring doubles as its help
# text — predicates without one stay unadvertised
predicate = registrar.revsetpredicate()
441 441
@predicate('_destupdate')
def _destupdate(repo, subset, x):
    # experimental revset for update destination
    # (deliberately no docstring, so the predicate is not advertised)
    #
    # BUGFIX: the funcname passed to getargsdict was 'limit' (copy-paste
    # from the limit() predicate), so argument errors blamed the wrong
    # function; it must name this predicate.
    args = getargsdict(x, '_destupdate', 'clean check')
    return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
@predicate('_destmerge')
def _destmerge(repo, subset, x):
    # experimental revset for merge destination
    # (deliberately no docstring, so the predicate is not advertised)
    if x is None:
        sourceset = None
    else:
        sourceset = getset(repo, fullreposet(repo), x)
    dest = destutil.destmerge(repo, sourceset=sourceset)
    return subset & baseset([dest])
455 455
@predicate('adds(pattern)', safe=True)
def adds(repo, subset, x):
    """Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the added files
    return checkstatus(repo, subset, pat, 1)
467 467
@predicate('ancestor(*changeset)', safe=True)
def ancestor(repo, subset, x):
    """A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    everything = fullreposet(repo)
    anc = None

    # fold ctx.ancestor() over every rev of every argument set
    for arg in args:
        for r in getset(repo, everything, arg):
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
492 492
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation of ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
499 499
@predicate('ancestors(set)', safe=True)
def ancestors(repo, subset, x):
    """Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper: the shared logic lives in _ancestors()
    return _ancestors(repo, subset, x)
505 505
@predicate('_firstancestors', safe=True)
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (comments, not a docstring, so the predicate stays out of the help)
    return _ancestors(repo, subset, x, followfirst=True)
511 511
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        depth = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    found = set()
    for rev in getset(repo, fullreposet(repo), x):
        # climb 'depth' first-parent links
        step = 0
        while step < depth:
            rev = cl.parentrevs(rev)[0]
            step += 1
        found.add(rev)
    return subset & found
528 528
@predicate('author(string)', safe=True)
def author(repo, subset, x):
    """Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matches(r):
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matches, condrepr=('<user %r>', needle))
538 538
@predicate('bisect(string)', safe=True)
def bisect(repo, subset, x):
    """Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    wanted = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, wanted))
555 555
# Backward-compatibility
# - no help entry so that we do not advertise it any more
@predicate('bisected', safe=True)
def bisected(repo, subset, x):
    # legacy alias for bisect(); kept docstring-less so it stays hidden
    return bisect(repo, subset, x)
561 561
@predicate('bookmark([name])', safe=True)
def bookmark(repo, subset, x):
    """The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if not args:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    else:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern match over all bookmark names
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    # the null revision is never a meaningful bookmark target
    bms -= set([node.nullrev])
    return subset & bms
599 599
@predicate('branch(string or set)', safe=True)
def branch(repo, subset, x):
    """
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    # first try to interpret the argument as a branch name string
    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]),
                                     condrepr=('<branch %r>', b))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]),
                                 condrepr=('<branch %r>', b))

    # fallback: treat the argument as a revset and select every rev on
    # any branch touched by that set (plus the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
                         condrepr=lambda: '<branch %r>' % sorted(b))
639 639
@predicate('bumped()', safe=True)
def bumped(repo, subset, x):
    """Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    marked = obsmod.getrevs(repo, 'bumped')
    return subset & marked
650 650
@predicate('bundle()', safe=True)
def bundle(repo, subset, x):
    """Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundle repository's changelog carries 'bundlerevs'
    try:
        inbundle = repo.changelog.bundlerevs
    except AttributeError:
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & inbundle
662 662
def checkstatus(repo, subset, pat, field):
    """Filter subset to csets whose status touches a file matching pat.

    ``field`` indexes the tuple returned by repo.status(); adds() passes
    1 (added files).  # NOTE(review): other callers/indexes not visible
    here — confirm against repo.status() before relying on them.
    """
    # fileset patterns need a per-changeset matcher; others can be cached
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: the pattern is one literal file name
            fname = m.files()[0]
        if fname is not None:
            # cheap pre-filter: the cset must mention the file at all
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
def _children(repo, narrow, parentset):
    """Return (as a baseset) the revs in narrow with a parent in parentset."""
    if not parentset:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    found = set()
    for rev in narrow:
        # a child always has a higher number than all of its parents
        if rev <= minrev:
            continue
        for p in parentrevs(rev):
            if p in parentset:
                found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
710 710
@predicate('children(set)', safe=True)
def children(repo, subset, x):
    """Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
718 718
@predicate('closed()', safe=True)
def closed(repo, subset, x):
    """Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed, condrepr='<branch closed>')
727 727
@predicate('contains(pattern)')
def contains(repo, subset, x):
    """The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        ctx = repo[rev]
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test
            cpath = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            return cpath in ctx
        # real pattern: scan the whole manifest
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        return any(m(f) for f in ctx.manifest())

    return subset.filter(matches, condrepr=('<contains %r>', pat))
754 754
@predicate('converted([id])', safe=True)
def converted(repo, subset, x):
    """Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue, condrepr=('<converted %r>', rev))
777 777
@predicate('date(interval)', safe=True)
def date(repo, subset, x):
    """Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)

    def indate(r):
        return matchfn(repo[r].date()[0])

    return subset.filter(indate, condrepr=('<date %r>', spec))
787 787
@predicate('desc(string)', safe=True)
def desc(repo, subset, x):
    """Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(matches, condrepr=('<desc %r>', needle))
800 800
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation of descendants() and _firstdescendants()
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    descrevs = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & descrevs
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to restore its ordering
        result = subset & result
    return result
819 819
@predicate('descendants(set)', safe=True)
def descendants(repo, subset, x):
    """Changesets which are descendants of changesets in set.
    """
    # thin wrapper: the shared logic lives in _descendants()
    return _descendants(repo, subset, x)
825 825
@predicate('_firstdescendants', safe=True)
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (comments, not a docstring, so the predicate stays out of the help)
    return _descendants(repo, subset, x, followfirst=True)
831 831
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # remember the candidate; it is confirmed only if some
            # transitive source below lands in 'sources'
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
876 876
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    marked = obsmod.getrevs(repo, 'divergent')
    return subset & marked
886 886
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    marked = obsmod.getrevs(repo, 'extinct')
    return subset & marked
895 895
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None
    matcher = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def hasextra(r):
        metadata = repo[r].extra()
        if label not in metadata:
            return False
        return value is None or matcher(metadata[label])

    return subset.filter(hasextra, condrepr=('<extra[%r] %r>', label, value))
926 926
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: match that one file exactly (the fast case)
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: walk the working context to find candidate files
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: filenode -> changelog rev already resolved by a prior scan
        known = {}
        # scanpos: changelog position the next workaround scan may resume
        # from; None once a scan ran off the end (no further scans then)
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    # remember other filenodes met on the way
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
992 992
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # hand the unparsed argument tree straight to limit()
    return limit(repo, subset, x)
998 998
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of follow() and _followfirst().

    Without a pattern, selects the ancestors of the working directory's
    first parent. With a pattern, selects the ancestors of every file in
    '.' matching the pattern, plus each file's introducing revision.
    """
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # note: the generator expression has its own scope, so the
            # inner 'c' does not clobber the outer changectx
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1019 1019
@predicate('follow([pattern])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # all the work happens in the shared _follow() helper
    return _follow(repo, subset, x, 'follow')
1028 1028
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # Internal predicate; delegates to _follow() with followfirst=True.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035 1035
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops "null" if subset contained it
    everything = spanset(repo)
    return subset & everything
1043 1043
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        regexp = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(rev):
        # search changed file names, then user, then description
        ctx = repo[rev]
        return any(regexp.search(t)
                   for t in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches, condrepr=('<grep %r>', regexp.pattern))
1064 1064
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # each argument is a two-character prefix followed by its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry; ask its context
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1128 1128
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    arg = ('string', 'p:' + pat)
    return _matchfiles(repo, subset, arg)
1141 1141
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the head nodes of every named branch and map them to revs
    for branch, nodes in repo.branchmap().iteritems():
        for h in nodes:
            hs.add(cl.rev(h))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1157 1157
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # drop every member that is the parent of another member of the set
    return getset(repo, subset, x) - parents(repo, subset, x)
1165 1165
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # whatever is filtered out of the 'visible' view is hidden
    return subset & repoview.filterrevs(repo, 'visible')
1174 1174
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(rev):
        ctx = repo[rev]
        # lower-case each candidate text for a case-insensitive comparison
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189 1189
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    try:
        lim, ofs = 1, 0
        if 'n' in args:
            # i18n: "limit" is a keyword
            lim = int(getstring(args['n'], _("limit requires a number")))
        if 'offset' in args:
            # i18n: "limit" is a keyword
            ofs = int(getstring(args['offset'], _("limit requires a number")))
        if ofs < 0:
            raise error.ParseError(_("negative offset"))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # discard the first 'ofs' revisions of the input set
    for x in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    # then draw at most 'lim' revisions, keeping only those also in subset
    # (a drawn revision counts against 'lim' even when subset rejects it)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1226 1226
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(args) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), args[0])
    os.reverse()
    # draw at most 'lim' revisions from the reversed set, keeping only
    # those also present in subset (rejected draws still count)
    result = []
    for taken, y in enumerate(os):
        if taken == lim:
            break
        if y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252 1252
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.max()
    except ValueError:
        # max() on an empty smartset raises ValueError, like python's max()
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<max %r, %r>', subset, candidates))
    return baseset(datarepr=('<max %r, %r>', subset, candidates))
1267 1267
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    def ismerge(r):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(r)[1] != -1
    return subset.filter(ismerge, condrepr='<merge>')
1277 1277
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts how many revisions name rev (baserev + i) as a
    # parent, i.e. that rev's number of children
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1297 1297
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.min()
    except ValueError:
        # min() on an empty smartset raises ValueError, like python's min()
        pass
    else:
        if m in subset:
            return baseset([m], datarepr=('<min %r, %r>', subset, candidates))
    return baseset(datarepr=('<min %r, %r>', subset, candidates))
1312 1312
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # the trailing index selects which status field checkstatus() examines
    return checkstatus(repo, subset, pattern, 0)
1324 1324
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact name: it must exist, or the lookup is an error
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # regex/other pattern: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1362 1362
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex node: resolve it directly; unknown or
        # malformed ids simply yield no revision
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter string: ask the changelog for a partial match
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1386 1386
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # obsolescence store knows which revisions are obsolete
    return subset & obsmod.getrevs(repo, 'obsolete')
1394 1394
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 2:
        # explicit second set: its ancestors are what we subtract
        exclude = getset(repo, fullreposet(repo), args[1])
    else:
        if not include:
            return baseset()
        # subtract every repo head that is neither in 'include' nor
        # descended from it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1420 1420
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the chain of recorded sources back to the earliest one
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            earlier = _getrevsource(repo, src)

            if earlier is None:
                return src
            src = earlier

    origins = set(_firstsrc(r) for r in dests)
    origins.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & origins
1452 1452
@predicate('outgoing([path])', safe=True)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push path, then the pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery's status messages are swallowed
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1479 1479
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1498 1498
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wparents = repo[x].parents()
        if len(wparents) < 2:
            return baseset()
        p = wparents[1].rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1521 1521
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # bind methods to locals; the loop may visit many revisions
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; ask its
                # context for parents instead
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1541 1541
def _phase(repo, subset, target):
    """Helper selecting every revision in phase <target>."""
    phasecache = repo._phasecache
    phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not phasecache._phasesets:
        # no precomputed sets: test each revision individually
        phase = phasecache.phase
        def inphase(r):
            return phase(repo, r) == target
        return subset.filter(inphase, condrepr=('<phase %r>', target),
                             cache=False)
    revs = baseset(phasecache._phasesets[target]
                   - repo.changelog.filteredrevs)
    revs.sort() # sets are unordered, so enforce ascending order
    return subset & revs
1555 1555
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1563 1563
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1571 1571
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    cl = repo.changelog
    revs = getset(repo, fullreposet(repo), x)
    ps = set()
    # branch on n once, outside the loop
    if n == 0:
        ps.update(revs)
    elif n == 1:
        for r in revs:
            ps.add(cl.parentrevs(r)[0])
    else:
        for r in revs:
            parents = cl.parentrevs(r)
            # only add a second parent when one actually exists
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1596 1596
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        found = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and return nothing instead
        return baseset()
    return found
1610 1610
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    # Changesets in any non-public phase.
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every precomputed phase set but the first one
        # (index 0 -- presumably public, matching the fallback below)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: check the phase of each revision individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1629 1629
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    target = phases.public
    phase = repo._phasecache.phase
    def ispublic(r):
        return phase(repo, r) == target
    return subset.filter(ispublic, condrepr=('<phase %r>', target),
                         cache=False)
1640 1640
@predicate('remote([id [,path]])', safe=True)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the name of the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote, then map the resulting node
    # back to a local revision when we have it
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1675 1675
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # the trailing index selects which status field checkstatus() examines
    return checkstatus(repo, subset, pattern, 2)
1687 1687
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # "null" is acceptable even though it has no changelog entry
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1703 1703
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # the reference revision(s) whose field values candidates must match
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches if ALL selected fields equal those of some rev in 'revs'
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1815 1815
@predicate('reverse(set)', safe=True)
def reverse(repo, subset, x):
    """Reverse order of set.
    """
    result = getset(repo, subset, x)
    result.reverse()
    return result
1823 1823
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(rev):
        # a rev is a root unless some non-null parent is also in the set
        return not any(0 <= p and p in candidates for p in parentrevs(rev))
    return subset & candidates.filter(isroot, condrepr='<roots>')
1836 1836
# map from sort() key name to a function extracting that key from a
# changectx; 'author' is an alias of 'user', 'date' compares timestamps only
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),
    'date': lambda c: c.date()[0],
}
1845 1845
@predicate('sort(set[, [-]key... [, ...]])', safe=True)
def sort(repo, subset, x):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))
    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    # parse the sort spec up front into (key, reverse) pairs so unknown
    # keys are reported before any revisions are computed
    keyflags = []
    for k in keys.split():
        fk = k
        reverse = (k[0] == '-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != 'topo':
            raise error.ParseError(_("unknown sort key %r") % fk)
        keyflags.append((k, reverse))

    s = args['set']
    revs = getset(repo, subset, s)

    if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_(
            'topo sort order cannot be combined with other sort keys'))

    firstbranch = ()
    if 'topo.firstbranch' in args:
        if any(k == 'topo' for k, reverse in keyflags):
            firstbranch = getset(repo, subset, args['topo.firstbranch'])
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_(
                'topo.firstbranch can only be used when using the topo sort '
                'key'))

    if not keyflags:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # fast path: plain revision-number order needs no changectx lookups
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == "topo":
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable, so sorting by the keys from least
    # to most significant yields a multi-key sort
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
1920 1919
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.

    NOTE(review): 'revs' is sorted in place (descending) as a side effect.
    'parentsfunc' maps a rev to its parent revs; 'firstbranch' pre-seeds the
    set of unblocked revisions to prioritize a branch.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root).  In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1:  # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2118 2117
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if args:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes can indicate subrepo modifications
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = util.stringmatcher(pat)
        return (name for name in names if m(name))

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            # no pattern: any .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2162 2161
def _substringmatcher(pattern):
    # 'literal:' patterns match as substrings; other kinds keep the
    # matcher util.stringmatcher produced
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
2168 2167
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name given: every tagged rev, excluding the 'tip' pseudo-tag
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & s
2197 2196
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias of tag() kept for backward compatibility
    return tag(repo, subset, x)
2201 2200
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2210 2209
2211 2210
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is an alias of 'author'
    return author(repo, subset, x)
2221 2220
# experimental
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-rev only appears when the subset is the
    # full repo or explicitly contains it
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2230 2229
# for internal use
@predicate('_list', safe=True)
def _list(repo, subset, x):
    # resolve a '\0'-separated list of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is included only for fullreposet subsets
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2260 2259
# for internal use
@predicate('_intlist', safe=True)
def _intlist(repo, subset, x):
    # resolve a '\0'-separated list of integer revisions, keeping input order
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(t) for t in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2270 2269
# for internal use
@predicate('_hexlist', safe=True)
def _hexlist(repo, subset, x):
    # resolve a '\0'-separated list of hex node ids, keeping input order
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2281 2280
# map from parse-tree node type to the function that evaluates that node
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2298 2297
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # detect the 'ancestors(A) and not ancestors(B)' pattern and return the
    # argument list for the fast 'only(A, B)' equivalent; returns None
    # implicitly when the pattern does not match
    if (revs is not None
        and revs[0] == 'func'
        and getstring(revs[1], _('not a symbol')) == 'ancestors'
        and bases is not None
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
2313 2312
def _optimize(x, small):
    """Optimize parse tree 'x', returning a (weight, newtree) pair.

    The weight is a rough cost estimate used to reorder 'and' operands so
    the cheaper side is evaluated first; 'small' marks contexts where a
    single-revision result is likely and therefore cheap.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return _optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _optimize(t, small)
    elif op == 'onlypost':
        return _optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return _optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return _optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _optimize(('string', '-' + s), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm)

        if tb is not None and tb[0] == 'not':
            return wa, ('difference', ta, tb[1])

        if wa > wb:
            # evaluate the cheaper operand first
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = _optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return _optimize(x[1], small)
    elif op in 'dagrange range parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return _optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return _optimize(('range', post, x[2][1]), small)

        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = _optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2441 2440
def optimize(tree):
    """Return an equivalent, evaluation-optimized revset parse tree."""
    return _optimize(tree, small=True)[1]
2445 2444
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$' and any non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2450 2449
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup,
                                 syminitletters=syminitletters))
    # trailing input that the grammar did not consume is an error
    if pos != len(spec):
        raise error.ParseError(_('invalid token'), pos)
    # flatten nested 'list'/'or' nodes into n-ary tuples
    return parser.simplifyinfixops(tree, ('list', 'or'))
2471 2470
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        # return (name, args) for a function call node, None otherwise
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2490 2489
def expandaliases(ui, tree, showwarning=None):
    """Expand 'revsetalias' config entries inside *tree*.

    If *showwarning* is given, it is used to report aliases that are broken,
    each at most once.
    """
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    expanded = _aliasrules.expand(aliases, tree)
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2501 2500
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    # leaves ('string'/'symbol') and non-tuples are returned unchanged
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        # iterative depth-first walk collecting the concatenated pieces
        # in left-to-right order
        pending = [tree]
        l = []
        while pending:
            e = pending.pop()
            if e[0] == '_concat':
                pending.extend(reversed(e[1:]))
            elif e[0] in ('string', 'symbol'):
                l.append(e[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
                raise error.ParseError(msg)
        return ('string', ''.join(l))
    else:
        # recurse into child nodes
        return tuple(foldconcat(t) for t in tree)
2522 2521
def parse(spec, lookup=None):
    """Parse *spec* into a revset parse tree; *lookup* resolves symbols."""
    return _parsewith(spec, lookup=lookup)
2525 2524
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (intentionally a no-op; extensions wrap or replace this function)
    pass
2529 2528
def match(ui, spec, repo=None):
    """Create a matcher for a single revision specification."""
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2538 2537
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs: a matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))

    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # multiple specs are combined with a top-level 'or'
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2556 2555
def _makematcher(ui, tree, repo):
    """Turn a parsed revset tree into a matcher callable

    The returned function takes (repo, subset=None) and returns a smartset
    of matching revisions.
    """
    if ui:
        tree = expandaliases(ui, tree, showwarning=ui.warn)
    tree = optimize(foldconcat(tree))
    posttreebuilthook(tree, repo)

    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # wrap plain collections so getset always sees a smartset
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2572 2571
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a single-quoted, escaped literal
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render list s of type t as a revset expression fragment
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # types with no dedicated list function (e.g. 'r'): build a balanced
        # tree of 'or's so parser recursion depth stays logarithmic in l
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            if pos == len(expr):
                # a trailing '%' used to escape as an IndexError; report
                # the malformed spec explicitly instead
                raise error.Abort('incomplete revspec format character')
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                if pos == len(expr):
                    # a trailing '%l' with no type character is malformed too
                    raise error.Abort('incomplete revspec format character')
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise error.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2666 2665
def prettyformat(tree):
    """Return a multi-line, indented rendering of a parsed revset tree

    ('string', 'symbol') marks the leaf node types for the generic
    parser.prettyformat helper.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2669 2668
def depth(tree):
    """Return the nesting depth of a parsed tree

    Non-tuple leaves have depth 0; a tuple node is one deeper than its
    deepest child.
    """
    if isinstance(tree, tuple):
        if not tree:
            # max() over an empty sequence raises ValueError; a childless
            # node still counts as one level
            return 1
        return max(map(depth, tree)) + 1
    else:
        return 0
2675 2674
def funcsused(tree):
    """Return the set of function names referenced anywhere in the tree"""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    found = set()
    for subtree in tree[1:]:
        found.update(funcsused(subtree))
    if tree[0] == 'func':
        # tree[1] is the ('symbol', name) node naming the function
        found.add(tree[1][1])
    return found
2686 2685
def _formatsetrepr(r):
    """Format an optional printable representation of a set

    ======== =================================
    type(r)  example
    ======== =================================
    tuple    ('<not %r>', other)
    str      '<branch closed>'
    callable lambda: '<branch %r>' % sorted(b)
    object   other
    ======== =================================
    """
    if r is None:
        return ''
    if isinstance(r, tuple):
        # first element is the format string, the rest its arguments
        return r[0] % r[1:]
    if isinstance(r, str):
        return r
    if callable(r):
        return r()
    return repr(r)
2709 2708
class abstractsmartset(object):
    """Base interface for the lazily-evaluated revision sets ("smartsets")

    Subclasses must implement the abstract methods below; `min`/`max`,
    the set operators and `filter` are provided here in terms of them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=False):
        """get the set to iterate in an ascending or descending order"""
        # default aligned with every concrete implementation in this module
        # (baseset, filteredset, addset, generatorset, spanset all declare
        # reverse=False); the previous reverse=True here was inconsistent
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo cannot remove anything
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        # (only Python-level functions expose func_code)
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
2822 2821
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending is tri-state: None = keep insertion order,
        # True/False = iterate the sorted list ascending/descending
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # keep the original set to make the _set propertycache a no-op
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily built set form of the data, for O(1) membership
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending-sorted copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound method of the underlying set for fast lookups
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # unordered: physically reverse the stored list
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
2946 2945
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # hoist the condition lookup out of the loop
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration only exists if the underlying subset has one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer an iterator matching the current direction, then any fast
        # iterator, before falling back to plain (possibly slow) iteration
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # emptiness check: any first element means non-empty
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list compr is free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        # ordering is delegated entirely to the wrapped subset
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # iterate backwards if a fast reverse iterator exists
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None # empty case
        else:
            # no fast reverse iteration: walk the whole set forwards
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3059 3058
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The ascending is used to indicated the iteration direction.
    Duplicated values (present in both inputs) are yielded only once.
    """
    choice = min if ascending else max

    val1 = val2 = None
    try:
        # merge the two streams until one runs dry
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            n = choice(val1, val2)
            yield n
            # clear whichever side(s) matched the emitted value — both, on
            # a duplicate — so they get refilled on the next pass
            if val1 == n:
                val1 = None
            if val2 == n:
                val2 = None
    except StopIteration:
        # one side is exhausted: flush the pending value from the other
        # side (if any), then drain it
        remaining = iter2
        if val1 is not None:
            yield val1
            remaining = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in remaining:
            yield val
3095 3094
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs) # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> # (use generator because pypy could call len())
    >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None = arbitrary (r1 then unseen r2), True/False = merge sorted
        self._ascending = ascending
        # caches: _genlist holds the materialized union, _asclist its
        # ascending-sorted form; both filled lazily
        self._genlist = None
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # already materialized and sorted: iterate the cache directly
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: physically reverse the materialized list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip direction, take the (new) first element, flip back
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3304 3303
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, declares that ``gen`` yields values in
        ascending (True) or descending (False) order, enabling the fast
        iteration and optimized membership paths.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # passed x in an ascending stream: it cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # passed x in a descending stream: it cannot appear later
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code (fastasc/fastdesc exist once consumed)
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # fully consumed: install sorted fast iterators for future use
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # BUG FIX: this previously returned self.first(), handing back
            # the wrong end of the set on the slow path; retry last() now
            # that fastasc/fastdesc have been installed by _consumegen
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3484 3483
class spanset(abstractsmartset):
    """Duck-typed baseset standing for a contiguous range of revisions,
    evaluated lazily so the full range never has to be held in memory.

    spanset(x, y) behaves almost like xrange(x, y), with two notable
    differences:
    - when x > y the set automatically iterates in descending order,
    - revisions hidden by the current repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        The spanset is descending when `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize bounds so _start <= _end always holds; the
            # direction is carried by _ascending alone
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # only picks the iteration direction; the range itself is ordered
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth asserting whether the range is still in topographical
        # order; callers should use the sort() predicate to sort explicitly
        return False

    def _iterfilter(self, iterrange):
        """Yield revisions from iterrange that are not hidden."""
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        return self.fastdesc()

    def fastasc(self):
        revs = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(revs)
        return self._iterfilter(revs)

    def fastdesc(self):
        revs = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(revs)
        return self._iterfilter(revs)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        # non-empty iff iteration produces at least one visible revision
        for _ in self:
            return True
        return False

    def __len__(self):
        span = abs(self._end - self._start)
        if not self._hiddenrevs:
            return span
        # subtract the hidden revisions that fall inside the range
        start = self._start
        end = self._end
        count = 0
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return span - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """Return the first visible revision in iteration order, or None."""
        it = self.fastasc if self._ascending else self.fastdesc
        return next(it(), None)

    def last(self):
        """Return the last visible revision in iteration order, or None."""
        it = self.fastdesc if self._ascending else self.fastasc
        return next(it(), None)

    def __repr__(self):
        direction = '+' if self._ascending else '-'
        return '<%s%s %d:%d>' % (type(self).__name__, direction,
                                 self._start, self._end - 1)
3599 3598
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # span over the whole repo: start=0, end=len(repo)
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3638 3637
def prettyformatset(revs):
    """Return the repr() of a smartset reformatted as an indented tree.

    Each '<' in the repr opens a nested smartset; the nesting depth at a
    given position determines the indentation of that segment.
    """
    rs = repr(revs)
    segments = []
    pos = 0
    total = len(rs)
    while pos < total:
        # each segment runs from one '<' up to (not including) the next
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        # depth = unmatched '<' before this position
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        segments.append((depth, rs[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + seg for depth, seg in segments)
3652 3651
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj
    """
    # ui and extname are part of the extension-loader signature and are
    # unused here; registrarobj._table maps predicate name -> function
    for name, func in registrarobj._table.iteritems():
        symbols[name] = func
        # predicates flagged safe may be exposed to untrusted input
        if func._safe:
            safesymbols.add(name)
3660 3659
# load built-in predicates explicitly to setup safesymbols
# (ui/extname are None because this is not an extension load)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now