##// END OF EJS Templates
baseset: keep the input set around...
Pierre-Yves David -
r26060:4ee2af21 default
parent child Browse files
Show More
@@ -1,3720 +1,3722 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazy generatorset yielding ``revs`` and their ancestors in
    descending revision order, without duplicates.  When ``followfirst``
    is true, only first parents are followed.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # max-heap of pending revisions; heapq is a min-heap, so revs are
        # stored negated
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # pull the next input rev onto the heap as soon as the merge
            # front reaches it, keeping the output globally sorted
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a lazy ascending generatorset of the descendants of ``revs``
    (the input revs themselves are not yielded unless reached as
    descendants).  When ``followfirst`` is true, a cset only counts as a
    descendant through first-parent links.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant iff one of its
            # (possibly cut) parents is already in 'seen'
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-python fallback for changelog.reachableroots.  ``minroot`` bounds
    the backward walk: parents below it are never visited.
    NOTE(review): when includepath is False this returns a plain set, not a
    baseset like every other exit — callers appear to cope, but confirm
    before relying on the return type.
    """
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; no need to expand
                # the path through this rev
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending): pull in every visited rev that sits on a
    # path above an already-reachable parent
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
127 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Prefers the native changelog.reachableroots implementation when one is
    available, falling back to the pure-python version otherwise.
    """
    if not roots:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = list(roots)
    heads = list(heads)
    # Probe for the native implementation with getattr() instead of wrapping
    # the call in try/except AttributeError: the latter would also swallow an
    # AttributeError raised *inside* the native code and silently rerun the
    # (slow) pure-python fallback, hiding the real bug.
    impl = getattr(repo.changelog, 'reachableroots', None)
    if impl is not None:
        return impl(minroot, heads, roots, includepath)
    return reachablerootspure(repo, minroot, roots, heads, includepath)
143 143
# Parsing table for the revset grammar, consumed by parser.parser().  Each
# token type maps to a 5-tuple; the four optional entries describe the roles
# the token can play in an expression (None where a role does not apply).
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
171 171
# words the tokenizer emits as operator tokens rather than 'symbol' tokens
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
# (adds '-' and '/' on top of the initial-letter set)
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 181
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Yields (token-type, value, position) triples, terminated by an
    ('end', None, pos) token.  ``lookup``, when given, is a callable used
    to test whether an ambiguous string is a known symbol.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape decoding
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the dash-separated
                    # pieces as symbols joined by '-' operators
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
299 299
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # a ParseError may carry the offending position as a second argument;
    # include it in the message when present
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
307 307
308 308 # helpers
309 309
def getstring(x, err):
    # both quoted strings and bare symbols are acceptable as string values
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
314 314
def getlist(x):
    # 'list' nodes are left-nested: ('list', ('list', a, b), c).  Unwind
    # them iteratively into a flat python list, left-to-right.
    if not x:
        return []
    items = []
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
321 321
def getargs(x, min, max, err):
    # flatten the argument tree; a negative max means "no upper bound"
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if 0 <= max < len(args):
        raise error.ParseError(err)
    return args
327 327
def getargsdict(x, funcname, keys):
    """Map the arguments of function node ``x`` to a {name: node} dict.

    ``keys`` is a whitespace-separated string naming the accepted
    parameters in positional order; explicit ``name=value`` arguments are
    resolved through 'keyvalue' nodes.
    """
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
331 331
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    # a well-formed symbol node is ('symbol', name, ...)
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
336 336
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
343 343
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # a well-formed func node is ('func', ('symbol', name), [args])
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
348 348
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    funcnode = tree[1]
    return getsymbol(funcnode)
355 355
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a zero-argument call has no third element at all
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
365 365
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` within ``subset`` and return a smartset.

    The node type of ``x`` selects the evaluation function from the
    module-level ``methods`` table.  A result lacking the smartset
    interface (probed via ``isascending``) is wrapped in a baseset; with
    devel warnings enabled, the offending predicate is reported.
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
380 380
def _getrevsource(repo, r):
    # Return the rev that ``r`` records as its graft/transplant/rebase
    # source, or None when no (resolvable) source is recorded.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
390 390
391 391 # operator methods
392 392
def stringset(repo, subset, x):
    # resolve the bare string to a single revision number
    x = repo[x].rev()
    if x in subset:
        return baseset([x])
    # the null revision is only accepted when matching against the whole repo
    if x == node.nullrev and isinstance(subset, fullreposet):
        return baseset([x])
    return baseset()
399 399
def rangeset(repo, subset, x, y):
    """Evaluate 'x:y' — every rev from the first of x to the last of y.

    The span is inclusive and may run in either direction (m > n).  The
    working-directory pseudo-rev (wdirrev) is handled out-of-band: it is
    appended/prepended as an explicit baseset endpoint around the span of
    real revisions.
    """
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range; spanset end bound is exclusive
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
424 424
def dagrange(repo, subset, x, y):
    # evaluate both endpoints against the whole repo, then keep the revs
    # lying on a path from x to y
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
432 432
def andset(repo, subset, x, y):
    # evaluate x within subset, then narrow further by evaluating y
    # against that intermediate result
    narrowed = getset(repo, subset, x)
    return getset(repo, narrowed, y)
435 435
def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    # split the operand list in half and union the two sides; the balanced
    # recursion keeps the depth logarithmic in the number of operands
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
444 444
def notset(repo, subset, x):
    # complement of x, taken within subset
    excluded = getset(repo, subset, x)
    return subset - excluded
447 447
def listset(repo, subset, a, b):
    # A bare 'list' node ("a,b,c") is only meaningful as function arguments;
    # reaching this method means it was used anywhere else, which is an error.
    raise error.ParseError(_("can't use a list in this context"))
450 450
def keyvaluepair(repo, subset, k, v):
    # 'keyvalue' nodes ("name=value") are only meaningful inside function
    # argument lists; anywhere else they are an error.
    raise error.ParseError(_("can't use a key-value pair in this context"))
453 453
def func(repo, subset, a, b):
    # dispatch to the named predicate when it is registered
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown function: suggest only the documented predicates (those
    # carrying a docstring)
    syms = [s for (s, fn) in symbols.items()
            if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], syms)
462 462
463 463 # functions
464 464
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the added files
    return checkstatus(repo, subset, pattern, 1)
476 476
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    args = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument set into one running gca
    for arg in args:
        for r in getset(repo, rl, arg):
            ctx = repo[r]
            anc = ctx if anc is None else anc.ancestor(ctx)

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
501 501
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation for ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = _revancestors(repo, heads, followfirst)
    return subset & ancs
508 508
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # delegate to the shared helper with first-parent following disabled
    return _ancestors(repo, subset, x, followfirst=False)
514 514
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (deliberately no docstring: kept out of the advertised revset help)
    return _ancestors(repo, subset, x, followfirst=True)
519 519
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ancs = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps back along first parents
        steps = n
        while steps > 0:
            r = cl.parentrevs(r)[0]
            steps -= 1
        ancs.add(r)
    return subset & ancs
536 536
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matches(r):
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matches)
545 545
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    marked = set(hbisect.get(repo, status))
    return subset & marked
562 562
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
567 567
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # 're:' pattern: likewise an error when nothing matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
605 605
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # 're:' / other patterns always match against branch names
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: select csets on the same branch as any cset in x
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
638 638
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
649 649
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo's changelog carries 'bundlerevs'
    bundlerevs = getattr(repo.changelog, 'bundlerevs', None)
    if bundlerevs is None:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
661 661
def checkstatus(repo, subset, pat, field):
    """Filter ``subset`` to csets whose status field ``field`` matches ``pat``.

    ``field`` indexes the repo.status() result tuple; e.g. ``adds`` passes
    1 for added files.  When ``pat`` names exactly one file, membership in
    ``c.files()`` is tested first to skip the (expensive) status call.
    """
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cache for the compiled matcher;
    # fileset patterns depend on the ctx, so those are rebuilt per rev
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: the pattern is one literal file name
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
693 693
def _children(repo, narrow, parentset):
    # revisions in ``narrow`` having at least one parent in ``parentset``
    if not parentset:
        return baseset()
    children = set()
    getparents = repo.changelog.parentrevs
    # no rev at or below the smallest parent can be a child of any parent
    minrev = parentset.min()
    for rev in narrow:
        if rev <= minrev:
            continue
        for prev in getparents(rev):
            if prev in parentset:
                children.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(children)
709 709
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    # parents are resolved against the whole repo; children are then
    # searched for within subset only
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
717 717
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
725 725
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        ctx = repo[rev]
        if matchmod.patkind(pat):
            # real pattern: scan the whole manifest with a matcher
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
            for f in ctx.manifest():
                if m(f):
                    return True
        else:
            # plain path: exact manifest membership test
            path = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if path in ctx:
                return True
        return False

    return subset.filter(matches)
752 752
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue)
774 774
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    datematcher = util.matchdate(spec)

    def matches(rev):
        # date()[0] is the unix timestamp of the cset
        return datematcher(repo[rev].date()[0])

    return subset.filter(matches)
783 783
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
796 796
def _descendants(repo, subset, x, followfirst=False):
    """Shared implementation of descendants() and _firstdescendants().

    Returns the roots plus their descendants, restricted to ``subset``
    and reordered to follow subset's ordering when it has one.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect so subset imposes its own order
        result = subset & result
    return result
815 815
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # delegate to the shared helper with first-parent following disabled
    return _descendants(repo, subset, x, followfirst=False)
821 821
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (deliberately no docstring: kept out of the advertised revset help)
    return _descendants(repo, subset, x, followfirst=True)
826 826
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage accumulates the candidate dests visited while walking back
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
870 870
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
879 879
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
888 888
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        # matcher handles the 're:'/'literal:' prefixes on the value
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value given, presence of the label alone is enough
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
918 918
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set() # changelog revisions to return
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: canonicalize it to a single repo-relative file name
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # real pattern: match it against the working directory context
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            # remember the smallest known filerev child of each parent; this
            # feeds the linkrev-adjustment fallback below
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev # the (filtered) linkrev recorded in the filelog

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the filerev -> head map (done at most once)
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1033 1033
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # thin wrapper: all argument parsing and validation happens in limit()
    return limit(repo, subset, x)
1039 1039
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst(): ancestors of the
    # working directory's parent, optionally restricted to the history of a
    # single file. 'name' is only used in error messages; 'followfirst'
    # limits the walk to first parents.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            # the named file does not exist in the working directory parent
            return baseset()
    else:
        # no filename: follow the whole changeset ancestry from '.'
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1056 1056
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegate to the shared helper; 'follow' only labels error messages
    return _follow(repo, subset, x, 'follow')
1064 1064
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # Internal predicate (leading underscore); no user-facing docstring.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070 1070
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersect so the result keeps whatever order 'subset' has
    return subset & spanset(repo) # drop "null" if any
1078 1078
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search the changed files, the user name and the description
        c = repo[x]
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches)
1099 1099
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # the two-character prefix selects the kind of argument
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches when any of its touched files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1156 1156
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (plain pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1169 1169
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    hs = set()
    # collect the revision number of every head of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            hs.add(torev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1185 1185
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    return getset(repo, subset, x) - parents(repo, subset, x)
1193 1193
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' view are exactly the
    # hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1202 1202
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # look through the changed files, the user name and the description
        ctx = repo[r]
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1217 1217
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.

    Walks the argument set in its own order and keeps the first 'n'
    members that are also in 'subset'.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # the loop variable must not be named 'x': that would shadow the parse
    # tree argument of this function (revisions are ints, never None, so
    # None is a safe exhaustion sentinel for next())
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            # the input set ran out before reaching the limit
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1243 1243
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.

    Walks the argument set in reverse order and keeps the first 'n'
    members that are also in 'subset'.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # the loop variable must not be named 'x': that would shadow the parse
    # tree argument of this function (revisions are ints, never None, so
    # None is a safe exhaustion sentinel for next())
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            # the input set ran out before reaching the limit
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1270 1270
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        # empty argument set -> empty result
        return baseset()
    top = candidates.max()
    if top in subset:
        return baseset([top])
    return baseset()
1281 1281
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge changeset is one whose second parent is a real revision
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1290 1290
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts the children of revision (baserev + i); parents
    # below baserev can never be in the subset, so they are ignored
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1309 1309
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        # empty argument set -> empty result
        return baseset()
    bottom = candidates.min()
    if bottom in subset:
        return baseset([bottom])
    return baseset()
1320 1320
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field index 0 presumably selects "modified" files -- see
    # checkstatus (defined elsewhere in this file)
    return checkstatus(repo, subset, pat, 0)
1332 1332
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # a literal name must designate an existing namespace
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # a pattern selects every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # collect the revisions bound to every non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    # nullrev is not a real changeset; never return it
    names -= set([node.nullrev])
    return subset & names
1370 1370
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # a full 40-hex-digit nodeid: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node, or the string is not valid hex
            rn = None
    else:
        # shorter string: prefix lookup via the changelog (None when the
        # prefix matches nothing -- see changelog._partialmatch)
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1394 1394
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict the input set to revisions flagged obsolete by the
    # obsolescence store
    return subset & obsmod.getrevs(repo, 'obsolete')
1402 1402
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every head of the repo that is
        # neither in the include set nor descends from it
        # (idiom fix: 'rev not in' instead of 'not rev in')
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        # two-argument form: the second set is the explicit exclusion base
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1428 1428
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its start
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in dests)
    # revisions with no recorded source yield None; drop it
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1459 1459
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push path, then the default pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer the ui so the discovery chatter does not leak into the output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1486 1486
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # x is None: repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    # root changesets have nullrev as parent; drop it
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1505 1505
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # x is None: use the parents of the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # no second parent: the working directory is not a merge
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    # non-merge changesets have nullrev as second parent; drop it
    ps -= set([node.nullrev])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1528 1528
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; ask its
                # context for the parents instead
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    # drop nullrev contributed by root changesets and non-merges
    ps -= set([node.nullrev])
    return subset & ps
1547 1547
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the precomputed phase sets are available
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each revision individually
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        # NOTE(review): cache=False presumably because phases can change
        # between evaluations -- confirm against smartset.filter semantics
        return subset.filter(condition, cache=False)
1560 1560
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # delegate the actual selection to the shared phase helper
    return _phase(repo, subset, phases.draft)
1568 1568
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # delegate the actual selection to the shared phase helper
    return _phase(repo, subset, phases.secret)
1576 1576
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # 'n' arrives as a parse tree token; its payload is the digit
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0: the revision itself
            ps.add(r)
        elif n == 1:
            # ^1: the first parent
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            # ^2: the second parent, when the changelog records one
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1601 1601
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure: unknown revisions become an empty set
        return baseset()
1615 1615
# for internal use
def _notpublic(repo, subset, x):
    # select every revision that is NOT in the public phase
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every phase set except index 0 (public, judging
        # from the phases.public test in the fallback below)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: test each revision's phase individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1632 1632
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    return subset.filter(ispublic, cache=False)
1642 1642
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the name of the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote peer to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1677 1677
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field index 2 presumably selects "removed" files -- see
    # checkstatus (defined elsewhere in this file)
    return checkstatus(repo, subset, pat, 2)
1689 1689
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if revnum != node.nullrev and revnum not in repo.changelog:
        # unknown revision number
        return baseset()
    return subset & baseset([revnum])
1705 1705
1706 1706 def matching(repo, subset, x):
1707 1707 """``matching(revision [, field])``
1708 1708 Changesets in which a given set of fields match the set of fields in the
1709 1709 selected revision or set.
1710 1710
1711 1711 To match more than one field pass the list of fields to match separated
1712 1712 by spaces (e.g. ``author description``).
1713 1713
1714 1714 Valid fields are most regular revision fields and some special fields.
1715 1715
1716 1716 Regular revision fields are ``description``, ``author``, ``branch``,
1717 1717 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1718 1718 and ``diff``.
1719 1719 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1720 1720 contents of the revision. Two revisions matching their ``diff`` will
1721 1721 also match their ``files``.
1722 1722
1723 1723 Special fields are ``summary`` and ``metadata``:
1724 1724 ``summary`` matches the first line of the description.
1725 1725 ``metadata`` is equivalent to matching ``description user date``
1726 1726 (i.e. it matches the main metadata fields).
1727 1727
1728 1728 ``metadata`` is the default field which is used when no fields are
1729 1729 specified. You can match more than one field at a time.
1730 1730 """
1731 1731 # i18n: "matching" is a keyword
1732 1732 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1733 1733
1734 1734 revs = getset(repo, fullreposet(repo), l[0])
1735 1735
1736 1736 fieldlist = ['metadata']
1737 1737 if len(l) > 1:
1738 1738 fieldlist = getstring(l[1],
1739 1739 # i18n: "matching" is a keyword
1740 1740 _("matching requires a string "
1741 1741 "as its second argument")).split()
1742 1742
1743 1743 # Make sure that there are no repeated fields,
1744 1744 # expand the 'special' 'metadata' field type
1745 1745 # and check the 'files' whenever we check the 'diff'
1746 1746 fields = []
1747 1747 for field in fieldlist:
1748 1748 if field == 'metadata':
1749 1749 fields += ['user', 'description', 'date']
1750 1750 elif field == 'diff':
1751 1751 # a revision matching the diff must also match the files
1752 1752 # since matching the diff is very costly, make sure to
1753 1753 # also match the files first
1754 1754 fields += ['files', 'diff']
1755 1755 else:
1756 1756 if field == 'author':
1757 1757 field = 'user'
1758 1758 fields.append(field)
1759 1759 fields = set(fields)
1760 1760 if 'summary' in fields and 'description' in fields:
1761 1761 # If a revision matches its description it also matches its summary
1762 1762 fields.discard('summary')
1763 1763
1764 1764 # We may want to match more than one field
1765 1765 # Not all fields take the same amount of time to be matched
1766 1766 # Sort the selected fields in order of increasing matching cost
1767 1767 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1768 1768 'files', 'description', 'substate', 'diff']
1769 1769 def fieldkeyfunc(f):
1770 1770 try:
1771 1771 return fieldorder.index(f)
1772 1772 except ValueError:
1773 1773 # assume an unknown field is very costly
1774 1774 return len(fieldorder)
1775 1775 fields = list(fields)
1776 1776 fields.sort(key=fieldkeyfunc)
1777 1777
1778 1778 # Each field will be matched with its own "getfield" function
1779 1779 # which will be added to the getfieldfuncs array of functions
1780 1780 getfieldfuncs = []
1781 1781 _funcs = {
1782 1782 'user': lambda r: repo[r].user(),
1783 1783 'branch': lambda r: repo[r].branch(),
1784 1784 'date': lambda r: repo[r].date(),
1785 1785 'description': lambda r: repo[r].description(),
1786 1786 'files': lambda r: repo[r].files(),
1787 1787 'parents': lambda r: repo[r].parents(),
1788 1788 'phase': lambda r: repo[r].phase(),
1789 1789 'substate': lambda r: repo[r].substate,
1790 1790 'summary': lambda r: repo[r].description().splitlines()[0],
1791 1791 'diff': lambda r: list(repo[r].diff(git=True),)
1792 1792 }
1793 1793 for info in fields:
1794 1794 getfield = _funcs.get(info, None)
1795 1795 if getfield is None:
1796 1796 raise error.ParseError(
1797 1797 # i18n: "matching" is a keyword
1798 1798 _("unexpected field name passed to matching: %s") % info)
1799 1799 getfieldfuncs.append(getfield)
1800 1800 # convert the getfield array of functions into a "getinfo" function
1801 1801 # which returns an array of field values (or a single value if there
1802 1802 # is only one field to match)
1803 1803 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1804 1804
1805 1805 def matches(x):
1806 1806 for rev in revs:
1807 1807 target = getinfo(rev)
1808 1808 match = True
1809 1809 for n, f in enumerate(getfieldfuncs):
1810 1810 if target[n] != f(x):
1811 1811 match = False
1812 1812 if match:
1813 1813 return True
1814 1814 return False
1815 1815
1816 1816 return subset.filter(matches)
1817 1817
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # resolve the argument against the subset, then flip it in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1825 1825
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a rev is a root unless one of its non-null parents is in the set
        return not any(0 <= p and p in candidates for p in parentrevs(r))
    return subset & candidates.filter(isroot)
1838 1838
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    # with no explicit spec, sort by revision number
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    # invert() maps each character to its byte-wise complement so that
    # ascending lexicographic order of the result equals descending order
    # of the original string; this implements '-' for text-valued keys.
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: pure revision-number sorts need no changectx lookups
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a decorated list of key tuples and sort it
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
            # the rev itself is appended after every key as a tie-breaker;
            # the final element also lets us recover the rev below (e[-1])
            e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1901 1901
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        # note: 'pat' is only bound when a pattern argument was given;
        # submatches() below is only reachable in that case
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes are relevant for subrepo modifications
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names that match the user-supplied pattern
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status limited to .hgsubstate: did this rev touch any subrepo?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any kind of subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate appeared: every current subrepo counts as added
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare substate of parent and child for matching names
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate disappeared: every parent subrepo counts as removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1944 1944
1945 1945 def _stringmatcher(pattern):
1946 1946 """
1947 1947 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1948 1948 returns the matcher name, pattern, and matcher function.
1949 1949 missing or unknown prefixes are treated as literal matches.
1950 1950
1951 1951 helper for tests:
1952 1952 >>> def test(pattern, *tests):
1953 1953 ... kind, pattern, matcher = _stringmatcher(pattern)
1954 1954 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1955 1955
1956 1956 exact matching (no prefix):
1957 1957 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1958 1958 ('literal', 'abcdefg', [False, False, True])
1959 1959
1960 1960 regex matching ('re:' prefix)
1961 1961 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1962 1962 ('re', 'a.+b', [False, False, True])
1963 1963
1964 1964 force exact matches ('literal:' prefix)
1965 1965 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1966 1966 ('literal', 're:foobar', [False, True])
1967 1967
1968 1968 unknown prefixes are ignored and treated as literals
1969 1969 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1970 1970 ('literal', 'foo:bar', [False, False, True])
1971 1971 """
1972 1972 if pattern.startswith('re:'):
1973 1973 pattern = pattern[3:]
1974 1974 try:
1975 1975 regex = re.compile(pattern)
1976 1976 except re.error as e:
1977 1977 raise error.ParseError(_('invalid regular expression: %s')
1978 1978 % e)
1979 1979 return 're', pattern, regex.search
1980 1980 elif pattern.startswith('literal:'):
1981 1981 pattern = pattern[8:]
1982 1982 return 'literal', pattern, pattern.__eq__
1983 1983
def _substringmatcher(pattern):
    # Like _stringmatcher(), but a 'literal' pattern matches as a
    # substring instead of requiring full equality.
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _pat=pattern):
            return _pat in s
    return kind, pattern, matcher
1989 1989
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # pattern match: scan every tag name through the matcher
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: all tagged revisions except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2018 2018
def tagged(repo, subset, x):
    # alias of tag()
    return tag(repo, subset, x)
2021 2021
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    # getargs() is called only to reject any arguments
    getargs(x, 0, 0, _("unstable takes no arguments"))
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
2030 2030
2031 2031
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # ``user`` is an alias of ``author``; matching is delegated entirely
    return author(repo, subset, x)
2041 2041
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # report the working-directory pseudo-rev only when the subset already
    # contains it or can contain everything (fullreposet)
    members = []
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        members.append(node.wdirrev)
    return baseset(members)
2049 2049
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision symbols (as produced by the
    # 'or' optimization in optimize()) into a baseset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            # fall back to full symbol resolution (hash, tag, bookmark, ...)
            r = repo[t].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        # record even filtered-out revs so later duplicates are skipped
        seen.add(r)
    return baseset(ls)
2075 2075
# for internal use
def _intlist(repo, subset, x):
    # decode a '\0'-separated list of integer revisions, keeping only
    # those present in the subset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    candidates = [int(piece) for piece in spec.split('\0')]
    return baseset([r for r in candidates if r in subset])
2084 2084
# for internal use
def _hexlist(repo, subset, x):
    # decode a '\0'-separated list of binary hex nodes into revisions,
    # keeping only those present in the subset
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    candidates = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in candidates if r in subset])
2094 2094
# maps revset predicate names to their implementation functions
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2169 2169
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): presumably consulted when evaluating revsets from untrusted
# sources -- confirm at the call sites before relying on this list
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2243 2243
# maps parse tree node types to the functions that evaluate them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2259 2259
def optimize(x, small):
    """Optimize parse tree 'x', returning a (weight, tree) pair.

    The returned tree is an equivalent, possibly rewritten form of 'x'
    (e.g. 'minus' is turned into 'and'/'not', and '::x and not ::y' into
    the faster 'only(x, y)').  The weight is a heuristic cost estimate
    used to evaluate the cheaper operand of an 'and' first.  'small'
    hints that the caller expects a small result set, which makes single
    revisions slightly cheaper (smallbonus).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    # note: 'op in <string>' is a substring test; correct here because op
    # is always one of the whole words listed
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # collapse the pending run of trivial revisions into one _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; they carry no cost of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # heuristic weights per predicate; see the 'and' branch above
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2392 2392
2393 2393 _aliasarg = ('func', ('symbol', '_aliasarg'))
2394 2394 def _getaliasarg(tree):
2395 2395 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2396 2396 return X, None otherwise.
2397 2397 """
2398 2398 if (len(tree) == 3 and tree[:2] == _aliasarg
2399 2399 and tree[2][0] == 'string'):
2400 2400 return tree[2][1]
2401 2401 return None
2402 2402
2403 2403 def _checkaliasarg(tree, known=None):
2404 2404 """Check tree contains no _aliasarg construct or only ones which
2405 2405 value is in known. Used to avoid alias placeholders injection.
2406 2406 """
2407 2407 if isinstance(tree, tuple):
2408 2408 arg = _getaliasarg(tree)
2409 2409 if arg is not None and (not known or arg not in known):
2410 2410 raise error.UnknownIdentifier('_aliasarg', [])
2411 2411 for t in tree:
2412 2412 _checkaliasarg(t, known)
2413 2413
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$', and any byte > 127; '$' only for backward
# compatibility with alias argument placeholders)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2418 2418
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # delegates to the regular revset tokenizer with a widened initial
    # character set for symbols
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2428 2428
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols, no nesting or literals
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        # parse failures are reported via errorstr, never raised
        return (decl, None, None, parseerrordetail(inst))
2499 2499
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols are reserved for declared args
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2564 2564
class revsetalias(object):
    # A user-defined revset alias parsed from a 'revsetalias' config entry.
    # On success, 'replacement' holds the parsed definition tree; on
    # failure, 'error' holds a message and 'replacement' is left unset.

    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2589 2589
2590 2590 def _getalias(aliases, tree):
2591 2591 """If tree looks like an unexpanded alias, return it. Return None
2592 2592 otherwise.
2593 2593 """
2594 2594 if isinstance(tree, tuple) and tree:
2595 2595 if tree[0] == 'symbol' and len(tree) == 2:
2596 2596 name = tree[1]
2597 2597 alias = aliases.get(name)
2598 2598 if alias and alias.args is None and alias.tree == tree:
2599 2599 return alias
2600 2600 if tree[0] == 'func' and len(tree) > 1:
2601 2601 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 2602 name = tree[1][1]
2603 2603 alias = aliases.get(name)
2604 2604 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 2605 return alias
2606 2606 return None
2607 2607
2608 2608 def _expandargs(tree, args):
2609 2609 """Replace _aliasarg instances with the substitution value of the
2610 2610 same name in args, recursively.
2611 2611 """
2612 2612 if not tree or not isinstance(tree, tuple):
2613 2613 return tree
2614 2614 arg = _getaliasarg(tree)
2615 2615 if arg is not None:
2616 2616 return args[arg]
2617 2617 return tuple(_expandargs(t, args) for t in tree)
2618 2618
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used
    to detect cycles; 'cache' memoizes expanded replacement trees by
    alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute the (recursively expanded)
            # actual arguments for the _aliasarg placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: expand children in place
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2652 2652
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in `tree`.

    Alias definitions are read from the [revsetalias] config section.
    When `showwarning` is provided, each broken alias definition is
    reported once, whether or not it was actually referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        definition = revsetalias(key, value)
        aliases[definition.name] = definition
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, definition in sorted(aliases.iteritems()):
            if definition.error and not definition.warned:
                showwarning(_('warning: %s\n') % (definition.error))
                definition.warned = True
    return expanded
2667 2667
def foldconcat(tree):
    """Fold elements to be concatenated by `##`

    '_concat' nodes are collapsed (left to right, depth first) into a
    single 'string' node; other nodes are rebuilt with folded children.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    pieces = []
    def collect(elem):
        # depth-first, left-to-right walk of nested '_concat' nodes
        if elem[0] == '_concat':
            for child in elem[1:]:
                collect(child)
        elif elem[0] in ('string', 'symbol'):
            pieces.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (elem[0])
            raise error.ParseError(msg)
    collect(tree)
    return ('string', ''.join(pieces))
2688 2688
def parse(spec, lookup=None):
    """Parse the revset string `spec` into an expression tree.

    Raises error.ParseError when input remains after a complete
    expression. Chained 'or' nodes are flattened before returning.
    """
    tokens = tokenize(spec, lookup=lookup)
    tree, pos = parser.parser(elements).parse(tokens)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2695 2695
def posttreebuilthook(tree, repo):
    """Hook point called after the revset tree has been optimized.

    The core implementation intentionally does nothing; extensions may
    wrap this function to inspect or act on the final tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2699 2699
def match(ui, spec, repo=None):
    """Return a matcher function for the single revset `spec`.

    Raises error.ParseError on an empty query. `repo`, when given and
    non-empty, provides symbol lookup for tokenization.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    # keep the original truthiness test: repo objects may define __len__
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2708 2708
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # nothing requested: the matcher yields an empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    # keep the original truthiness test: repo objects may define __len__
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2726 2726
def _makematcher(ui, tree, repo):
    """Compile a parsed revset `tree` into a matcher function.

    Aliases are expanded (when a ui is available), `##` concatenations
    folded and the tree optimized before building the matcher. The
    returned function maps (repo, subset) to a smartset of revisions;
    `subset` defaults to the whole repository.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # plain iterables are promoted to a smartset first
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2742 2742
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped representation of the string form
        return repr(str(s))

    def argtype(code, arg):
        if code == 'd':
            return str(int(arg))
        elif code == 's':
            return quote(arg)
        elif code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif code == 'n':
            return quote(node.hex(arg))
        elif code == 'b':
            return quote(arg.branch())

    def listexp(values, typ):
        # render a list as a single _list/_intlist/_hexlist call when the
        # element type allows it, otherwise fall back to an 'or' tree
        size = len(values)
        if size == 0:
            return "_list('')"
        elif size == 1:
            return argtype(typ, values[0])
        elif typ == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in values)
        elif typ == 's':
            return "_list('%s')" % "\0".join(values)
        elif typ == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in values)
        elif typ == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in values)

        mid = size // 2
        return '(%s or %s)' % (listexp(values[:mid], typ),
                               listexp(values[mid:], typ))

    out = []
    i = 0
    argindex = 0
    while i < len(expr):
        ch = expr[i]
        if ch != '%':
            out.append(ch)
            i += 1
            continue
        i += 1
        code = expr[i]
        if code == '%':
            out.append(code)
        elif code == 'l':
            # a list of some type: the element type follows the 'l'
            i += 1
            code = expr[i]
            out.append(listexp(list(args[argindex]), code))
            argindex += 1
        elif code in 'dsnbr':
            out.append(argtype(code, args[argindex]))
            argindex += 1
        else:
            raise util.Abort('unexpected revspec format character %s' % code)
        i += 1

    return ''.join(out)
2836 2836
def prettyformat(tree):
    """Return an indented, multi-line string rendering of a revset parse
    tree ('string' and 'symbol' nodes are the leaves)."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2839 2839
def depth(tree):
    """Return the nesting depth of a parse tree.

    Non-tuple leaves have depth 0; a tuple node is one level deeper than
    its deepest child. An empty tuple node has depth 1 (previously it
    raised ValueError because max() was applied to an empty sequence).
    """
    if not isinstance(tree, tuple):
        return 0
    if not tree:
        # max() over no children would raise ValueError
        return 1
    return max(depth(t) for t in tree) + 1
2845 2845
def funcsused(tree):
    """Return the set of function names referenced anywhere in `tree`."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2856 2856
class abstractsmartset(object):
    """Base class documenting (and partially implementing) the smartset API.

    A smartset is an ordered collection of revision numbers supporting
    lazy iteration, fast membership testing and set-like combination."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first value of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with everything is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins (no func_code attribute) cannot be wrapped by
        # util.cachefunc -- but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2961 2961
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            if isinstance(data, set):
                # reuse the caller's set as our membership cache
                self._set = data
            data = list(data)
        self._list = data
        # None: keep insertion order; True/False: ascending/descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # ascending copy of the data, computed on demand
        ordered = self._list[:]
        ordered.sort()
        return ordered

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the set's own containment test as the instance attribute
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is True

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is False

    def first(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        marker = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, marker, self._list)
3056 3058
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoized per-revision membership results
        self._cache = {}

    def __contains__(self, x):
        cache = self._cache
        try:
            return cache[x]
        except KeyError:
            result = x in self._subset and self._condition(x)
            cache[x] = result
            return result

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # local binding avoids an attribute lookup per element
        condition = self._condition
        for value in it:
            if condition(value):
                yield value

    @property
    def fastasc(self):
        fast = self._subset.fastasc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    @property
    def fastdesc(self):
        fast = self._subset.fastdesc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    def __nonzero__(self):
        for _value in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset(r for r in self))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for value in self:
            return value
        return None

    def last(self):
        # prefer the fast reversed iterator when the direction is known
        backwards = None
        if self.isascending():
            backwards = self.fastdesc
        elif self.isdescending():
            backwards = self.fastasc
        if backwards is None:
            # no shortcut available: scan everything, keep the last value
            value = None
            for value in self:
                pass
            return value
        for value in backwards():
            return value
        return None  # empty case

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3146 3148
3147 3149 def _iterordered(ascending, iter1, iter2):
3148 3150 """produce an ordered iteration from two iterators with the same order
3149 3151
3150 3152 The ascending is used to indicated the iteration direction.
3151 3153 """
3152 3154 choice = max
3153 3155 if ascending:
3154 3156 choice = min
3155 3157
3156 3158 val1 = None
3157 3159 val2 = None
3158 3160 try:
3159 3161 # Consume both iterators in an ordered way until one is empty
3160 3162 while True:
3161 3163 if val1 is None:
3162 3164 val1 = iter1.next()
3163 3165 if val2 is None:
3164 3166 val2 = iter2.next()
3165 3167 next = choice(val1, val2)
3166 3168 yield next
3167 3169 if val1 == next:
3168 3170 val1 = None
3169 3171 if val2 == next:
3170 3172 val2 = None
3171 3173 except StopIteration:
3172 3174 # Flush any remaining values and consume the other one
3173 3175 it = iter2
3174 3176 if val1 is not None:
3175 3177 yield val1
3176 3178 it = iter1
3177 3179 elif val2 is not None:
3178 3180 # might have been equality and both are empty
3179 3181 yield val2
3180 3182 for val in it:
3181 3183 yield val
3182 3184
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter appears unused within this class -- confirm
        # against the rest of the file before removing
        self._iter = None
        self._ascending = ascending
        # cache of all generated values (a baseset), built on demand
        self._genlist = None
        # sorted cache of the generated values, built on demand
        self._asclist = None

    def __len__(self):
        # may force a full iteration to materialize _list
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # yield _r1 first, then the elements of _r2 not in _r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # both sides must support fast ascending iteration
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # both sides must support fast descending iteration
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reverse twice to read the last element from the other end
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3382 3384
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: when not None, declares that `gen` produces values in
        ascending (True) or descending (False) order, enabling faster
        membership tests and a fast iterator in that direction.
        """
        self._gen = gen
        # sorted list of all generated values, set once _gen is exhausted
        self._asclist = None
        # membership results discovered so far
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): binding __contains__ on the instance is not
                # honored by the 'in' operator on new-style classes (special
                # methods are looked up on the type) -- verify these
                # shortcuts actually take effect
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # generator is ascending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # generator is descending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        # generator exhausted: install the sorted fast iterators
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # forces complete consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3556 3558
class spanset(abstractsmartset):
    """Smartset duck type covering a contiguous span of revision numbers.

    Behaves much like xrange(start, end), with two differences:

    - passing start > end yields an automatically descending set,
    - revisions hidden by the current repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """Build the half-open span [start, end).

        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1
             (defaults to len(repo))

        The set is descending when `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end always holds; the direction
            # is carried by _ascending only
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # lazily drop revisions hidden by the repoview
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        it = self.fastasc if self._ascending else self.fastdesc
        return it()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def __contains__(self, rev):
        if not (self._start <= rev < self._end):
            return False
        hidden = self._hiddenrevs
        return not (hidden and rev in hidden)

    def __nonzero__(self):
        # non-empty iff at least one non-hidden revision survives
        for rev in self:
            return True
        return False

    def __len__(self):
        size = abs(self._end - self._start)
        if not self._hiddenrevs:
            return size
        hiddencount = 0
        start = self._start
        end = self._end
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                hiddencount += 1
        return size - hiddencount

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """First revision in iteration order, or None when empty."""
        it = self.fastasc if self._ascending else self.fastdesc
        return next(it(), None)

    def last(self):
        """Last revision in iteration order, or None when empty."""
        # iterate in the opposite direction and take its first element
        it = self.fastdesc if self._ascending else self.fastasc
        return next(it(), None)

    def __repr__(self):
        direction = '+' if self._ascending else '-'
        return '<%s%s %d:%d>' % (type(self).__name__, direction,
                                 self._start, self._end - 1)
3665 3667
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` is not a smartset; since it was combined with "&",
            # assume it is at least set-like, and turn it into one after
            # filtering out hidden revisions (this boldly assumes all
            # smartsets are pure).
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3704 3706
def prettyformatset(revs):
    """Return repr(revs) reformatted as an indented multi-line string.

    Every '<' in the repr opens a nested smartset; each nested repr is
    placed on its own line, indented by its nesting depth.
    """
    rs = repr(revs)
    pieces = []
    pos = 0
    total = len(rs)
    while pos < total:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        # depth = number of '<' not yet matched by a '>' before this point
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        pieces.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(pieces)
3718 3720
# tell hggettext to extract docstrings from these functions:
# NOTE(review): `symbols` is defined earlier in this file (not shown here);
# presumably it maps revset predicate names to their implementations, whose
# docstrings are user-visible help text and must be translatable — confirm.
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now