##// END OF EJS Templates
revset: use 'next()' to detect end of iteration in 'limit'...
Pierre-Yves David -
r25144:81a39544 default
parent child Browse files
Show More
@@ -1,3506 +1,3505
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    When ``followfirst`` is true, only the first parent of each visited
    revision is followed.  Returns a generatorset yielding revisions in
    descending order.
    """
    if followfirst:
        cut = 1  # slice parentrevs() down to the first parent only
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Emit ancestors in descending revision order.  heapq is a
        # min-heap, so revisions are stored negated to simulate a
        # max-heap.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # feed the next requested rev into the heap so input
                # revisions and discovered ancestors merge in order
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
51 51
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    When ``followfirst`` is true, only first-parent links are followed.
    Returns a generatorset yielding revisions in ascending order.
    """
    if followfirst:
        cut = 1  # only consider the first parent when walking down
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # A descendant always has a higher revision number than its
            # ancestors, so one ascending sweep over the changelog is
            # enough to find them all.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
78 78
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # revisions below the lowest root can never lie on a root->head path
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # Second pass, ascending: a revision is on a path if one of its
    # parents is; sorting guarantees parents are decided before children.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
108 108
# Parsing tables for the revset grammar, consumed by the generic ``parser``
# module.  Each operator maps to a tuple of:
#   (binding strength, prefix handler, infix handler[, suffix handler])
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words that act as operators rather than symbols when left unquoted
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
144 144
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    # Each yielded token is a (type, value, start-position) triple.
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: keep backslashes literal
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
247 247
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    return _('at %s: %s') % (args[1], args[0])
255 255
256 256 # helpers
257 257
def getstring(x, err):
    # accept both quoted strings and bare symbols as string arguments
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
262 262
def getlist(x):
    # flatten a right-leaning ('list', ...) tree into a Python list
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]
269 269
def getargs(x, min, max, err):
    # flatten the argument tree and enforce the arity bounds
    # (max < 0 means "no upper bound")
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if 0 <= max < len(args):
        raise error.ParseError(err)
    return args
275 275
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
280 280
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    _kind, name = tree[0], tree[1]
    return name
287 287
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
292 292
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    funcsym = tree[1]
    return getsymbol(funcsym)
299 299
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # no argument node at all: zero-argument call
        return []
    return getlist(tree[2])
309 309
def getset(repo, subset, x):
    # Evaluate the parsed tree ``x`` against ``subset`` by dispatching on
    # the node type via the file-level ``methods`` table.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset-style object; return it untouched
        return s
    # plain iterable: wrap so callers always get the smartset API
    return baseset(s)
317 317
def _getrevsource(repo, r):
    # Return the revision recorded as the source of r (set by graft,
    # transplant or rebase), or None when there is none or it is unknown.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
327 327
328 328 # operator methods
329 329
def stringset(repo, subset, x):
    # resolve a plain revision identifier and keep it only if in subset
    rev = repo[x].rev()
    if rev not in subset:
        return baseset()
    return baseset([rev])
335 335
def rangeset(repo, subset, x, y):
    # "x:y": evaluate both endpoints against the whole repo so the range
    # is not limited by the subset before intersecting at the end.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # the range runs from the first rev of x to the last rev of y
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range (e.g. "5:2"); spanset's end is exclusive,
        # hence n - 1 to include n itself
        r = spanset(repo, m, n - 1)
    return r & subset
349 349
def dagrange(repo, subset, x, y):
    # "x::y": every revision on a DAG path from x (roots) to y (heads)
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    return _revsbetween(repo, roots, heads) & subset
354 354
def andset(repo, subset, x, y):
    # intersection: evaluate y within the result of evaluating x
    left = getset(repo, subset, x)
    return getset(repo, left, y)
357 357
def orset(repo, subset, x, y):
    # union: members of x come first, then members of y
    return getset(repo, subset, x) + getset(repo, subset, y)
362 362
def notset(repo, subset, x):
    # "not x": drop from subset everything that x selects
    matched = getset(repo, subset, x)
    return subset - matched
365 365
def listset(repo, subset, a, b):
    # a bare "a,b,c" list only makes sense as function arguments;
    # reaching this handler means it was used as a standalone expression
    raise error.ParseError(_("can't use a list in this context"))
368 368
def func(repo, subset, a, b):
    # dispatch a parsed function call to its registered implementation
    if a[0] != 'symbol' or a[1] not in symbols:
        raise error.UnknownIdentifier(a[1], symbols.keys())
    return symbols[a[1]](repo, subset, b)
373 373
374 374 # functions
375 375
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the list of added files
    return checkstatus(repo, subset, pat, 1)
387 387
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold each revision into the running common ancestor
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
412 412
def _ancestors(repo, subset, x, followfirst=False):
    # resolve heads against the whole repo so ancestors outside subset
    # still participate in the walk
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancestors = _revancestors(repo, heads, followfirst)
    return subset & ancestors
419 419
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper; the actual computation lives in _ancestors()
    return _ancestors(repo, subset, x)
425 425
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (internal helper: deliberately a comment, not a docstring, so it
    # stays out of the user-visible help)
    return _ancestors(repo, subset, x, followfirst=True)
430 430
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parsed token tuple; its payload is at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
447 447
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # match case-insensitively by lowering both needle and user name
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
456 456
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() resolves the status keyword into concrete revisions
    state = set(hbisect.get(repo, status))
    return subset & state
473 473
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # legacy alias kept for old queries; identical to bisect()
    return bisect(repo, subset, x)
478 478
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. re:): collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
516 516
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # non-literal patterns (e.g. re:) always filter here and
            # never fall through
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec argument: select every changeset on the branches of the
    # given revisions (plus the revisions themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
549 549
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
560 560
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo's changelog carries bundlerevs
    cl = repo.changelog
    if not util.safehasattr(cl, 'bundlerevs'):
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & cl.bundlerevs
572 572
def checkstatus(repo, subset, pat, field):
    # Shared helper for status-based predicates: keep revisions whose
    # repo.status() entry at index ``field`` contains a file matching
    # ``pat`` (e.g. adds() passes 1, the added-files list).
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cache for the matcher closure
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the context; rebuild per revision
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # shortcut: a single literal file name can be tested directly
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            # bail out early if no changed file matches at all
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
604 604
def _children(repo, narrow, parentset):
    # collect members of ``narrow`` that have a parent in ``parentset``
    if not parentset:
        return baseset(set())
    found = set()
    pr = repo.changelog.parentrevs
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            # children are always numbered above their parents
            continue
        if any(p in parentset for p in pr(r)):
            found.add(r)
    return baseset(found)
618 618
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
626 626
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(r):
        return repo[r].closesbranch()
    return subset.filter(isclosed)
634 634
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test (fast path)
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest of the revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
661 661
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision')
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue)
683 683
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # date()[0] is the timestamp component of the (timestamp, tz) pair
    return subset.filter(lambda r: matchfn(repo[r].date()[0]))
692 692
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
705 705
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants()/_firstdescendants().
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to a plain intersection
        result = subset & result
    return result
724 724
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # thin wrapper; see _descendants() for the ordering subtleties
    return _descendants(repo, subset, x)
730 730
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (internal helper: comment instead of docstring keeps it out of help)
    return _descendants(repo, subset, x, followfirst=True)
735 735
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None  # revisions visited while walking back from r

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
779 779
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
788 788
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    def isdraft(r):
        return phase(repo, r) == phases.draft
    # phases can change while iterating, so do not cache the filter
    return subset.filter(isdraft, cache=False)
798 798
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
807 807
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # value is None when only the label was given: match on presence
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
834 834
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: operate on exactly one filelog
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev  # remember the original (filtered) linkrev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # build the head-manifest cache lazily, only once
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
949 949
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # simply delegate; 'first' and 'limit' are the same predicate
    return limit(repo, subset, x)
955 955
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst(): ancestors of the
    # working directory parent, or of a tracked file's history.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if not l:
        # no filename: all (first-parent) ancestors of the working parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)
    else:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x not in c:
            # file absent from the working parent: empty result
            return baseset()
        cx = c[x]
        s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
        # include the revision responsible for the most recent version
        s.add(cx.introrev())

    return subset & s
972 972
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # full-ancestry variant of the shared helper
    return _follow(repo, subset, x, 'follow')
980 980
def _followfirst(repo, subset, x):
    # ``_followfirst([file])``
    # Same as ``follow([file])`` except that only the first parent of
    # every revision (or file revision) is followed.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
986 986
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with a spanset drops "null" if it was in the subset
    return subset & spanset(repo)
994 994
995 995 def grep(repo, subset, x):
996 996 """``grep(regex)``
997 997 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
998 998 to ensure special escape characters are handled correctly. Unlike
999 999 ``keyword(string)``, the match is case-sensitive.
1000 1000 """
1001 1001 try:
1002 1002 # i18n: "grep" is a keyword
1003 1003 gr = re.compile(getstring(x, _("grep requires a string")))
1004 1004 except re.error, e:
1005 1005 raise error.ParseError(_('invalid match pattern: %s') % e)
1006 1006
1007 1007 def matches(x):
1008 1008 c = repo[x]
1009 1009 for e in c.files() + [c.user(), c.description()]:
1010 1010 if gr.search(e):
1011 1011 return True
1012 1012 return False
1013 1013
1014 1014 return subset.filter(matches)
1015 1015
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        spec = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = spec[:2], spec[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a changeset qualifies as soon as one of its files matches
        return util.any(m(f) for f in repo[x].files())

    return subset.filter(matches)
1072 1072
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # reuse the generic prefixed-argument machinery with a 'p:' pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1085 1085
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    # collect the head revisions of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        hs.update(repo[n].rev() for n in nodes)
    return baseset(hs).filter(subset.__contains__)
1096 1096
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # heads of a set == the set minus every revision that is a parent
    s = getset(repo, subset, x)
    ps = parents(repo, subset, x)
    return s - ps
1104 1104
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are exactly the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1113 1113
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive containment across files, user and description
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1128 1128
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for x in xrange(lim):
        # use next() with a sentinel instead of catching StopIteration;
        # revision numbers are never None, so None safely means "exhausted"
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1155 1154
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        # detect end of iteration with next()'s default value instead of
        # catching StopIteration, matching the pattern used in limit()
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1183 1182
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        if top in subset:
            return baseset([top])
    # empty input, or the maximum is not part of the subset
    return baseset()
1194 1193
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1203 1202
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # count children for every revision >= the smallest subset member;
    # index i of childcount corresponds to revision baserev + i
    baserev = min(subset)
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1220 1219
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        low = candidates.min()
        if low in subset:
            return baseset([low])
    # empty input, or the minimum is not part of the subset
    return baseset()
1231 1230
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == modified
    return checkstatus(repo, subset, pat, 0)
1243 1242
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern match: gather every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # collect revisions bound to any non-deprecated name in the namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1281 1280
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-char hash: resolve directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # prefix: rely on unambiguous partial matching
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1305 1304
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # obsstore knows which revisions have been superseded
    return subset & obsmod.getrevs(repo, 'obsolete')
1313 1312
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every repo head that is neither in the include set nor
        # one of its descendants
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ::include - ::exclude, computed by the changelog itself
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1337 1336
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the source chain back to its origin (or None if rev has
        # no recorded source at all)
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    origins = set(_firstsrc(r) for r in dests)
    origins.discard(None)
    return subset & origins
1366 1365
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    import discovery
    import hg
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery chatter
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set(cl.rev(r) for r in outgoing.missing)
    return subset & missing
1391 1390
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(rev)[0])
    ps.discard(node.nullrev)
    return subset & ps
1408 1407
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # no second parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for rev in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(rev)[1])
    ps.discard(node.nullrev)
    return subset & ps
1429 1428
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for rev in getset(repo, fullreposet(repo), x):
            ps.update(cl.parentrevs(rev))
    ps -= set([node.nullrev])
    return subset & ps
1443 1442
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1468 1467
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow lookup failures on purpose: that is the whole point
        return baseset()
1482 1481
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # phases may change at any time, so the filter result is not cached
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == phases.public,
                         cache=False)
1492 1491
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    # identifier unknown locally, or its revision not in the subset
    return baseset()
1527 1526
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 == removed
    return checkstatus(repo, subset, pat, 2)
1539 1538
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # 'null' (nullrev) is valid even though it is not in the changelog
    if num not in repo.changelog and num != node.nullrev:
        return baseset()
    return subset & baseset([num])
1555 1554
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches when every selected field equals the corresponding
        # field of at least one reference revision
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1667 1666
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated set
    result = getset(repo, subset, x)
    result.reverse()
    return result
1675 1674
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    # restrict to the requested subset first
    subset = subset & s
    # roots are the members that are nobody's child within the set
    cs = _children(repo, subset, s)
    return subset - cs
1684 1683
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # phases may change at any time, so the filter result is not cached
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == phases.secret,
                         cache=False)
1694 1693
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    args = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(args) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(args[1], _("sort spec must be a string"))

    s = args[0]
    keys = keys.split()
    def invert(s):
        # complement each character so that an ascending sort of the
        # inverted string is a descending sort of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths for plain revision-number ordering
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # decorate-sort-undecorate for compound keys
    decorated = []
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # keep the revision itself last so it can be recovered after sorting
        e.append(r)
        decorated.append(e)
    decorated.sort()
    return baseset([e[-1] for e in decorated])
1757 1756
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always go through .hgsubstate
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate counts
            return s.added or s.modified or s.removed

        if s.added:
            return util.any(submatches(c.substate.keys()))

        if s.modified:
            # compare the matching entries between parent and child state
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return util.any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1800 1799
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        # regex.search: the pattern may match anywhere in the candidate
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # reached both for 'literal:'-prefixed and for unprefixed patterns
    return 'literal', pattern, pattern.__eq__
1839 1838
def _substringmatcher(pattern):
    # like _stringmatcher, except that a 'literal' pattern matches as a
    # substring instead of by equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    return kind, pattern, lambda s: pattern in s
1845 1844
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name: every tagged revision except the synthetic 'tip' tag
        tagrevs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            tagrevs = set([repo[tn].rev()])
        else:
            tagrevs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & tagrevs
1874 1873
def tagged(repo, subset, x):
    # compatibility alias: behaves exactly like tag()
    return tag(repo, subset, x)
1877 1876
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
1886 1885
1887 1886
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # pure alias of author(); the docstring above documents the predicate
    # under its own name
    return author(repo, subset, x)
1897 1896
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working directory is represented by the None revision
    if None not in subset:
        return baseset()
    return baseset([None])
1905 1904
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers, keeping only
    # those present in the subset
    value = getstring(x, "internal error")
    if not value:
        return baseset()
    revs = [repo[item].rev() for item in value.split('\0')]
    return baseset([r for r in revs if r in subset])
1914 1913
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revision numbers, keeping
    # only those present in the subset
    value = getstring(x, "internal error")
    if not value:
        return baseset()
    revs = [int(item) for item in value.split('\0')]
    return baseset([r for r in revs if r in subset])
1923 1922
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of hex node ids, keeping only the
    # corresponding revisions present in the subset
    value = getstring(x, "internal error")
    if not value:
        return baseset()
    cl = repo.changelog
    revs = [cl.rev(node.bin(item)) for item in value.split('\0')]
    return baseset([r for r in revs if r in subset])
1933 1932
# mapping of revset predicate names to their implementations; the 'func'
# method handler dispatches through this table
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2007 2006
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: every entry here must also appear in the 'symbols' table above
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])
2080 2079
# mapping of parse-tree node types to the functions evaluating them;
# getset() dispatches through this table
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2095 2094
def optimize(x, small):
    """Transform a parsed revset tree into an equivalent, cheaper one.

    Returns a ``(weight, tree)`` pair.  ``weight`` is a rough relative
    cost estimate for evaluating ``tree`` (bigger means slower; see the
    per-function weights in the 'func' branch below).  When ``small`` is
    true, single-revision expressions get a bonus (weight .5 instead of
    1), which biases operand ordering in enclosing 'and' nodes.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # first, rewrite syntactic sugar into its canonical form
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first so it narrows the subset
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; drop the node itself
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2193 2192
# parse-tree prefix marking an alias argument placeholder injected by
# _parsealiasdefn()
_aliasarg = ('func', ('symbol', '_aliasarg'))
def _getaliasarg(tree):
    """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
    return X, None otherwise.
    """
    if (len(tree) == 3 and tree[:2] == _aliasarg
        and tree[2][0] == 'string'):
        return tree[2][1]
    return None
2203 2202
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None:
        if not known or arg not in known:
            raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every element, including the node type itself
    for subtree in tree:
        _checkaliasarg(subtree, known)
2214 2213
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)

def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2229 2228
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(_tokenizealias, elements)
    try:
        tree, pos = p.parse(decl)
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # arguments must be plain symbols: no nesting, no literals
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError, inst:
        return (decl, None, None, parseerrordetail(inst))
2300 2299
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(tokenizedefn, elements)
    tree, pos = p.parse(defn)
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return tree
2365 2364
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # a parse failure is recorded in self.error instead of raised, so
        # that a broken [revsetalias] entry only warns when referenced
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2390 2389
2391 2390 def _getalias(aliases, tree):
2392 2391 """If tree looks like an unexpanded alias, return it. Return None
2393 2392 otherwise.
2394 2393 """
2395 2394 if isinstance(tree, tuple) and tree:
2396 2395 if tree[0] == 'symbol' and len(tree) == 2:
2397 2396 name = tree[1]
2398 2397 alias = aliases.get(name)
2399 2398 if alias and alias.args is None and alias.tree == tree:
2400 2399 return alias
2401 2400 if tree[0] == 'func' and len(tree) > 1:
2402 2401 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2403 2402 name = tree[1][1]
2404 2403 alias = aliases.get(name)
2405 2404 if alias and alias.args is not None and alias.tree == tree[:2]:
2406 2405 return alias
2407 2406 return None
2408 2407
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    # placeholder node: substitute the caller-supplied subtree
    return args[name]
2419 2418
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    to detect cyclic definitions); 'cache' memoizes the argument-free
    expansion of each alias by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # expand aliases inside the actual arguments with a fresh
            # expansion stack, then substitute them for the placeholders
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2453 2452
def findaliases(ui, tree, showwarning=None):
    # reject pre-existing placeholder nodes: only the alias machinery may
    # inject _aliasarg() markers
    _checkaliasarg(tree)
    aliases = {}
    for declaration, definition in ui.configitems('revsetalias'):
        alias = revsetalias(declaration, definition)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if not alias.error or alias.warned:
                continue
            showwarning(_('warning: %s\n') % (alias.error))
            alias.warned = True
    return tree
2468 2467
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # flatten (possibly nested) _concat nodes, collecting leaf texts in
    # left-to-right order
    pieces = []
    stack = [tree]
    while stack:
        item = stack.pop()
        if item[0] == '_concat':
            stack.extend(reversed(item[1:]))
        elif item[0] in ('string', 'symbol'):
            pieces.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (item[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2489 2488
def parse(spec, lookup=None):
    # Parse a revset string; returns a (tree, pos) pair where pos is how
    # much of spec was consumed.  'lookup' is an optional callable
    # forwarded to the parser (match() passes repo.__contains__).
    p = parser.parser(tokenize, elements)
    return p.parse(spec, lookup=lookup)
2493 2492
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (called from match() after optimize(); intentionally a no-op here)
    pass
2497 2496
def match(ui, spec, repo=None):
    # compile a revset specification into a function mapping
    # (repo[, subset]) to the smartset of matching revisions
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    tree, pos = parse(spec, lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() expects a smartset; wrap plain collections in baseset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2521 2520
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped string literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument of type t; short homogeneous lists use
        # the internal _list/_intlist/_hexlist helpers, otherwise the
        # list is split in halves joined with 'or'
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = []
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            if pos == len(expr):
                # previously a trailing '%' crashed with IndexError;
                # report it as a proper error instead
                raise util.Abort('incomplete revspec format character')
            d = expr[pos]
            if d == '%':
                ret.append(d)
            elif d in 'dsnbr':
                ret.append(argtype(d, args[arg]))
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                if pos == len(expr):
                    raise util.Abort('incomplete revspec format character')
                d = expr[pos]
                ret.append(listexp(list(args[arg]), d))
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret.append(c)
        pos += 1

    return ''.join(ret)
2615 2614
def prettyformat(tree):
    # render a parse tree as an indented multi-line string, one node per
    # line; leaves are printed with repr-style str()
    def _walk(subtree, level, acc):
        if not isinstance(subtree, tuple) or subtree[0] in ('string',
                                                            'symbol'):
            acc.append((level, str(subtree)))
            return
        acc.append((level, '(%s' % subtree[0]))
        for child in subtree[1:]:
            _walk(child, level + 1, acc)
        # close the paren on the last emitted line
        lastlevel, lasttext = acc[-1]
        acc[-1] = (lastlevel, lasttext + ')')

    acc = []
    _walk(tree, 0, acc)
    return '\n'.join('  ' * level + text for level, text in acc)
2630 2629
def depth(tree):
    # nesting depth of a parse tree: non-tuples (leaves) are depth 0, a
    # tuple is one deeper than its deepest element
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2636 2635
def funcsused(tree):
    # collect the names of all revset functions referenced anywhere in
    # the parse tree
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2647 2646
2648 2647 class abstractsmartset(object):
2649 2648
    # --- abstract interface: every concrete smartset must implement ---

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()
2677 2676
2678 2677 def min(self):
2679 2678 """return the minimum element in the set"""
2680 2679 if self.fastasc is not None:
2681 2680 for r in self.fastasc():
2682 2681 return r
2683 2682 raise ValueError('arg is an empty sequence')
2684 2683 return min(self)
2685 2684
2686 2685 def max(self):
2687 2686 """return the maximum element in the set"""
2688 2687 if self.fastdesc is not None:
2689 2688 for r in self.fastdesc():
2690 2689 return r
2691 2690 raise ValueError('arg is an empty sequence')
2692 2691 return max(self)
2693 2692
2694 2693 def first(self):
2695 2694 """return the first element in the set (user iteration perspective)
2696 2695
2697 2696 Return None if the set is empty"""
2698 2697 raise NotImplementedError()
2699 2698
2700 2699 def last(self):
2701 2700 """return the last element in the set (user iteration perspective)
2702 2701
2703 2702 Return None if the set is empty"""
2704 2703 raise NotImplementedError()
2705 2704
2706 2705 def __len__(self):
2707 2706 """return the length of the smartsets
2708 2707
2709 2708 This can be expensive on smartset that could be lazy otherwise."""
2710 2709 raise NotImplementedError()
2711 2710
2712 2711 def reverse(self):
2713 2712 """reverse the expected iteration order"""
2714 2713 raise NotImplementedError()
2715 2714
2716 2715 def sort(self, reverse=True):
2717 2716 """get the set to iterate in an ascending or descending order"""
2718 2717 raise NotImplementedError()
2719 2718
2720 2719 def __and__(self, other):
2721 2720 """Returns a new object with the intersection of the two collections.
2722 2721
2723 2722 This is part of the mandatory API for smartset."""
2724 2723 if isinstance(other, fullreposet):
2725 2724 return self
2726 2725 return self.filter(other.__contains__, cache=False)
2727 2726
2728 2727 def __add__(self, other):
2729 2728 """Returns a new object with the union of the two collections.
2730 2729
2731 2730 This is part of the mandatory API for smartset."""
2732 2731 return addset(self, other)
2733 2732
2734 2733 def __sub__(self, other):
2735 2734 """Returns a new object with the substraction of the two collections.
2736 2735
2737 2736 This is part of the mandatory API for smartset."""
2738 2737 c = other.__contains__
2739 2738 return self.filter(lambda r: not c(r), cache=False)
2740 2739
2741 2740 def filter(self, condition, cache=True):
2742 2741 """Returns this smartset filtered by condition as a new smartset.
2743 2742
2744 2743 `condition` is a callable which takes a revision number and returns a
2745 2744 boolean.
2746 2745
2747 2746 This is part of the mandatory API for smartset."""
2748 2747 # builtin cannot be cached. but do not needs to
2749 2748 if cache and util.safehasattr(condition, 'func_code'):
2750 2749 condition = util.cachefunc(condition)
2751 2750 return filteredset(self, condition)
2752 2751
class baseset(abstractsmartset):
    """Concrete smartset backed by a plain list of revisions.

    This is the reference implementation: every smartset class is expected
    to provide the operations implemented here.
    """
    def __init__(self, data=()):
        # keep the caller's list when possible to avoid a copy
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # None: unsorted (insertion order), True: ascending, False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # set view of the revisions, for O(1) membership testing
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # ascending-sorted copy of the revisions
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the set's __contains__ once; the cached instance attribute
        # then shadows this property on later lookups
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """True when the collection iterates in ascending order.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return bool(self._ascending)

    def isdescending(self):
        """True when the collection iterates in descending order.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is False

    def first(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        marker = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, marker, self._list)
2847 2846
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes per-revision membership results
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        """yield only the values of 'it' matching the condition"""
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only possible if the subset supports it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # The previous code compared the *unbound* 'self._subset.isascending'
        # method (always truthy) and used fastdesc in both branches, so a
        # descending set returned its maximum instead of its last element.
        it = None
        if self.isascending():
            # last element in user order is the largest: take the first
            # value of a fast descending iteration
            it = self.fastdesc
        elif self.isdescending():
            # symmetrically, the last element is the smallest one
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
2934 2933
2935 2934 def _iterordered(ascending, iter1, iter2):
2936 2935 """produce an ordered iteration from two iterators with the same order
2937 2936
2938 2937 The ascending is used to indicated the iteration direction.
2939 2938 """
2940 2939 choice = max
2941 2940 if ascending:
2942 2941 choice = min
2943 2942
2944 2943 val1 = None
2945 2944 val2 = None
2946 2945 try:
2947 2946 # Consume both iterators in an ordered way until one is empty
2948 2947 while True:
2949 2948 if val1 is None:
2950 2949 val1 = iter1.next()
2951 2950 if val2 is None:
2952 2951 val2 = iter2.next()
2953 2952 next = choice(val1, val2)
2954 2953 yield next
2955 2954 if val1 == next:
2956 2955 val1 = None
2957 2956 if val2 == next:
2958 2957 val2 = None
2959 2958 except StopIteration:
2960 2959 # Flush any remaining values and consume the other one
2961 2960 it = iter2
2962 2961 if val1 is not None:
2963 2962 yield val1
2964 2963 it = iter1
2965 2964 elif val2 is not None:
2966 2965 # might have been equality and both are empty
2967 2966 yield val2
2968 2967 for val in it:
2969 2968 yield val
2970 2969
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two wrapped smartsets
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: arbitrary order, True: ascending, False: descending
        self._ascending = ascending
        # caches filled lazily by _list / _trysetasclist
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces full materialization of the union via the _list cache
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialized union, cached after the first full iteration
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the already-materialized sorted list; otherwise require
        # both components to support fast ascending iteration
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # mirror of fastasc for descending iteration
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # no defined order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the reversed order, then restore the order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3170 3169
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, tells whether 'gen' yields its values in
        ascending (True) or descending (False) order, enabling faster
        membership testing and direct fast iteration.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        consume = self._consumegen()
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    # use 'next()' to advance the shared consumer, for
                    # consistency with the rest of the module
                    yield next(consume)
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: build the sorted cache and install the
            # fast iteration methods
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return next(it())
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen installs
            # fastasc/fastdesc once exhausted, so the recursion terminates.
            # (This used to call self.first(), wrongly returning the first
            # element instead of the last.)
            for x in self._consumegen():
                pass
            return self.last()
        if self:
            return next(it())
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3348 3347
class spanset(abstractsmartset):
    """Smartset representing a contiguous range of revision numbers.

    Works lazily, much like xrange(start, end), with two differences:
    - when end < start the set iterates in descending order,
    - revisions hidden by the current repoview are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        The spanset is descending when `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to an ascending [start, end) interval; the
            # _ascending flag remembers the requested direction
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only the revisions not hidden by the repoview
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        return self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        span = abs(self._end - self._start)
        if not self._hiddenrevs:
            return span
        # subtract the hidden revisions falling inside the range
        start = self._start
        end = self._end
        hidden = 0
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                hidden += 1
        return span - hidden

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        return next(it(), None)

    def last(self):
        # the last element in user order is the first of the opposite
        # direction's iteration
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3457 3456
class fullreposet(spanset):
    """a set containing all revisions in the repo

    Hosts the special optimizations and magic needed to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __contains__(self, rev):
        # the rev is assumed valid; only repoview-hidden revisions are
        # excluded
        hidden = self._hiddenrevs
        if hidden and rev in hidden:
            return False
        return True

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # 'other' is not a smartset; since it was used with "&", assume
            # it is at least set-like, drop the hidden revisions and wrap it
            # (this boldly assumes all smartsets are pure)
            other = baseset(other - self._hiddenrevs)

        other.sort(reverse=self.isdescending())
        return other
3490 3489
def prettyformatset(revs):
    """Render repr(revs) with one line per '<...>' group, indented by
    nesting depth."""
    rs = repr(revs)
    parts = []
    pos = 0
    while pos < len(rs):
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = len(rs)
        # depth = unmatched '<' seen so far
        level = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert level >= 0
        parts.append((level, rs[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * level + chunk for level, chunk in parts)
3504 3503
# tell hggettext to extract docstrings from these functions:
# ('symbols' is the revset predicate table defined earlier in this file,
# outside this excerpt; its values are the predicate functions)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now