revset: use an iterator instead of a dequeue in ancestors()...
Pierre-Yves David
r24939:85544a52 default
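The change replaces the util.deque that previously fed the ancestor walk in _revancestors() with a plain iterator over the sorted input revisions, so further input revisions are only pulled in once the heap-driven walk reaches the previous one. In the hunk below, lines carrying only an old line number are removed and lines carrying only a new line number are added. The following standalone sketch shows the resulting pattern; the sample history, the parentrevs() helper and the revancestors() wrapper are hypothetical stand-ins for repo.changelog.parentrevs and the real _revancestors()/generatorset machinery, and next(irevs) is the Python 3 spelling of the irevs.next() call used in the diff.

import heapq

nullrev = -1

# hypothetical history: 0 <- 1 <- 2, 1 <- 3, and 4 merges 2 and 3
_parents = {0: (nullrev, nullrev), 1: (0, nullrev), 2: (1, nullrev),
            3: (1, nullrev), 4: (2, 3)}

def parentrevs(rev):
    # stand-in for repo.changelog.parentrevs(rev)
    return _parents[rev]

def revancestors(revs, followfirst=False):
    """Yield revs and their ancestors, highest revision first."""
    cut = 1 if followfirst else None
    revs = sorted(revs, reverse=True)
    irevs = iter(revs)           # the iterator that replaces util.deque(revs)
    h = []
    try:
        inputrev = next(irevs)
        heapq.heappush(h, -inputrev)   # negated: heapq is a min-heap
    except StopIteration:
        return
    seen = set()
    while h:
        current = -heapq.heappop(h)
        if current not in seen:
            if current == inputrev:
                # only now fetch the next input revision, lazily
                try:
                    inputrev = next(irevs)
                    heapq.heappush(h, -inputrev)
                except StopIteration:
                    pass
            seen.add(current)
            yield current
            for parent in parentrevs(current)[:cut]:
                if parent != nullrev:
                    heapq.heappush(h, -parent)

print(list(revancestors([4])))                    # [4, 3, 2, 1, 0]
print(list(revancestors([4], followfirst=True)))  # [4, 2, 1, 0]
print(list(revancestors([2, 3])))                 # [3, 2, 1, 0]

The heap stores negated revision numbers because heapq is a min-heap while ancestors must come out highest-first; since the input is already sorted in descending order, at most one pending input revision needs to sit in the heap at a time, which is what makes the up-front deque copy unnecessary.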
@@ -1,3448 +1,3450
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revs.sort(reverse=True)
29 revqueue = util.deque(revs)
30 if not revqueue:
29 irevs = iter(revs)
30 h = []
31 try:
32 inputrev = irevs.next()
33 heapq.heappush(h, -inputrev)
34 except StopIteration:
31 35 return
32 36
33 h = []
34 inputrev = revqueue.popleft()
35 heapq.heappush(h, -inputrev)
36
37 37 seen = set()
38 38 while h:
39 39 current = -heapq.heappop(h)
40 40 if current not in seen:
41 41 if current == inputrev:
42 if revqueue:
43 inputrev = revqueue.popleft()
42 try:
43 inputrev = irevs.next()
44 44 heapq.heappush(h, -inputrev)
45 except StopIteration:
46 pass
45 47 seen.add(current)
46 48 yield current
47 49 for parent in cl.parentrevs(current)[:cut]:
48 50 if parent != node.nullrev:
49 51 heapq.heappush(h, -parent)
50 52
51 53 return generatorset(iterate(), iterasc=False)
52 54
53 55 def _revdescendants(repo, revs, followfirst):
54 56 """Like revlog.descendants() but supports followfirst."""
55 57 if followfirst:
56 58 cut = 1
57 59 else:
58 60 cut = None
59 61
60 62 def iterate():
61 63 cl = repo.changelog
62 64 first = min(revs)
63 65 nullrev = node.nullrev
64 66 if first == nullrev:
65 67 # Are there nodes with a null first parent and a non-null
66 68 # second one? Maybe. Do we care? Probably not.
67 69 for i in cl:
68 70 yield i
69 71 else:
70 72 seen = set(revs)
71 73 for i in cl.revs(first + 1):
72 74 for x in cl.parentrevs(i)[:cut]:
73 75 if x != nullrev and x in seen:
74 76 seen.add(i)
75 77 yield i
76 78 break
77 79
78 80 return generatorset(iterate(), iterasc=True)
79 81
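For reference, the iterate() closure in _revdescendants() above reduces, in the common case where min(revs) is not nullrev, to a single ascending sweep over the changelog. A standalone sketch with a hypothetical history, where parentrevs and maxrev stand in for the changelog:

def revdescendants(parentrevs, maxrev, revs, followfirst=False):
    # a revision is a descendant as soon as one of its (first, if
    # followfirst) parents has already been selected
    cut = 1 if followfirst else None
    seen = set(revs)
    for i in range(min(revs) + 1, maxrev + 1):
        for x in parentrevs(i)[:cut]:
            if x != -1 and x in seen:
                seen.add(i)
                yield i
                break

# hypothetical history: 0 <- 1 <- 2, 1 <- 3, and 4 merges 2 and 3
parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}
print(list(revdescendants(parents.get, 4, [1])))        # [2, 3, 4]
print(list(revdescendants(parents.get, 4, [3])))        # [4]
print(list(revdescendants(parents.get, 4, [3], True)))  # [] (4's first parent is 2)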
80 82 def _revsbetween(repo, roots, heads):
81 83 """Return all paths between roots and heads, inclusive of both endpoint
82 84 sets."""
83 85 if not roots:
84 86 return baseset()
85 87 parentrevs = repo.changelog.parentrevs
86 88 visit = list(heads)
87 89 reachable = set()
88 90 seen = {}
89 91 minroot = min(roots)
90 92 roots = set(roots)
91 93 # open-code the post-order traversal due to the tiny size of
92 94 # sys.getrecursionlimit()
93 95 while visit:
94 96 rev = visit.pop()
95 97 if rev in roots:
96 98 reachable.add(rev)
97 99 parents = parentrevs(rev)
98 100 seen[rev] = parents
99 101 for parent in parents:
100 102 if parent >= minroot and parent not in seen:
101 103 visit.append(parent)
102 104 if not reachable:
103 105 return baseset()
104 106 for rev in sorted(seen):
105 107 for parent in seen[rev]:
106 108 if parent in reachable:
107 109 reachable.add(rev)
108 110 return baseset(sorted(reachable))
109 111
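The open-coded traversal in _revsbetween() above works in two passes: first an iterative walk from the heads down to min(roots), then an ascending sweep that keeps every visited revision lying on a path from the roots. The standalone sketch below mirrors it with a hypothetical parent map and plain sorted lists in place of baseset:

def revsbetween(parentrevs, roots, heads):
    if not roots:
        return []
    # pass 1: iterative walk from the heads toward the roots, recording
    # each visited revision's parents (recursion would hit the limit)
    visit = list(heads)
    reachable = set()
    seen = {}
    minroot = min(roots)
    roots = set(roots)
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return []
    # pass 2: ascending sweep pulling in every visited revision that has a
    # reachable parent, i.e. lies on a path from the roots to the heads
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return sorted(reachable)

# hypothetical history: 0 <- 1 <- 2 <- 3, with a side branch 1 <- 4
parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (2, -1), 4: (1, -1)}
print(revsbetween(parents.get, [1], [3]))  # [1, 2, 3]
print(revsbetween(parents.get, [4], [3]))  # []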
110 112 elements = {
111 113 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
112 114 "##": (20, None, ("_concat", 20)),
113 115 "~": (18, None, ("ancestor", 18)),
114 116 "^": (18, None, ("parent", 18), ("parentpost", 18)),
115 117 "-": (5, ("negate", 19), ("minus", 5)),
116 118 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
117 119 ("dagrangepost", 17)),
118 120 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
119 121 ("dagrangepost", 17)),
120 122 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
121 123 "not": (10, ("not", 10)),
122 124 "!": (10, ("not", 10)),
123 125 "and": (5, None, ("and", 5)),
124 126 "&": (5, None, ("and", 5)),
125 127 "%": (5, None, ("only", 5), ("onlypost", 5)),
126 128 "or": (4, None, ("or", 4)),
127 129 "|": (4, None, ("or", 4)),
128 130 "+": (4, None, ("or", 4)),
129 131 ",": (2, None, ("list", 2)),
130 132 ")": (0, None, None),
131 133 "symbol": (0, ("symbol",), None),
132 134 "string": (0, ("string",), None),
133 135 "end": (0, None, None),
134 136 }
135 137
136 138 keywords = set(['and', 'or', 'not'])
137 139
138 140 # default set of valid characters for the initial letter of symbols
139 141 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
140 142 if c.isalnum() or c in '._@' or ord(c) > 127)
141 143
142 144 # default set of valid characters for non-initial letters of symbols
143 145 _symletters = set(c for c in [chr(i) for i in xrange(256)]
144 146 if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 147
146 148 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
147 149 '''
148 150 Parse a revset statement into a stream of tokens
149 151
150 152 ``syminitletters`` is the set of valid characters for the initial
151 153 letter of symbols.
152 154
153 155 By default, character ``c`` is recognized as valid for initial
154 156 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
155 157
156 158 ``symletters`` is the set of valid characters for non-initial
157 159 letters of symbols.
158 160
159 161 By default, character ``c`` is recognized as valid for non-initial
160 162 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
161 163
162 164 Check that @ is a valid unquoted token character (issue3686):
163 165 >>> list(tokenize("@::"))
164 166 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
165 167
166 168 '''
167 169 if syminitletters is None:
168 170 syminitletters = _syminitletters
169 171 if symletters is None:
170 172 symletters = _symletters
171 173
172 174 pos, l = 0, len(program)
173 175 while pos < l:
174 176 c = program[pos]
175 177 if c.isspace(): # skip inter-token whitespace
176 178 pass
177 179 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
178 180 yield ('::', None, pos)
179 181 pos += 1 # skip ahead
180 182 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
181 183 yield ('..', None, pos)
182 184 pos += 1 # skip ahead
183 185 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
184 186 yield ('##', None, pos)
185 187 pos += 1 # skip ahead
186 188 elif c in "():,-|&+!~^%": # handle simple operators
187 189 yield (c, None, pos)
188 190 elif (c in '"\'' or c == 'r' and
189 191 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
190 192 if c == 'r':
191 193 pos += 1
192 194 c = program[pos]
193 195 decode = lambda x: x
194 196 else:
195 197 decode = lambda x: x.decode('string-escape')
196 198 pos += 1
197 199 s = pos
198 200 while pos < l: # find closing quote
199 201 d = program[pos]
200 202 if d == '\\': # skip over escaped characters
201 203 pos += 2
202 204 continue
203 205 if d == c:
204 206 yield ('string', decode(program[s:pos]), s)
205 207 break
206 208 pos += 1
207 209 else:
208 210 raise error.ParseError(_("unterminated string"), s)
209 211 # gather up a symbol/keyword
210 212 elif c in syminitletters:
211 213 s = pos
212 214 pos += 1
213 215 while pos < l: # find end of symbol
214 216 d = program[pos]
215 217 if d not in symletters:
216 218 break
217 219 if d == '.' and program[pos - 1] == '.': # special case for ..
218 220 pos -= 1
219 221 break
220 222 pos += 1
221 223 sym = program[s:pos]
222 224 if sym in keywords: # operator keywords
223 225 yield (sym, None, s)
224 226 elif '-' in sym:
225 227 # some jerk gave us foo-bar-baz, try to check if it's a symbol
226 228 if lookup and lookup(sym):
227 229 # looks like a real symbol
228 230 yield ('symbol', sym, s)
229 231 else:
230 232 # looks like an expression
231 233 parts = sym.split('-')
232 234 for p in parts[:-1]:
233 235 if p: # possible consecutive -
234 236 yield ('symbol', p, s)
235 237 s += len(p)
236 238 yield ('-', None, pos)
237 239 s += 1
238 240 if parts[-1]: # possible trailing -
239 241 yield ('symbol', parts[-1], s)
240 242 else:
241 243 yield ('symbol', sym, s)
242 244 pos -= 1
243 245 else:
244 246 raise error.ParseError(_("syntax error in revset '%s'") %
245 247 program, pos)
246 248 pos += 1
247 249 yield ('end', None, pos)
248 250
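One non-obvious branch in tokenize() above is the treatment of dash-containing words such as foo-bar: they stay a single symbol only if the lookup callback recognizes them, and are otherwise re-emitted as symbols joined by '-' operators. A standalone sketch of just that branch, with the word's start offset s and end-of-word scan position pos passed in explicitly:

def split_dashed(sym, s, pos, lookup=None):
    tokens = []
    if lookup and lookup(sym):
        tokens.append(('symbol', sym, s))       # a real dashed symbol
    else:
        parts = sym.split('-')
        for p in parts[:-1]:
            if p:                               # possible consecutive -
                tokens.append(('symbol', p, s))
                s += len(p)
            tokens.append(('-', None, pos))
            s += 1
        if parts[-1]:                           # possible trailing -
            tokens.append(('symbol', parts[-1], s))
    return tokens

print(split_dashed('foo-bar', 0, 7))
# [('symbol', 'foo', 0), ('-', None, 7), ('symbol', 'bar', 4)]
print(split_dashed('foo-bar', 0, 7, lookup={'foo-bar'}.__contains__))
# [('symbol', 'foo-bar', 0)]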
249 251 def parseerrordetail(inst):
250 252 """Compose error message from specified ParseError object
251 253 """
252 254 if len(inst.args) > 1:
253 255 return _('at %s: %s') % (inst.args[1], inst.args[0])
254 256 else:
255 257 return inst.args[0]
256 258
257 259 # helpers
258 260
259 261 def getstring(x, err):
260 262 if x and (x[0] == 'string' or x[0] == 'symbol'):
261 263 return x[1]
262 264 raise error.ParseError(err)
263 265
264 266 def getlist(x):
265 267 if not x:
266 268 return []
267 269 if x[0] == 'list':
268 270 return getlist(x[1]) + [x[2]]
269 271 return [x]
270 272
271 273 def getargs(x, min, max, err):
272 274 l = getlist(x)
273 275 if len(l) < min or (max >= 0 and len(l) > max):
274 276 raise error.ParseError(err)
275 277 return l
276 278
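getlist() above flattens the nested 'list' nodes produced for comma-separated arguments; as its recursion implies, the tree for "a, b, c" is left-nested. A standalone copy with a hypothetical parse tree:

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return getlist(x[1]) + [x[2]]
    return [x]

tree = ('list', ('list', ('symbol', 'a'), ('symbol', 'b')), ('symbol', 'c'))
print(getlist(tree))
# [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]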
277 279 def isvalidsymbol(tree):
278 280 """Examine whether specified ``tree`` is valid ``symbol`` or not
279 281 """
280 282 return tree[0] == 'symbol' and len(tree) > 1
281 283
282 284 def getsymbol(tree):
283 285 """Get symbol name from valid ``symbol`` in ``tree``
284 286
285 287 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
286 288 """
287 289 return tree[1]
288 290
289 291 def isvalidfunc(tree):
290 292 """Examine whether specified ``tree`` is valid ``func`` or not
291 293 """
292 294 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
293 295
294 296 def getfuncname(tree):
295 297 """Get function name from valid ``func`` in ``tree``
296 298
297 299 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 300 """
299 301 return getsymbol(tree[1])
300 302
301 303 def getfuncargs(tree):
302 304 """Get list of function arguments from valid ``func`` in ``tree``
303 305
304 306 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 307 """
306 308 if len(tree) > 2:
307 309 return getlist(tree[2])
308 310 else:
309 311 return []
310 312
311 313 def getset(repo, subset, x):
312 314 if not x:
313 315 raise error.ParseError(_("missing argument"))
314 316 s = methods[x[0]](repo, subset, *x[1:])
315 317 if util.safehasattr(s, 'isascending'):
316 318 return s
317 319 return baseset(s)
318 320
319 321 def _getrevsource(repo, r):
320 322 extra = repo[r].extra()
321 323 for label in ('source', 'transplant_source', 'rebase_source'):
322 324 if label in extra:
323 325 try:
324 326 return repo[extra[label]].rev()
325 327 except error.RepoLookupError:
326 328 pass
327 329 return None
328 330
329 331 # operator methods
330 332
331 333 def stringset(repo, subset, x):
332 334 x = repo[x].rev()
333 335 if x in subset:
334 336 return baseset([x])
335 337 return baseset()
336 338
337 339 def rangeset(repo, subset, x, y):
338 340 m = getset(repo, fullreposet(repo), x)
339 341 n = getset(repo, fullreposet(repo), y)
340 342
341 343 if not m or not n:
342 344 return baseset()
343 345 m, n = m.first(), n.last()
344 346
345 347 if m < n:
346 348 r = spanset(repo, m, n + 1)
347 349 else:
348 350 r = spanset(repo, m, n - 1)
349 351 return r & subset
350 352
351 353 def dagrange(repo, subset, x, y):
352 354 r = fullreposet(repo)
353 355 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
354 356 return xs & subset
355 357
356 358 def andset(repo, subset, x, y):
357 359 return getset(repo, getset(repo, subset, x), y)
358 360
359 361 def orset(repo, subset, x, y):
360 362 xl = getset(repo, subset, x)
361 363 yl = getset(repo, subset - xl, y)
362 364 return xl + yl
363 365
364 366 def notset(repo, subset, x):
365 367 return subset - getset(repo, subset, x)
366 368
367 369 def listset(repo, subset, a, b):
368 370 raise error.ParseError(_("can't use a list in this context"))
369 371
370 372 def func(repo, subset, a, b):
371 373 if a[0] == 'symbol' and a[1] in symbols:
372 374 return symbols[a[1]](repo, subset, b)
373 375 raise error.UnknownIdentifier(a[1], symbols.keys())
374 376
375 377 # functions
376 378
377 379 def adds(repo, subset, x):
378 380 """``adds(pattern)``
379 381 Changesets that add a file matching pattern.
380 382
381 383 The pattern without explicit kind like ``glob:`` is expected to be
382 384 relative to the current directory and match against a file or a
383 385 directory.
384 386 """
385 387 # i18n: "adds" is a keyword
386 388 pat = getstring(x, _("adds requires a pattern"))
387 389 return checkstatus(repo, subset, pat, 1)
388 390
389 391 def ancestor(repo, subset, x):
390 392 """``ancestor(*changeset)``
391 393 A greatest common ancestor of the changesets.
392 394
393 395 Accepts 0 or more changesets.
394 396 Will return empty list when passed no args.
395 397 Greatest common ancestor of a single changeset is that changeset.
396 398 """
397 399 # i18n: "ancestor" is a keyword
398 400 l = getlist(x)
399 401 rl = fullreposet(repo)
400 402 anc = None
401 403
402 404 # (getset(repo, rl, i) for i in l) generates a list of lists
403 405 for revs in (getset(repo, rl, i) for i in l):
404 406 for r in revs:
405 407 if anc is None:
406 408 anc = repo[r]
407 409 else:
408 410 anc = anc.ancestor(repo[r])
409 411
410 412 if anc is not None and anc.rev() in subset:
411 413 return baseset([anc.rev()])
412 414 return baseset()
413 415
414 416 def _ancestors(repo, subset, x, followfirst=False):
415 417 heads = getset(repo, fullreposet(repo), x)
416 418 if not heads:
417 419 return baseset()
418 420 s = _revancestors(repo, heads, followfirst)
419 421 return subset & s
420 422
421 423 def ancestors(repo, subset, x):
422 424 """``ancestors(set)``
423 425 Changesets that are ancestors of a changeset in set.
424 426 """
425 427 return _ancestors(repo, subset, x)
426 428
427 429 def _firstancestors(repo, subset, x):
428 430 # ``_firstancestors(set)``
429 431 # Like ``ancestors(set)`` but follows only the first parents.
430 432 return _ancestors(repo, subset, x, followfirst=True)
431 433
432 434 def ancestorspec(repo, subset, x, n):
433 435 """``set~n``
434 436 Changesets that are the Nth ancestor (first parents only) of a changeset
435 437 in set.
436 438 """
437 439 try:
438 440 n = int(n[1])
439 441 except (TypeError, ValueError):
440 442 raise error.ParseError(_("~ expects a number"))
441 443 ps = set()
442 444 cl = repo.changelog
443 445 for r in getset(repo, fullreposet(repo), x):
444 446 for i in range(n):
445 447 r = cl.parentrevs(r)[0]
446 448 ps.add(r)
447 449 return subset & ps
448 450
449 451 def author(repo, subset, x):
450 452 """``author(string)``
451 453 Alias for ``user(string)``.
452 454 """
453 455 # i18n: "author" is a keyword
454 456 n = encoding.lower(getstring(x, _("author requires a string")))
455 457 kind, pattern, matcher = _substringmatcher(n)
456 458 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
457 459
458 460 def bisect(repo, subset, x):
459 461 """``bisect(string)``
460 462 Changesets marked in the specified bisect status:
461 463
462 464 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
463 465 - ``goods``, ``bads`` : csets topologically good/bad
464 466 - ``range`` : csets taking part in the bisection
465 467 - ``pruned`` : csets that are goods, bads or skipped
466 468 - ``untested`` : csets whose fate is yet unknown
467 469 - ``ignored`` : csets ignored due to DAG topology
468 470 - ``current`` : the cset currently being bisected
469 471 """
470 472 # i18n: "bisect" is a keyword
471 473 status = getstring(x, _("bisect requires a string")).lower()
472 474 state = set(hbisect.get(repo, status))
473 475 return subset & state
474 476
475 477 # Backward-compatibility
476 478 # - no help entry so that we do not advertise it any more
477 479 def bisected(repo, subset, x):
478 480 return bisect(repo, subset, x)
479 481
480 482 def bookmark(repo, subset, x):
481 483 """``bookmark([name])``
482 484 The named bookmark or all bookmarks.
483 485
484 486 If `name` starts with `re:`, the remainder of the name is treated as
485 487 a regular expression. To match a bookmark that actually starts with `re:`,
486 488 use the prefix `literal:`.
487 489 """
488 490 # i18n: "bookmark" is a keyword
489 491 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
490 492 if args:
491 493 bm = getstring(args[0],
492 494 # i18n: "bookmark" is a keyword
493 495 _('the argument to bookmark must be a string'))
494 496 kind, pattern, matcher = _stringmatcher(bm)
495 497 bms = set()
496 498 if kind == 'literal':
497 499 bmrev = repo._bookmarks.get(pattern, None)
498 500 if not bmrev:
499 501 raise error.RepoLookupError(_("bookmark '%s' does not exist")
500 502 % bm)
501 503 bms.add(repo[bmrev].rev())
502 504 else:
503 505 matchrevs = set()
504 506 for name, bmrev in repo._bookmarks.iteritems():
505 507 if matcher(name):
506 508 matchrevs.add(bmrev)
507 509 if not matchrevs:
508 510 raise error.RepoLookupError(_("no bookmarks exist"
509 511 " that match '%s'") % pattern)
510 512 for bmrev in matchrevs:
511 513 bms.add(repo[bmrev].rev())
512 514 else:
513 515 bms = set([repo[r].rev()
514 516 for r in repo._bookmarks.values()])
515 517 bms -= set([node.nullrev])
516 518 return subset & bms
517 519
518 520 def branch(repo, subset, x):
519 521 """``branch(string or set)``
520 522 All changesets belonging to the given branch or the branches of the given
521 523 changesets.
522 524
523 525 If `string` starts with `re:`, the remainder of the name is treated as
524 526 a regular expression. To match a branch that actually starts with `re:`,
525 527 use the prefix `literal:`.
526 528 """
527 529 getbi = repo.revbranchcache().branchinfo
528 530
529 531 try:
530 532 b = getstring(x, '')
531 533 except error.ParseError:
532 534 # not a string, but another revspec, e.g. tip()
533 535 pass
534 536 else:
535 537 kind, pattern, matcher = _stringmatcher(b)
536 538 if kind == 'literal':
537 539 # note: falls through to the revspec case if no branch with
538 540 # this name exists
539 541 if pattern in repo.branchmap():
540 542 return subset.filter(lambda r: matcher(getbi(r)[0]))
541 543 else:
542 544 return subset.filter(lambda r: matcher(getbi(r)[0]))
543 545
544 546 s = getset(repo, fullreposet(repo), x)
545 547 b = set()
546 548 for r in s:
547 549 b.add(getbi(r)[0])
548 550 c = s.__contains__
549 551 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
550 552
551 553 def bumped(repo, subset, x):
552 554 """``bumped()``
553 555 Mutable changesets marked as successors of public changesets.
554 556
555 557 Only non-public and non-obsolete changesets can be `bumped`.
556 558 """
557 559 # i18n: "bumped" is a keyword
558 560 getargs(x, 0, 0, _("bumped takes no arguments"))
559 561 bumped = obsmod.getrevs(repo, 'bumped')
560 562 return subset & bumped
561 563
562 564 def bundle(repo, subset, x):
563 565 """``bundle()``
564 566 Changesets in the bundle.
565 567
566 568 Bundle must be specified by the -R option."""
567 569
568 570 try:
569 571 bundlerevs = repo.changelog.bundlerevs
570 572 except AttributeError:
571 573 raise util.Abort(_("no bundle provided - specify with -R"))
572 574 return subset & bundlerevs
573 575
574 576 def checkstatus(repo, subset, pat, field):
575 577 hasset = matchmod.patkind(pat) == 'set'
576 578
577 579 mcache = [None]
578 580 def matches(x):
579 581 c = repo[x]
580 582 if not mcache[0] or hasset:
581 583 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
582 584 m = mcache[0]
583 585 fname = None
584 586 if not m.anypats() and len(m.files()) == 1:
585 587 fname = m.files()[0]
586 588 if fname is not None:
587 589 if fname not in c.files():
588 590 return False
589 591 else:
590 592 for f in c.files():
591 593 if m(f):
592 594 break
593 595 else:
594 596 return False
595 597 files = repo.status(c.p1().node(), c.node())[field]
596 598 if fname is not None:
597 599 if fname in files:
598 600 return True
599 601 else:
600 602 for f in files:
601 603 if m(f):
602 604 return True
603 605
604 606 return subset.filter(matches)
605 607
606 608 def _children(repo, narrow, parentset):
607 609 cs = set()
608 610 if not parentset:
609 611 return baseset(cs)
610 612 pr = repo.changelog.parentrevs
611 613 minrev = min(parentset)
612 614 for r in narrow:
613 615 if r <= minrev:
614 616 continue
615 617 for p in pr(r):
616 618 if p in parentset:
617 619 cs.add(r)
618 620 return baseset(cs)
619 621
620 622 def children(repo, subset, x):
621 623 """``children(set)``
622 624 Child changesets of changesets in set.
623 625 """
624 626 s = getset(repo, fullreposet(repo), x)
625 627 cs = _children(repo, subset, s)
626 628 return subset & cs
627 629
628 630 def closed(repo, subset, x):
629 631 """``closed()``
630 632 Changeset is closed.
631 633 """
632 634 # i18n: "closed" is a keyword
633 635 getargs(x, 0, 0, _("closed takes no arguments"))
634 636 return subset.filter(lambda r: repo[r].closesbranch())
635 637
636 638 def contains(repo, subset, x):
637 639 """``contains(pattern)``
638 640 The revision's manifest contains a file matching pattern (but might not
639 641 modify it). See :hg:`help patterns` for information about file patterns.
640 642
641 643 The pattern without explicit kind like ``glob:`` is expected to be
642 644 relative to the current directory and match against a file exactly
643 645 for efficiency.
644 646 """
645 647 # i18n: "contains" is a keyword
646 648 pat = getstring(x, _("contains requires a pattern"))
647 649
648 650 def matches(x):
649 651 if not matchmod.patkind(pat):
650 652 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
651 653 if pats in repo[x]:
652 654 return True
653 655 else:
654 656 c = repo[x]
655 657 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
656 658 for f in c.manifest():
657 659 if m(f):
658 660 return True
659 661 return False
660 662
661 663 return subset.filter(matches)
662 664
663 665 def converted(repo, subset, x):
664 666 """``converted([id])``
665 667 Changesets converted from the given identifier in the old repository if
666 668 present, or all converted changesets if no identifier is specified.
667 669 """
668 670
669 671 # There is exactly no chance of resolving the revision, so do a simple
670 672 # string compare and hope for the best
671 673
672 674 rev = None
673 675 # i18n: "converted" is a keyword
674 676 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
675 677 if l:
676 678 # i18n: "converted" is a keyword
677 679 rev = getstring(l[0], _('converted requires a revision'))
678 680
679 681 def _matchvalue(r):
680 682 source = repo[r].extra().get('convert_revision', None)
681 683 return source is not None and (rev is None or source.startswith(rev))
682 684
683 685 return subset.filter(lambda r: _matchvalue(r))
684 686
685 687 def date(repo, subset, x):
686 688 """``date(interval)``
687 689 Changesets within the interval, see :hg:`help dates`.
688 690 """
689 691 # i18n: "date" is a keyword
690 692 ds = getstring(x, _("date requires a string"))
691 693 dm = util.matchdate(ds)
692 694 return subset.filter(lambda x: dm(repo[x].date()[0]))
693 695
694 696 def desc(repo, subset, x):
695 697 """``desc(string)``
696 698 Search commit message for string. The match is case-insensitive.
697 699 """
698 700 # i18n: "desc" is a keyword
699 701 ds = encoding.lower(getstring(x, _("desc requires a string")))
700 702
701 703 def matches(x):
702 704 c = repo[x]
703 705 return ds in encoding.lower(c.description())
704 706
705 707 return subset.filter(matches)
706 708
707 709 def _descendants(repo, subset, x, followfirst=False):
708 710 roots = getset(repo, fullreposet(repo), x)
709 711 if not roots:
710 712 return baseset()
711 713 s = _revdescendants(repo, roots, followfirst)
712 714
713 715 # Both sets need to be ascending in order to lazily return the union
714 716 # in the correct order.
715 717 base = subset & roots
716 718 desc = subset & s
717 719 result = base + desc
718 720 if subset.isascending():
719 721 result.sort()
720 722 elif subset.isdescending():
721 723 result.sort(reverse=True)
722 724 else:
723 725 result = subset & result
724 726 return result
725 727
726 728 def descendants(repo, subset, x):
727 729 """``descendants(set)``
728 730 Changesets which are descendants of changesets in set.
729 731 """
730 732 return _descendants(repo, subset, x)
731 733
732 734 def _firstdescendants(repo, subset, x):
733 735 # ``_firstdescendants(set)``
734 736 # Like ``descendants(set)`` but follows only the first parents.
735 737 return _descendants(repo, subset, x, followfirst=True)
736 738
737 739 def destination(repo, subset, x):
738 740 """``destination([set])``
739 741 Changesets that were created by a graft, transplant or rebase operation,
740 742 with the given revisions specified as the source. Omitting the optional set
741 743 is the same as passing all().
742 744 """
743 745 if x is not None:
744 746 sources = getset(repo, fullreposet(repo), x)
745 747 else:
746 748 sources = fullreposet(repo)
747 749
748 750 dests = set()
749 751
750 752 # subset contains all of the possible destinations that can be returned, so
751 753 # iterate over them and see if their source(s) were provided in the arg set.
752 754 # Even if the immediate src of r is not in the arg set, src's source (or
753 755 # further back) may be. Scanning back further than the immediate src allows
754 756 # transitive transplants and rebases to yield the same results as transitive
755 757 # grafts.
756 758 for r in subset:
757 759 src = _getrevsource(repo, r)
758 760 lineage = None
759 761
760 762 while src is not None:
761 763 if lineage is None:
762 764 lineage = list()
763 765
764 766 lineage.append(r)
765 767
766 768 # The visited lineage is a match if the current source is in the arg
767 769 # set. Since every candidate dest is visited by way of iterating
768 770 # subset, any dests further back in the lineage will be tested by a
769 771 # different iteration over subset. Likewise, if the src was already
770 772 # selected, the current lineage can be selected without going back
771 773 # further.
772 774 if src in sources or src in dests:
773 775 dests.update(lineage)
774 776 break
775 777
776 778 r = src
777 779 src = _getrevsource(repo, r)
778 780
779 781 return subset.filter(dests.__contains__)
780 782
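The comment block in destination() above explains why each candidate's recorded source is followed transitively: a rebase of a transplant should select the same destinations as a chain of grafts. The standalone sketch below shows that walk, with getsource standing in for _getrevsource and a hypothetical graft chain:

def destinations(getsource, candidates, sources):
    dests = set()
    for r in candidates:
        src = getsource(r)
        lineage = []
        while src is not None:
            lineage.append(r)
            # the whole visited lineage is selected once any link's source
            # is in the requested set (or is itself an already-selected dest)
            if src in sources or src in dests:
                dests.update(lineage)
                break
            r = src
            src = getsource(r)
    return dests

# hypothetical graft chain: 10 was grafted as 20, 20 was grafted as 30
source = {20: 10, 30: 20}
print(sorted(destinations(source.get, [20, 30], {10})))  # [20, 30]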
781 783 def divergent(repo, subset, x):
782 784 """``divergent()``
783 785 Final successors of changesets with an alternative set of final successors.
784 786 """
785 787 # i18n: "divergent" is a keyword
786 788 getargs(x, 0, 0, _("divergent takes no arguments"))
787 789 divergent = obsmod.getrevs(repo, 'divergent')
788 790 return subset & divergent
789 791
790 792 def draft(repo, subset, x):
791 793 """``draft()``
792 794 Changeset in draft phase."""
793 795 # i18n: "draft" is a keyword
794 796 getargs(x, 0, 0, _("draft takes no arguments"))
795 797 phase = repo._phasecache.phase
796 798 target = phases.draft
797 799 condition = lambda r: phase(repo, r) == target
798 800 return subset.filter(condition, cache=False)
799 801
800 802 def extinct(repo, subset, x):
801 803 """``extinct()``
802 804 Obsolete changesets with obsolete descendants only.
803 805 """
804 806 # i18n: "extinct" is a keyword
805 807 getargs(x, 0, 0, _("extinct takes no arguments"))
806 808 extincts = obsmod.getrevs(repo, 'extinct')
807 809 return subset & extincts
808 810
809 811 def extra(repo, subset, x):
810 812 """``extra(label, [value])``
811 813 Changesets with the given label in the extra metadata, with the given
812 814 optional value.
813 815
814 816 If `value` starts with `re:`, the remainder of the value is treated as
815 817 a regular expression. To match a value that actually starts with `re:`,
816 818 use the prefix `literal:`.
817 819 """
818 820
819 821 # i18n: "extra" is a keyword
820 822 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
821 823 # i18n: "extra" is a keyword
822 824 label = getstring(l[0], _('first argument to extra must be a string'))
823 825 value = None
824 826
825 827 if len(l) > 1:
826 828 # i18n: "extra" is a keyword
827 829 value = getstring(l[1], _('second argument to extra must be a string'))
828 830 kind, value, matcher = _stringmatcher(value)
829 831
830 832 def _matchvalue(r):
831 833 extra = repo[r].extra()
832 834 return label in extra and (value is None or matcher(extra[label]))
833 835
834 836 return subset.filter(lambda r: _matchvalue(r))
835 837
836 838 def filelog(repo, subset, x):
837 839 """``filelog(pattern)``
838 840 Changesets connected to the specified filelog.
839 841
840 842 For performance reasons, visits only revisions mentioned in the file-level
841 843 filelog, rather than filtering through all changesets (much faster, but
842 844 doesn't include deletes or duplicate changes). For a slower, more accurate
843 845 result, use ``file()``.
844 846
845 847 The pattern without explicit kind like ``glob:`` is expected to be
846 848 relative to the current directory and match against a file exactly
847 849 for efficiency.
848 850
849 851 If some linkrev points to revisions filtered by the current repoview, we'll
850 852 work around it to return a non-filtered value.
851 853 """
852 854
853 855 # i18n: "filelog" is a keyword
854 856 pat = getstring(x, _("filelog requires a pattern"))
855 857 s = set()
856 858 cl = repo.changelog
857 859
858 860 if not matchmod.patkind(pat):
859 861 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
860 862 files = [f]
861 863 else:
862 864 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
863 865 files = (f for f in repo[None] if m(f))
864 866
865 867 for f in files:
866 868 backrevref = {} # final value for: filerev -> changerev
867 869 lowestchild = {} # lowest known filerev child of a filerev
868 870 delayed = [] # filerev with filtered linkrev, for post-processing
869 871 lowesthead = None # cache for manifest content of all head revisions
870 872 fl = repo.file(f)
871 873 for fr in list(fl):
872 874 rev = fl.linkrev(fr)
873 875 if rev not in cl:
874 876 # changerev pointed in linkrev is filtered
875 877 # record it for post processing.
876 878 delayed.append((fr, rev))
877 879 continue
878 880 for p in fl.parentrevs(fr):
879 881 if 0 <= p and p not in lowestchild:
880 882 lowestchild[p] = fr
881 883 backrevref[fr] = rev
882 884 s.add(rev)
883 885
884 886 # Post-processing of all filerevs we skipped because they were
885 887 # filtered. If such filerevs have known and unfiltered children, this
886 888 # means they have an unfiltered appearance out there. We'll use linkrev
887 889 # adjustment to find one of these appearances. The lowest known child
888 890 # will be used as a starting point because it is the best upper-bound we
889 891 # have.
890 892 #
891 893 # This approach will fail when an unfiltered but linkrev-shadowed
892 894 # appearance exists in a head changeset without unfiltered filerev
893 895 # children anywhere.
894 896 while delayed:
895 897 # must be a descending iteration. To slowly fill lowest child
896 898 # information that is of potential use by the next item.
897 899 fr, rev = delayed.pop()
898 900 lkr = rev
899 901
900 902 child = lowestchild.get(fr)
901 903
902 904 if child is None:
903 905 # search for existence of this file revision in a head revision.
904 906 # There are three possibilities:
905 907 # - the revision exists in a head and we can find an
906 908 # introduction from there,
907 909 # - the revision does not exist in a head because it has been
908 910 # changed since its introduction: we would have found a child
909 911 # and be in the other 'else' clause,
910 912 # - all versions of the revision are hidden.
911 913 if lowesthead is None:
912 914 lowesthead = {}
913 915 for h in repo.heads():
914 916 fnode = repo[h].manifest().get(f)
915 917 if fnode is not None:
916 918 lowesthead[fl.rev(fnode)] = h
917 919 headrev = lowesthead.get(fr)
918 920 if headrev is None:
919 921 # content is nowhere unfiltered
920 922 continue
921 923 rev = repo[headrev][f].introrev()
922 924 else:
923 925 # the lowest known child is a good upper bound
924 926 childcrev = backrevref[child]
925 927 # XXX this does not guarantee returning the lowest
926 928 # introduction of this revision, but this gives a
927 929 # result which is a good start and will fit in most
928 930 # cases. We probably need to fix the multiple
929 931 # introductions case properly (report each
930 932 # introduction, even for identical file revisions)
931 933 # once and for all at some point anyway.
932 934 for p in repo[childcrev][f].parents():
933 935 if p.filerev() == fr:
934 936 rev = p.rev()
935 937 break
936 938 if rev == lkr: # no shadowed entry found
937 939 # XXX This should never happen unless some manifest points
938 940 # to biggish file revisions (like a revision that uses a
939 941 # parent that never appears in the manifest ancestors)
940 942 continue
941 943
942 944 # Fill the data for the next iteration.
943 945 for p in fl.parentrevs(fr):
944 946 if 0 <= p and p not in lowestchild:
945 947 lowestchild[p] = fr
946 948 backrevref[fr] = rev
947 949 s.add(rev)
948 950
949 951 return subset & s
950 952
951 953 def first(repo, subset, x):
952 954 """``first(set, [n])``
953 955 An alias for limit().
954 956 """
955 957 return limit(repo, subset, x)
956 958
957 959 def _follow(repo, subset, x, name, followfirst=False):
958 960 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
959 961 c = repo['.']
960 962 if l:
961 963 x = getstring(l[0], _("%s expected a filename") % name)
962 964 if x in c:
963 965 cx = c[x]
964 966 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
965 967 # include the revision responsible for the most recent version
966 968 s.add(cx.introrev())
967 969 else:
968 970 return baseset()
969 971 else:
970 972 s = _revancestors(repo, baseset([c.rev()]), followfirst)
971 973
972 974 return subset & s
973 975
974 976 def follow(repo, subset, x):
975 977 """``follow([file])``
976 978 An alias for ``::.`` (ancestors of the working directory's first parent).
977 979 If a filename is specified, the history of the given file is followed,
978 980 including copies.
979 981 """
980 982 return _follow(repo, subset, x, 'follow')
981 983
982 984 def _followfirst(repo, subset, x):
983 985 # ``followfirst([file])``
984 986 # Like ``follow([file])`` but follows only the first parent of
985 987 # every revision or file revision.
986 988 return _follow(repo, subset, x, '_followfirst', followfirst=True)
987 989
988 990 def getall(repo, subset, x):
989 991 """``all()``
990 992 All changesets, the same as ``0:tip``.
991 993 """
992 994 # i18n: "all" is a keyword
993 995 getargs(x, 0, 0, _("all takes no arguments"))
994 996 return subset & spanset(repo) # drop "null" if any
995 997
996 998 def grep(repo, subset, x):
997 999 """``grep(regex)``
998 1000 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
999 1001 to ensure special escape characters are handled correctly. Unlike
1000 1002 ``keyword(string)``, the match is case-sensitive.
1001 1003 """
1002 1004 try:
1003 1005 # i18n: "grep" is a keyword
1004 1006 gr = re.compile(getstring(x, _("grep requires a string")))
1005 1007 except re.error, e:
1006 1008 raise error.ParseError(_('invalid match pattern: %s') % e)
1007 1009
1008 1010 def matches(x):
1009 1011 c = repo[x]
1010 1012 for e in c.files() + [c.user(), c.description()]:
1011 1013 if gr.search(e):
1012 1014 return True
1013 1015 return False
1014 1016
1015 1017 return subset.filter(matches)
1016 1018
1017 1019 def _matchfiles(repo, subset, x):
1018 1020 # _matchfiles takes a revset list of prefixed arguments:
1019 1021 #
1020 1022 # [p:foo, i:bar, x:baz]
1021 1023 #
1022 1024 # builds a match object from them and filters subset. Allowed
1023 1025 # prefixes are 'p:' for regular patterns, 'i:' for include
1024 1026 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1025 1027 # a revision identifier, or the empty string to reference the
1026 1028 # working directory, from which the match object is
1027 1029 # initialized. Use 'd:' to set the default matching mode, default
1028 1030 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1029 1031
1030 1032 # i18n: "_matchfiles" is a keyword
1031 1033 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1032 1034 pats, inc, exc = [], [], []
1033 1035 rev, default = None, None
1034 1036 for arg in l:
1035 1037 # i18n: "_matchfiles" is a keyword
1036 1038 s = getstring(arg, _("_matchfiles requires string arguments"))
1037 1039 prefix, value = s[:2], s[2:]
1038 1040 if prefix == 'p:':
1039 1041 pats.append(value)
1040 1042 elif prefix == 'i:':
1041 1043 inc.append(value)
1042 1044 elif prefix == 'x:':
1043 1045 exc.append(value)
1044 1046 elif prefix == 'r:':
1045 1047 if rev is not None:
1046 1048 # i18n: "_matchfiles" is a keyword
1047 1049 raise error.ParseError(_('_matchfiles expected at most one '
1048 1050 'revision'))
1049 1051 if value != '': # empty means working directory; leave rev as None
1050 1052 rev = value
1051 1053 elif prefix == 'd:':
1052 1054 if default is not None:
1053 1055 # i18n: "_matchfiles" is a keyword
1054 1056 raise error.ParseError(_('_matchfiles expected at most one '
1055 1057 'default mode'))
1056 1058 default = value
1057 1059 else:
1058 1060 # i18n: "_matchfiles" is a keyword
1059 1061 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1060 1062 if not default:
1061 1063 default = 'glob'
1062 1064
1063 1065 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1064 1066 exclude=exc, ctx=repo[rev], default=default)
1065 1067
1066 1068 def matches(x):
1067 1069 for f in repo[x].files():
1068 1070 if m(f):
1069 1071 return True
1070 1072 return False
1071 1073
1072 1074 return subset.filter(matches)
1073 1075
1074 1076 def hasfile(repo, subset, x):
1075 1077 """``file(pattern)``
1076 1078 Changesets affecting files matched by pattern.
1077 1079
1078 1080 For a faster but less accurate result, consider using ``filelog()``
1079 1081 instead.
1080 1082
1081 1083 This predicate uses ``glob:`` as the default kind of pattern.
1082 1084 """
1083 1085 # i18n: "file" is a keyword
1084 1086 pat = getstring(x, _("file requires a pattern"))
1085 1087 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1086 1088
1087 1089 def head(repo, subset, x):
1088 1090 """``head()``
1089 1091 Changeset is a named branch head.
1090 1092 """
1091 1093 # i18n: "head" is a keyword
1092 1094 getargs(x, 0, 0, _("head takes no arguments"))
1093 1095 hs = set()
1094 1096 for b, ls in repo.branchmap().iteritems():
1095 1097 hs.update(repo[h].rev() for h in ls)
1096 1098 return baseset(hs).filter(subset.__contains__)
1097 1099
1098 1100 def heads(repo, subset, x):
1099 1101 """``heads(set)``
1100 1102 Members of set with no children in set.
1101 1103 """
1102 1104 s = getset(repo, subset, x)
1103 1105 ps = parents(repo, subset, x)
1104 1106 return s - ps
1105 1107
1106 1108 def hidden(repo, subset, x):
1107 1109 """``hidden()``
1108 1110 Hidden changesets.
1109 1111 """
1110 1112 # i18n: "hidden" is a keyword
1111 1113 getargs(x, 0, 0, _("hidden takes no arguments"))
1112 1114 hiddenrevs = repoview.filterrevs(repo, 'visible')
1113 1115 return subset & hiddenrevs
1114 1116
1115 1117 def keyword(repo, subset, x):
1116 1118 """``keyword(string)``
1117 1119 Search commit message, user name, and names of changed files for
1118 1120 string. The match is case-insensitive.
1119 1121 """
1120 1122 # i18n: "keyword" is a keyword
1121 1123 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1122 1124
1123 1125 def matches(r):
1124 1126 c = repo[r]
1125 1127 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1126 1128 c.description()])
1127 1129
1128 1130 return subset.filter(matches)
1129 1131
1130 1132 def limit(repo, subset, x):
1131 1133 """``limit(set, [n])``
1132 1134 First n members of set, defaulting to 1.
1133 1135 """
1134 1136 # i18n: "limit" is a keyword
1135 1137 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1136 1138 try:
1137 1139 lim = 1
1138 1140 if len(l) == 2:
1139 1141 # i18n: "limit" is a keyword
1140 1142 lim = int(getstring(l[1], _("limit requires a number")))
1141 1143 except (TypeError, ValueError):
1142 1144 # i18n: "limit" is a keyword
1143 1145 raise error.ParseError(_("limit expects a number"))
1144 1146 ss = subset
1145 1147 os = getset(repo, fullreposet(repo), l[0])
1146 1148 result = []
1147 1149 it = iter(os)
1148 1150 for x in xrange(lim):
1149 1151 try:
1150 1152 y = it.next()
1151 1153 if y in ss:
1152 1154 result.append(y)
1153 1155 except (StopIteration):
1154 1156 break
1155 1157 return baseset(result)
1156 1158
1157 1159 def last(repo, subset, x):
1158 1160 """``last(set, [n])``
1159 1161 Last n members of set, defaulting to 1.
1160 1162 """
1161 1163 # i18n: "last" is a keyword
1162 1164 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1163 1165 try:
1164 1166 lim = 1
1165 1167 if len(l) == 2:
1166 1168 # i18n: "last" is a keyword
1167 1169 lim = int(getstring(l[1], _("last requires a number")))
1168 1170 except (TypeError, ValueError):
1169 1171 # i18n: "last" is a keyword
1170 1172 raise error.ParseError(_("last expects a number"))
1171 1173 ss = subset
1172 1174 os = getset(repo, fullreposet(repo), l[0])
1173 1175 os.reverse()
1174 1176 result = []
1175 1177 it = iter(os)
1176 1178 for x in xrange(lim):
1177 1179 try:
1178 1180 y = it.next()
1179 1181 if y in ss:
1180 1182 result.append(y)
1181 1183 except (StopIteration):
1182 1184 break
1183 1185 return baseset(result)
1184 1186
1185 1187 def maxrev(repo, subset, x):
1186 1188 """``max(set)``
1187 1189 Changeset with highest revision number in set.
1188 1190 """
1189 1191 os = getset(repo, fullreposet(repo), x)
1190 1192 if os:
1191 1193 m = os.max()
1192 1194 if m in subset:
1193 1195 return baseset([m])
1194 1196 return baseset()
1195 1197
1196 1198 def merge(repo, subset, x):
1197 1199 """``merge()``
1198 1200 Changeset is a merge changeset.
1199 1201 """
1200 1202 # i18n: "merge" is a keyword
1201 1203 getargs(x, 0, 0, _("merge takes no arguments"))
1202 1204 cl = repo.changelog
1203 1205 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1204 1206
1205 1207 def branchpoint(repo, subset, x):
1206 1208 """``branchpoint()``
1207 1209 Changesets with more than one child.
1208 1210 """
1209 1211 # i18n: "branchpoint" is a keyword
1210 1212 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1211 1213 cl = repo.changelog
1212 1214 if not subset:
1213 1215 return baseset()
1214 1216 baserev = min(subset)
1215 1217 parentscount = [0]*(len(repo) - baserev)
1216 1218 for r in cl.revs(start=baserev + 1):
1217 1219 for p in cl.parentrevs(r):
1218 1220 if p >= baserev:
1219 1221 parentscount[p - baserev] += 1
1220 1222 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1221 1223
1222 1224 def minrev(repo, subset, x):
1223 1225 """``min(set)``
1224 1226 Changeset with lowest revision number in set.
1225 1227 """
1226 1228 os = getset(repo, fullreposet(repo), x)
1227 1229 if os:
1228 1230 m = os.min()
1229 1231 if m in subset:
1230 1232 return baseset([m])
1231 1233 return baseset()
1232 1234
1233 1235 def modifies(repo, subset, x):
1234 1236 """``modifies(pattern)``
1235 1237 Changesets modifying files matched by pattern.
1236 1238
1237 1239 The pattern without explicit kind like ``glob:`` is expected to be
1238 1240 relative to the current directory and match against a file or a
1239 1241 directory.
1240 1242 """
1241 1243 # i18n: "modifies" is a keyword
1242 1244 pat = getstring(x, _("modifies requires a pattern"))
1243 1245 return checkstatus(repo, subset, pat, 0)
1244 1246
1245 1247 def named(repo, subset, x):
1246 1248 """``named(namespace)``
1247 1249 The changesets in a given namespace.
1248 1250
1249 1251 If `namespace` starts with `re:`, the remainder of the string is treated as
1250 1252 a regular expression. To match a namespace that actually starts with `re:`,
1251 1253 use the prefix `literal:`.
1252 1254 """
1253 1255 # i18n: "named" is a keyword
1254 1256 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1255 1257
1256 1258 ns = getstring(args[0],
1257 1259 # i18n: "named" is a keyword
1258 1260 _('the argument to named must be a string'))
1259 1261 kind, pattern, matcher = _stringmatcher(ns)
1260 1262 namespaces = set()
1261 1263 if kind == 'literal':
1262 1264 if pattern not in repo.names:
1263 1265 raise error.RepoLookupError(_("namespace '%s' does not exist")
1264 1266 % ns)
1265 1267 namespaces.add(repo.names[pattern])
1266 1268 else:
1267 1269 for name, ns in repo.names.iteritems():
1268 1270 if matcher(name):
1269 1271 namespaces.add(ns)
1270 1272 if not namespaces:
1271 1273 raise error.RepoLookupError(_("no namespace exists"
1272 1274 " that match '%s'") % pattern)
1273 1275
1274 1276 names = set()
1275 1277 for ns in namespaces:
1276 1278 for name in ns.listnames(repo):
1277 1279 if name not in ns.deprecated:
1278 1280 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1279 1281
1280 1282 names -= set([node.nullrev])
1281 1283 return subset & names
1282 1284
1283 1285 def node_(repo, subset, x):
1284 1286 """``id(string)``
1285 1287 Revision non-ambiguously specified by the given hex string prefix.
1286 1288 """
1287 1289 # i18n: "id" is a keyword
1288 1290 l = getargs(x, 1, 1, _("id requires one argument"))
1289 1291 # i18n: "id" is a keyword
1290 1292 n = getstring(l[0], _("id requires a string"))
1291 1293 if len(n) == 40:
1292 1294 try:
1293 1295 rn = repo.changelog.rev(node.bin(n))
1294 1296 except (LookupError, TypeError):
1295 1297 rn = None
1296 1298 else:
1297 1299 rn = None
1298 1300 pm = repo.changelog._partialmatch(n)
1299 1301 if pm is not None:
1300 1302 rn = repo.changelog.rev(pm)
1301 1303
1302 1304 if rn is None:
1303 1305 return baseset()
1304 1306 result = baseset([rn])
1305 1307 return result & subset
1306 1308
1307 1309 def obsolete(repo, subset, x):
1308 1310 """``obsolete()``
1309 1311 Mutable changeset with a newer version."""
1310 1312 # i18n: "obsolete" is a keyword
1311 1313 getargs(x, 0, 0, _("obsolete takes no arguments"))
1312 1314 obsoletes = obsmod.getrevs(repo, 'obsolete')
1313 1315 return subset & obsoletes
1314 1316
1315 1317 def only(repo, subset, x):
1316 1318 """``only(set, [set])``
1317 1319 Changesets that are ancestors of the first set that are not ancestors
1318 1320 of any other head in the repo. If a second set is specified, the result
1319 1321 is ancestors of the first set that are not ancestors of the second set
1320 1322 (i.e. ::<set1> - ::<set2>).
1321 1323 """
1322 1324 cl = repo.changelog
1323 1325 # i18n: "only" is a keyword
1324 1326 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1325 1327 include = getset(repo, fullreposet(repo), args[0])
1326 1328 if len(args) == 1:
1327 1329 if not include:
1328 1330 return baseset()
1329 1331
1330 1332 descendants = set(_revdescendants(repo, include, False))
1331 1333 exclude = [rev for rev in cl.headrevs()
1332 1334 if not rev in descendants and not rev in include]
1333 1335 else:
1334 1336 exclude = getset(repo, fullreposet(repo), args[1])
1335 1337
1336 1338 results = set(cl.findmissingrevs(common=exclude, heads=include))
1337 1339 return subset & results
1338 1340
1339 1341 def origin(repo, subset, x):
1340 1342 """``origin([set])``
1341 1343 Changesets that were specified as a source for the grafts, transplants or
1342 1344 rebases that created the given revisions. Omitting the optional set is the
1343 1345 same as passing all(). If a changeset created by these operations is itself
1344 1346 specified as a source for one of these operations, only the source changeset
1345 1347 for the first operation is selected.
1346 1348 """
1347 1349 if x is not None:
1348 1350 dests = getset(repo, fullreposet(repo), x)
1349 1351 else:
1350 1352 dests = fullreposet(repo)
1351 1353
1352 1354 def _firstsrc(rev):
1353 1355 src = _getrevsource(repo, rev)
1354 1356 if src is None:
1355 1357 return None
1356 1358
1357 1359 while True:
1358 1360 prev = _getrevsource(repo, src)
1359 1361
1360 1362 if prev is None:
1361 1363 return src
1362 1364 src = prev
1363 1365
1364 1366 o = set([_firstsrc(r) for r in dests])
1365 1367 o -= set([None])
1366 1368 return subset & o
1367 1369
1368 1370 def outgoing(repo, subset, x):
1369 1371 """``outgoing([path])``
1370 1372 Changesets not found in the specified destination repository, or the
1371 1373 default push location.
1372 1374 """
1373 1375 # Avoid cycles.
1374 1376 import discovery
1375 1377 import hg
1376 1378 # i18n: "outgoing" is a keyword
1377 1379 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1378 1380 # i18n: "outgoing" is a keyword
1379 1381 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1380 1382 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1381 1383 dest, branches = hg.parseurl(dest)
1382 1384 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1383 1385 if revs:
1384 1386 revs = [repo.lookup(rev) for rev in revs]
1385 1387 other = hg.peer(repo, {}, dest)
1386 1388 repo.ui.pushbuffer()
1387 1389 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1388 1390 repo.ui.popbuffer()
1389 1391 cl = repo.changelog
1390 1392 o = set([cl.rev(r) for r in outgoing.missing])
1391 1393 return subset & o
1392 1394
1393 1395 def p1(repo, subset, x):
1394 1396 """``p1([set])``
1395 1397 First parent of changesets in set, or the working directory.
1396 1398 """
1397 1399 if x is None:
1398 1400 p = repo[x].p1().rev()
1399 1401 if p >= 0:
1400 1402 return subset & baseset([p])
1401 1403 return baseset()
1402 1404
1403 1405 ps = set()
1404 1406 cl = repo.changelog
1405 1407 for r in getset(repo, fullreposet(repo), x):
1406 1408 ps.add(cl.parentrevs(r)[0])
1407 1409 ps -= set([node.nullrev])
1408 1410 return subset & ps
1409 1411
1410 1412 def p2(repo, subset, x):
1411 1413 """``p2([set])``
1412 1414 Second parent of changesets in set, or the working directory.
1413 1415 """
1414 1416 if x is None:
1415 1417 ps = repo[x].parents()
1416 1418 try:
1417 1419 p = ps[1].rev()
1418 1420 if p >= 0:
1419 1421 return subset & baseset([p])
1420 1422 return baseset()
1421 1423 except IndexError:
1422 1424 return baseset()
1423 1425
1424 1426 ps = set()
1425 1427 cl = repo.changelog
1426 1428 for r in getset(repo, fullreposet(repo), x):
1427 1429 ps.add(cl.parentrevs(r)[1])
1428 1430 ps -= set([node.nullrev])
1429 1431 return subset & ps
1430 1432
1431 1433 def parents(repo, subset, x):
1432 1434 """``parents([set])``
1433 1435 The set of all parents for all changesets in set, or the working directory.
1434 1436 """
1435 1437 if x is None:
1436 1438 ps = set(p.rev() for p in repo[x].parents())
1437 1439 else:
1438 1440 ps = set()
1439 1441 cl = repo.changelog
1440 1442 for r in getset(repo, fullreposet(repo), x):
1441 1443 ps.update(cl.parentrevs(r))
1442 1444 ps -= set([node.nullrev])
1443 1445 return subset & ps
1444 1446
1445 1447 def parentspec(repo, subset, x, n):
1446 1448 """``set^0``
1447 1449 The set.
1448 1450 ``set^1`` (or ``set^``), ``set^2``
1449 1451 First or second parent, respectively, of all changesets in set.
1450 1452 """
1451 1453 try:
1452 1454 n = int(n[1])
1453 1455 if n not in (0, 1, 2):
1454 1456 raise ValueError
1455 1457 except (TypeError, ValueError):
1456 1458 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1457 1459 ps = set()
1458 1460 cl = repo.changelog
1459 1461 for r in getset(repo, fullreposet(repo), x):
1460 1462 if n == 0:
1461 1463 ps.add(r)
1462 1464 elif n == 1:
1463 1465 ps.add(cl.parentrevs(r)[0])
1464 1466 elif n == 2:
1465 1467 parents = cl.parentrevs(r)
1466 1468 if len(parents) > 1:
1467 1469 ps.add(parents[1])
1468 1470 return subset & ps
1469 1471
1470 1472 def present(repo, subset, x):
1471 1473 """``present(set)``
1472 1474 An empty set, if any revision in set isn't found; otherwise,
1473 1475 all revisions in set.
1474 1476
1475 1477 If any of specified revisions is not present in the local repository,
1476 1478 the query is normally aborted. But this predicate allows the query
1477 1479 to continue even in such cases.
1478 1480 """
1479 1481 try:
1480 1482 return getset(repo, subset, x)
1481 1483 except error.RepoLookupError:
1482 1484 return baseset()
1483 1485
1484 1486 def public(repo, subset, x):
1485 1487 """``public()``
1486 1488 Changeset in public phase."""
1487 1489 # i18n: "public" is a keyword
1488 1490 getargs(x, 0, 0, _("public takes no arguments"))
1489 1491 phase = repo._phasecache.phase
1490 1492 target = phases.public
1491 1493 condition = lambda r: phase(repo, r) == target
1492 1494 return subset.filter(condition, cache=False)
1493 1495
1494 1496 def remote(repo, subset, x):
1495 1497 """``remote([id [,path]])``
1496 1498 Local revision that corresponds to the given identifier in a
1497 1499 remote repository, if present. Here, the '.' identifier is a
1498 1500 synonym for the current local branch.
1499 1501 """
1500 1502
1501 1503 import hg # avoid start-up nasties
1502 1504 # i18n: "remote" is a keyword
1503 1505 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1504 1506
1505 1507 q = '.'
1506 1508 if len(l) > 0:
1507 1509 # i18n: "remote" is a keyword
1508 1510 q = getstring(l[0], _("remote requires a string id"))
1509 1511 if q == '.':
1510 1512 q = repo['.'].branch()
1511 1513
1512 1514 dest = ''
1513 1515 if len(l) > 1:
1514 1516 # i18n: "remote" is a keyword
1515 1517 dest = getstring(l[1], _("remote requires a repository path"))
1516 1518 dest = repo.ui.expandpath(dest or 'default')
1517 1519 dest, branches = hg.parseurl(dest)
1518 1520 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1519 1521 if revs:
1520 1522 revs = [repo.lookup(rev) for rev in revs]
1521 1523 other = hg.peer(repo, {}, dest)
1522 1524 n = other.lookup(q)
1523 1525 if n in repo:
1524 1526 r = repo[n].rev()
1525 1527 if r in subset:
1526 1528 return baseset([r])
1527 1529 return baseset()
1528 1530
1529 1531 def removes(repo, subset, x):
1530 1532 """``removes(pattern)``
1531 1533 Changesets which remove files matching pattern.
1532 1534
1533 1535 The pattern without explicit kind like ``glob:`` is expected to be
1534 1536 relative to the current directory and match against a file or a
1535 1537 directory.
1536 1538 """
1537 1539 # i18n: "removes" is a keyword
1538 1540 pat = getstring(x, _("removes requires a pattern"))
1539 1541 return checkstatus(repo, subset, pat, 2)
1540 1542
1541 1543 def rev(repo, subset, x):
1542 1544 """``rev(number)``
1543 1545 Revision with the given numeric identifier.
1544 1546 """
1545 1547 # i18n: "rev" is a keyword
1546 1548 l = getargs(x, 1, 1, _("rev requires one argument"))
1547 1549 try:
1548 1550 # i18n: "rev" is a keyword
1549 1551 l = int(getstring(l[0], _("rev requires a number")))
1550 1552 except (TypeError, ValueError):
1551 1553 # i18n: "rev" is a keyword
1552 1554 raise error.ParseError(_("rev expects a number"))
1553 1555 if l not in repo.changelog and l != node.nullrev:
1554 1556 return baseset()
1555 1557 return subset & baseset([l])
1556 1558
1557 1559 def matching(repo, subset, x):
1558 1560 """``matching(revision [, field])``
1559 1561 Changesets in which a given set of fields match the set of fields in the
1560 1562 selected revision or set.
1561 1563
1562 1564 To match more than one field pass the list of fields to match separated
1563 1565 by spaces (e.g. ``author description``).
1564 1566
1565 1567 Valid fields are most regular revision fields and some special fields.
1566 1568
1567 1569 Regular revision fields are ``description``, ``author``, ``branch``,
1568 1570 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1569 1571 and ``diff``.
1570 1572 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1571 1573 contents of the revision. Two revisions matching their ``diff`` will
1572 1574 also match their ``files``.
1573 1575
1574 1576 Special fields are ``summary`` and ``metadata``:
1575 1577 ``summary`` matches the first line of the description.
1576 1578 ``metadata`` is equivalent to matching ``description user date``
1577 1579 (i.e. it matches the main metadata fields).
1578 1580
1579 1581 ``metadata`` is the default field which is used when no fields are
1580 1582 specified. You can match more than one field at a time.
1581 1583 """
1582 1584 # i18n: "matching" is a keyword
1583 1585 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1584 1586
1585 1587 revs = getset(repo, fullreposet(repo), l[0])
1586 1588
1587 1589 fieldlist = ['metadata']
1588 1590 if len(l) > 1:
1589 1591 fieldlist = getstring(l[1],
1590 1592 # i18n: "matching" is a keyword
1591 1593 _("matching requires a string "
1592 1594 "as its second argument")).split()
1593 1595
1594 1596 # Make sure that there are no repeated fields,
1595 1597 # expand the 'special' 'metadata' field type
1596 1598 # and check the 'files' whenever we check the 'diff'
1597 1599 fields = []
1598 1600 for field in fieldlist:
1599 1601 if field == 'metadata':
1600 1602 fields += ['user', 'description', 'date']
1601 1603 elif field == 'diff':
1602 1604 # a revision matching the diff must also match the files
1603 1605 # since matching the diff is very costly, make sure to
1604 1606 # also match the files first
1605 1607 fields += ['files', 'diff']
1606 1608 else:
1607 1609 if field == 'author':
1608 1610 field = 'user'
1609 1611 fields.append(field)
1610 1612 fields = set(fields)
1611 1613 if 'summary' in fields and 'description' in fields:
1612 1614 # If a revision matches its description it also matches its summary
1613 1615 fields.discard('summary')
1614 1616
1615 1617 # We may want to match more than one field
1616 1618 # Not all fields take the same amount of time to be matched
1617 1619 # Sort the selected fields in order of increasing matching cost
1618 1620 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1619 1621 'files', 'description', 'substate', 'diff']
1620 1622 def fieldkeyfunc(f):
1621 1623 try:
1622 1624 return fieldorder.index(f)
1623 1625 except ValueError:
1624 1626 # assume an unknown field is very costly
1625 1627 return len(fieldorder)
1626 1628 fields = list(fields)
1627 1629 fields.sort(key=fieldkeyfunc)
1628 1630
1629 1631 # Each field will be matched with its own "getfield" function
1630 1632 # which will be added to the getfieldfuncs array of functions
1631 1633 getfieldfuncs = []
1632 1634 _funcs = {
1633 1635 'user': lambda r: repo[r].user(),
1634 1636 'branch': lambda r: repo[r].branch(),
1635 1637 'date': lambda r: repo[r].date(),
1636 1638 'description': lambda r: repo[r].description(),
1637 1639 'files': lambda r: repo[r].files(),
1638 1640 'parents': lambda r: repo[r].parents(),
1639 1641 'phase': lambda r: repo[r].phase(),
1640 1642 'substate': lambda r: repo[r].substate,
1641 1643 'summary': lambda r: repo[r].description().splitlines()[0],
1642 1644 'diff': lambda r: list(repo[r].diff(git=True)),
1643 1645 }
1644 1646 for info in fields:
1645 1647 getfield = _funcs.get(info, None)
1646 1648 if getfield is None:
1647 1649 raise error.ParseError(
1648 1650 # i18n: "matching" is a keyword
1649 1651 _("unexpected field name passed to matching: %s") % info)
1650 1652 getfieldfuncs.append(getfield)
1651 1653 # convert the getfield array of functions into a "getinfo" function
1652 1654 # which returns an array of field values (or a single value if there
1653 1655 # is only one field to match)
1654 1656 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1655 1657
1656 1658 def matches(x):
1657 1659 for rev in revs:
1658 1660 target = getinfo(rev)
1659 1661 match = True
1660 1662 for n, f in enumerate(getfieldfuncs):
1661 1663 if target[n] != f(x):
1662 1664 match = False
1663 1665 if match:
1664 1666 return True
1665 1667 return False
1666 1668
1667 1669 return subset.filter(matches)
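
# For example, the revset "matching(tip, 'author date')" selects changesets
# whose author and date both equal those of tip; with no second argument the
# default 'metadata' fields (user, description, date) are compared instead.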
1668 1670
1669 1671 def reverse(repo, subset, x):
1670 1672 """``reverse(set)``
1671 1673 Reverse order of set.
1672 1674 """
1673 1675 l = getset(repo, subset, x)
1674 1676 l.reverse()
1675 1677 return l
1676 1678
1677 1679 def roots(repo, subset, x):
1678 1680 """``roots(set)``
1679 1681 Changesets in set with no parent changeset in set.
1680 1682 """
1681 1683 s = getset(repo, fullreposet(repo), x)
1682 1684 subset = subset & s # baseset([r for r in s if r in subset])
1683 1685 cs = _children(repo, subset, s)
1684 1686 return subset - cs
1685 1687
1686 1688 def secret(repo, subset, x):
1687 1689 """``secret()``
1688 1690 Changesets in secret phase."""
1689 1691 # i18n: "secret" is a keyword
1690 1692 getargs(x, 0, 0, _("secret takes no arguments"))
1691 1693 phase = repo._phasecache.phase
1692 1694 target = phases.secret
1693 1695 condition = lambda r: phase(repo, r) == target
1694 1696 return subset.filter(condition, cache=False)
1695 1697
1696 1698 def sort(repo, subset, x):
1697 1699 """``sort(set[, [-]key...])``
1698 1700 Sort set by keys. The default sort order is ascending, specify a key
1699 1701 as ``-key`` to sort in descending order.
1700 1702
1701 1703 The keys can be:
1702 1704
1703 1705 - ``rev`` for the revision number,
1704 1706 - ``branch`` for the branch name,
1705 1707 - ``desc`` for the commit message (description),
1706 1708 - ``user`` for user name (``author`` can be used as an alias),
1707 1709 - ``date`` for the commit date
1708 1710 """
1709 1711 # i18n: "sort" is a keyword
1710 1712 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1711 1713 keys = "rev"
1712 1714 if len(l) == 2:
1713 1715 # i18n: "sort" is a keyword
1714 1716 keys = getstring(l[1], _("sort spec must be a string"))
1715 1717
1716 1718 s = l[0]
1717 1719 keys = keys.split()
1718 1720 l = []
1719 1721 def invert(s):
1720 1722 return "".join(chr(255 - ord(c)) for c in s)
1721 1723 revs = getset(repo, subset, s)
1722 1724 if keys == ["rev"]:
1723 1725 revs.sort()
1724 1726 return revs
1725 1727 elif keys == ["-rev"]:
1726 1728 revs.sort(reverse=True)
1727 1729 return revs
1728 1730 for r in revs:
1729 1731 c = repo[r]
1730 1732 e = []
1731 1733 for k in keys:
1732 1734 if k == 'rev':
1733 1735 e.append(r)
1734 1736 elif k == '-rev':
1735 1737 e.append(-r)
1736 1738 elif k == 'branch':
1737 1739 e.append(c.branch())
1738 1740 elif k == '-branch':
1739 1741 e.append(invert(c.branch()))
1740 1742 elif k == 'desc':
1741 1743 e.append(c.description())
1742 1744 elif k == '-desc':
1743 1745 e.append(invert(c.description()))
1744 1746 elif k in 'user author':
1745 1747 e.append(c.user())
1746 1748 elif k in '-user -author':
1747 1749 e.append(invert(c.user()))
1748 1750 elif k == 'date':
1749 1751 e.append(c.date()[0])
1750 1752 elif k == '-date':
1751 1753 e.append(-c.date()[0])
1752 1754 else:
1753 1755 raise error.ParseError(_("unknown sort key %r") % k)
1754 1756 e.append(r)
1755 1757 l.append(e)
1756 1758 l.sort()
1757 1759 return baseset([e[-1] for e in l])
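
# For example, the revset "sort(all(), '-date user')" orders every changeset
# by descending commit date and, for equal dates, by ascending user name.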
1758 1760
1759 1761 def subrepo(repo, subset, x):
1760 1762 """``subrepo([pattern])``
1761 1763 Changesets that add, modify or remove the given subrepo. If no subrepo
1762 1764 pattern is named, any subrepo changes are returned.
1763 1765 """
1764 1766 # i18n: "subrepo" is a keyword
1765 1767 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1766 1768 if len(args) != 0:
1767 1769 pat = getstring(args[0], _("subrepo requires a pattern"))
1768 1770
1769 1771 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1770 1772
1771 1773 def submatches(names):
1772 1774 k, p, m = _stringmatcher(pat)
1773 1775 for name in names:
1774 1776 if m(name):
1775 1777 yield name
1776 1778
1777 1779 def matches(x):
1778 1780 c = repo[x]
1779 1781 s = repo.status(c.p1().node(), c.node(), match=m)
1780 1782
1781 1783 if len(args) == 0:
1782 1784 return s.added or s.modified or s.removed
1783 1785
1784 1786 if s.added:
1785 1787 return util.any(submatches(c.substate.keys()))
1786 1788
1787 1789 if s.modified:
1788 1790 subs = set(c.p1().substate.keys())
1789 1791 subs.update(c.substate.keys())
1790 1792
1791 1793 for path in submatches(subs):
1792 1794 if c.p1().substate.get(path) != c.substate.get(path):
1793 1795 return True
1794 1796
1795 1797 if s.removed:
1796 1798 return util.any(submatches(c.p1().substate.keys()))
1797 1799
1798 1800 return False
1799 1801
1800 1802 return subset.filter(matches)
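
# For example, "subrepo()" selects changesets touching any subrepository,
# while "subrepo('re:^lib/')" only considers subrepositories whose path
# matches the regular expression ^lib/.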
1801 1803
1802 1804 def _stringmatcher(pattern):
1803 1805 """
1804 1806 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1805 1807 returns the matcher name, pattern, and matcher function.
1806 1808 missing or unknown prefixes are treated as literal matches.
1807 1809
1808 1810 helper for tests:
1809 1811 >>> def test(pattern, *tests):
1810 1812 ... kind, pattern, matcher = _stringmatcher(pattern)
1811 1813 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1812 1814
1813 1815 exact matching (no prefix):
1814 1816 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1815 1817 ('literal', 'abcdefg', [False, False, True])
1816 1818
1817 1819 regex matching ('re:' prefix)
1818 1820 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1819 1821 ('re', 'a.+b', [False, False, True])
1820 1822
1821 1823 force exact matches ('literal:' prefix)
1822 1824 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1823 1825 ('literal', 're:foobar', [False, True])
1824 1826
1825 1827 unknown prefixes are ignored and treated as literals
1826 1828 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1827 1829 ('literal', 'foo:bar', [False, False, True])
1828 1830 """
1829 1831 if pattern.startswith('re:'):
1830 1832 pattern = pattern[3:]
1831 1833 try:
1832 1834 regex = re.compile(pattern)
1833 1835 except re.error, e:
1834 1836 raise error.ParseError(_('invalid regular expression: %s')
1835 1837 % e)
1836 1838 return 're', pattern, regex.search
1837 1839 elif pattern.startswith('literal:'):
1838 1840 pattern = pattern[8:]
1839 1841 return 'literal', pattern, pattern.__eq__
1840 1842
1841 1843 def _substringmatcher(pattern):
1842 1844 kind, pattern, matcher = _stringmatcher(pattern)
1843 1845 if kind == 'literal':
1844 1846 matcher = lambda s: pattern in s
1845 1847 return kind, pattern, matcher
1846 1848
1847 1849 def tag(repo, subset, x):
1848 1850 """``tag([name])``
1849 1851 The specified tag by name, or all tagged revisions if no name is given.
1850 1852
1851 1853 If `name` starts with `re:`, the remainder of the name is treated as
1852 1854 a regular expression. To match a tag that actually starts with `re:`,
1853 1855 use the prefix `literal:`.
1854 1856 """
1855 1857 # i18n: "tag" is a keyword
1856 1858 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1857 1859 cl = repo.changelog
1858 1860 if args:
1859 1861 pattern = getstring(args[0],
1860 1862 # i18n: "tag" is a keyword
1861 1863 _('the argument to tag must be a string'))
1862 1864 kind, pattern, matcher = _stringmatcher(pattern)
1863 1865 if kind == 'literal':
1864 1866 # avoid resolving all tags
1865 1867 tn = repo._tagscache.tags.get(pattern, None)
1866 1868 if tn is None:
1867 1869 raise error.RepoLookupError(_("tag '%s' does not exist")
1868 1870 % pattern)
1869 1871 s = set([repo[tn].rev()])
1870 1872 else:
1871 1873 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1872 1874 else:
1873 1875 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1874 1876 return subset & s
1875 1877
1876 1878 def tagged(repo, subset, x):
1877 1879 return tag(repo, subset, x)
1878 1880
1879 1881 def unstable(repo, subset, x):
1880 1882 """``unstable()``
1881 1883 Non-obsolete changesets with obsolete ancestors.
1882 1884 """
1883 1885 # i18n: "unstable" is a keyword
1884 1886 getargs(x, 0, 0, _("unstable takes no arguments"))
1885 1887 unstables = obsmod.getrevs(repo, 'unstable')
1886 1888 return subset & unstables
1887 1889
1888 1890
1889 1891 def user(repo, subset, x):
1890 1892 """``user(string)``
1891 1893 User name contains string. The match is case-insensitive.
1892 1894
1893 1895 If `string` starts with `re:`, the remainder of the string is treated as
1894 1896 a regular expression. To match a user that actually contains `re:`, use
1895 1897 the prefix `literal:`.
1896 1898 """
1897 1899 return author(repo, subset, x)
1898 1900
1899 1901 # experimental
1900 1902 def wdir(repo, subset, x):
1901 1903 # i18n: "wdir" is a keyword
1902 1904 getargs(x, 0, 0, _("wdir takes no arguments"))
1903 1905 if None in subset:
1904 1906 return baseset([None])
1905 1907 return baseset()
1906 1908
1907 1909 # for internal use
1908 1910 def _list(repo, subset, x):
1909 1911 s = getstring(x, "internal error")
1910 1912 if not s:
1911 1913 return baseset()
1912 1914 ls = [repo[r].rev() for r in s.split('\0')]
1913 1915 s = subset
1914 1916 return baseset([r for r in ls if r in s])
1915 1917
1916 1918 # for internal use
1917 1919 def _intlist(repo, subset, x):
1918 1920 s = getstring(x, "internal error")
1919 1921 if not s:
1920 1922 return baseset()
1921 1923 ls = [int(r) for r in s.split('\0')]
1922 1924 s = subset
1923 1925 return baseset([r for r in ls if r in s])
1924 1926
1925 1927 # for internal use
1926 1928 def _hexlist(repo, subset, x):
1927 1929 s = getstring(x, "internal error")
1928 1930 if not s:
1929 1931 return baseset()
1930 1932 cl = repo.changelog
1931 1933 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1932 1934 s = subset
1933 1935 return baseset([r for r in ls if r in s])
1934 1936
1935 1937 symbols = {
1936 1938 "adds": adds,
1937 1939 "all": getall,
1938 1940 "ancestor": ancestor,
1939 1941 "ancestors": ancestors,
1940 1942 "_firstancestors": _firstancestors,
1941 1943 "author": author,
1942 1944 "bisect": bisect,
1943 1945 "bisected": bisected,
1944 1946 "bookmark": bookmark,
1945 1947 "branch": branch,
1946 1948 "branchpoint": branchpoint,
1947 1949 "bumped": bumped,
1948 1950 "bundle": bundle,
1949 1951 "children": children,
1950 1952 "closed": closed,
1951 1953 "contains": contains,
1952 1954 "converted": converted,
1953 1955 "date": date,
1954 1956 "desc": desc,
1955 1957 "descendants": descendants,
1956 1958 "_firstdescendants": _firstdescendants,
1957 1959 "destination": destination,
1958 1960 "divergent": divergent,
1959 1961 "draft": draft,
1960 1962 "extinct": extinct,
1961 1963 "extra": extra,
1962 1964 "file": hasfile,
1963 1965 "filelog": filelog,
1964 1966 "first": first,
1965 1967 "follow": follow,
1966 1968 "_followfirst": _followfirst,
1967 1969 "grep": grep,
1968 1970 "head": head,
1969 1971 "heads": heads,
1970 1972 "hidden": hidden,
1971 1973 "id": node_,
1972 1974 "keyword": keyword,
1973 1975 "last": last,
1974 1976 "limit": limit,
1975 1977 "_matchfiles": _matchfiles,
1976 1978 "max": maxrev,
1977 1979 "merge": merge,
1978 1980 "min": minrev,
1979 1981 "modifies": modifies,
1980 1982 "named": named,
1981 1983 "obsolete": obsolete,
1982 1984 "only": only,
1983 1985 "origin": origin,
1984 1986 "outgoing": outgoing,
1985 1987 "p1": p1,
1986 1988 "p2": p2,
1987 1989 "parents": parents,
1988 1990 "present": present,
1989 1991 "public": public,
1990 1992 "remote": remote,
1991 1993 "removes": removes,
1992 1994 "rev": rev,
1993 1995 "reverse": reverse,
1994 1996 "roots": roots,
1995 1997 "sort": sort,
1996 1998 "secret": secret,
1997 1999 "subrepo": subrepo,
1998 2000 "matching": matching,
1999 2001 "tag": tag,
2000 2002 "tagged": tagged,
2001 2003 "user": user,
2002 2004 "unstable": unstable,
2003 2005 "wdir": wdir,
2004 2006 "_list": _list,
2005 2007 "_intlist": _intlist,
2006 2008 "_hexlist": _hexlist,
2007 2009 }
2008 2010
2009 2011 # symbols which can't be used for a DoS attack for any given input
2010 2012 # (e.g. those which accept regexes as plain strings shouldn't be included)
2011 2013 # functions that just return a lot of changesets (like all) don't count here
2012 2014 safesymbols = set([
2013 2015 "adds",
2014 2016 "all",
2015 2017 "ancestor",
2016 2018 "ancestors",
2017 2019 "_firstancestors",
2018 2020 "author",
2019 2021 "bisect",
2020 2022 "bisected",
2021 2023 "bookmark",
2022 2024 "branch",
2023 2025 "branchpoint",
2024 2026 "bumped",
2025 2027 "bundle",
2026 2028 "children",
2027 2029 "closed",
2028 2030 "converted",
2029 2031 "date",
2030 2032 "desc",
2031 2033 "descendants",
2032 2034 "_firstdescendants",
2033 2035 "destination",
2034 2036 "divergent",
2035 2037 "draft",
2036 2038 "extinct",
2037 2039 "extra",
2038 2040 "file",
2039 2041 "filelog",
2040 2042 "first",
2041 2043 "follow",
2042 2044 "_followfirst",
2043 2045 "head",
2044 2046 "heads",
2045 2047 "hidden",
2046 2048 "id",
2047 2049 "keyword",
2048 2050 "last",
2049 2051 "limit",
2050 2052 "_matchfiles",
2051 2053 "max",
2052 2054 "merge",
2053 2055 "min",
2054 2056 "modifies",
2055 2057 "obsolete",
2056 2058 "only",
2057 2059 "origin",
2058 2060 "outgoing",
2059 2061 "p1",
2060 2062 "p2",
2061 2063 "parents",
2062 2064 "present",
2063 2065 "public",
2064 2066 "remote",
2065 2067 "removes",
2066 2068 "rev",
2067 2069 "reverse",
2068 2070 "roots",
2069 2071 "sort",
2070 2072 "secret",
2071 2073 "matching",
2072 2074 "tag",
2073 2075 "tagged",
2074 2076 "user",
2075 2077 "unstable",
2076 2078 "wdir",
2077 2079 "_list",
2078 2080 "_intlist",
2079 2081 "_hexlist",
2080 2082 ])
2081 2083
2082 2084 methods = {
2083 2085 "range": rangeset,
2084 2086 "dagrange": dagrange,
2085 2087 "string": stringset,
2086 2088 "symbol": stringset,
2087 2089 "and": andset,
2088 2090 "or": orset,
2089 2091 "not": notset,
2090 2092 "list": listset,
2091 2093 "func": func,
2092 2094 "ancestor": ancestorspec,
2093 2095 "parent": parentspec,
2094 2096 "parentpost": p1,
2095 2097 "only": only,
2096 2098 "onlypost": only,
2097 2099 }
2098 2100
2099 2101 def optimize(x, small):
2100 2102 if x is None:
2101 2103 return 0, x
2102 2104
2103 2105 smallbonus = 1
2104 2106 if small:
2105 2107 smallbonus = .5
2106 2108
2107 2109 op = x[0]
2108 2110 if op == 'minus':
2109 2111 return optimize(('and', x[1], ('not', x[2])), small)
2110 2112 elif op == 'only':
2111 2113 return optimize(('func', ('symbol', 'only'),
2112 2114 ('list', x[1], x[2])), small)
2113 2115 elif op == 'dagrangepre':
2114 2116 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2115 2117 elif op == 'dagrangepost':
2116 2118 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2117 2119 elif op == 'rangepre':
2118 2120 return optimize(('range', ('string', '0'), x[1]), small)
2119 2121 elif op == 'rangepost':
2120 2122 return optimize(('range', x[1], ('string', 'tip')), small)
2121 2123 elif op == 'negate':
2122 2124 return optimize(('string',
2123 2125 '-' + getstring(x[1], _("can't negate that"))), small)
2124 2126 elif op in 'string symbol negate':
2125 2127 return smallbonus, x # single revisions are small
2126 2128 elif op == 'and':
2127 2129 wa, ta = optimize(x[1], True)
2128 2130 wb, tb = optimize(x[2], True)
2129 2131
2130 2132 # (::x and not ::y)/(not ::y and ::x) have a fast path
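# e.g. "::feature and not ::default" is rewritten to "only(feature, default)"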
2131 2133 def isonly(revs, bases):
2132 2134 return (
2133 2135 revs[0] == 'func'
2134 2136 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2135 2137 and bases[0] == 'not'
2136 2138 and bases[1][0] == 'func'
2137 2139 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2138 2140
2139 2141 w = min(wa, wb)
2140 2142 if isonly(ta, tb):
2141 2143 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2142 2144 if isonly(tb, ta):
2143 2145 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2144 2146
2145 2147 if wa > wb:
2146 2148 return w, (op, tb, ta)
2147 2149 return w, (op, ta, tb)
2148 2150 elif op == 'or':
2149 2151 wa, ta = optimize(x[1], False)
2150 2152 wb, tb = optimize(x[2], False)
2151 2153 if wb < wa:
2152 2154 wb, wa = wa, wb
2153 2155 return max(wa, wb), (op, ta, tb)
2154 2156 elif op == 'not':
2155 2157 o = optimize(x[1], not small)
2156 2158 return o[0], (op, o[1])
2157 2159 elif op == 'parentpost':
2158 2160 o = optimize(x[1], small)
2159 2161 return o[0], (op, o[1])
2160 2162 elif op == 'group':
2161 2163 return optimize(x[1], small)
2162 2164 elif op in 'dagrange range list parent ancestorspec':
2163 2165 if op == 'parent':
2164 2166 # x^:y means (x^) : y, not x ^ (:y)
2165 2167 post = ('parentpost', x[1])
2166 2168 if x[2][0] == 'dagrangepre':
2167 2169 return optimize(('dagrange', post, x[2][1]), small)
2168 2170 elif x[2][0] == 'rangepre':
2169 2171 return optimize(('range', post, x[2][1]), small)
2170 2172
2171 2173 wa, ta = optimize(x[1], small)
2172 2174 wb, tb = optimize(x[2], small)
2173 2175 return wa + wb, (op, ta, tb)
2174 2176 elif op == 'func':
2175 2177 f = getstring(x[1], _("not a symbol"))
2176 2178 wa, ta = optimize(x[2], small)
2177 2179 if f in ("author branch closed date desc file grep keyword "
2178 2180 "outgoing user"):
2179 2181 w = 10 # slow
2180 2182 elif f in "modifies adds removes":
2181 2183 w = 30 # slower
2182 2184 elif f == "contains":
2183 2185 w = 100 # very slow
2184 2186 elif f == "ancestor":
2185 2187 w = 1 * smallbonus
2186 2188 elif f in "reverse limit first _intlist":
2187 2189 w = 0
2188 2190 elif f in "sort":
2189 2191 w = 10 # assume most sorts look at changelog
2190 2192 else:
2191 2193 w = 1
2192 2194 return w + wa, (op, x[1], ta)
2193 2195 return 1, x
2194 2196
2195 2197 _aliasarg = ('func', ('symbol', '_aliasarg'))
2196 2198 def _getaliasarg(tree):
2197 2199 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2198 2200 return X, None otherwise.
2199 2201 """
2200 2202 if (len(tree) == 3 and tree[:2] == _aliasarg
2201 2203 and tree[2][0] == 'string'):
2202 2204 return tree[2][1]
2203 2205 return None
2204 2206
2205 2207 def _checkaliasarg(tree, known=None):
2206 2208 """Check tree contains no _aliasarg construct or only ones which
2207 2209 value is in known. Used to avoid alias placeholders injection.
2208 2210 """
2209 2211 if isinstance(tree, tuple):
2210 2212 arg = _getaliasarg(tree)
2211 2213 if arg is not None and (not known or arg not in known):
2212 2214 raise error.UnknownIdentifier('_aliasarg', [])
2213 2215 for t in tree:
2214 2216 _checkaliasarg(t, known)
2215 2217
2216 2218 # the set of valid characters for the initial letter of symbols in
2217 2219 # alias declarations and definitions
2218 2220 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2219 2221 if c.isalnum() or c in '._@$' or ord(c) > 127)
2220 2222
2221 2223 def _tokenizealias(program, lookup=None):
2222 2224 """Parse alias declaration/definition into a stream of tokens
2223 2225
2224 2226 This also allows symbol names to use ``$`` as an initial letter
2225 2227 (for backward compatibility), and callers of this function should
2226 2228 examine whether ``$`` is also used for unexpected symbols.
2227 2229 """
2228 2230 return tokenize(program, lookup=lookup,
2229 2231 syminitletters=_aliassyminitletters)
2230 2232
2231 2233 def _parsealiasdecl(decl):
2232 2234 """Parse alias declaration ``decl``
2233 2235
2234 2236 This returns ``(name, tree, args, errorstr)`` tuple:
2235 2237
2236 2238 - ``name``: of declared alias (may be ``decl`` itself at error)
2237 2239 - ``tree``: parse result (or ``None`` at error)
2238 2240 - ``args``: list of alias argument names (or None for symbol declaration)
2239 2241 - ``errorstr``: detail about detected error (or None)
2240 2242
2241 2243 >>> _parsealiasdecl('foo')
2242 2244 ('foo', ('symbol', 'foo'), None, None)
2243 2245 >>> _parsealiasdecl('$foo')
2244 2246 ('$foo', None, None, "'$' not for alias arguments")
2245 2247 >>> _parsealiasdecl('foo::bar')
2246 2248 ('foo::bar', None, None, 'invalid format')
2247 2249 >>> _parsealiasdecl('foo bar')
2248 2250 ('foo bar', None, None, 'at 4: invalid token')
2249 2251 >>> _parsealiasdecl('foo()')
2250 2252 ('foo', ('func', ('symbol', 'foo')), [], None)
2251 2253 >>> _parsealiasdecl('$foo()')
2252 2254 ('$foo()', None, None, "'$' not for alias arguments")
2253 2255 >>> _parsealiasdecl('foo($1, $2)')
2254 2256 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2255 2257 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2256 2258 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2257 2259 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2258 2260 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2259 2261 >>> _parsealiasdecl('foo(bar($1, $2))')
2260 2262 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2261 2263 >>> _parsealiasdecl('foo("string")')
2262 2264 ('foo("string")', None, None, 'invalid argument list')
2263 2265 >>> _parsealiasdecl('foo($1, $2')
2264 2266 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2265 2267 >>> _parsealiasdecl('foo("string')
2266 2268 ('foo("string', None, None, 'at 5: unterminated string')
2267 2269 >>> _parsealiasdecl('foo($1, $2, $1)')
2268 2270 ('foo', None, None, 'argument names collide with each other')
2269 2271 """
2270 2272 p = parser.parser(_tokenizealias, elements)
2271 2273 try:
2272 2274 tree, pos = p.parse(decl)
2273 2275 if (pos != len(decl)):
2274 2276 raise error.ParseError(_('invalid token'), pos)
2275 2277
2276 2278 if isvalidsymbol(tree):
2277 2279 # "name = ...." style
2278 2280 name = getsymbol(tree)
2279 2281 if name.startswith('$'):
2280 2282 return (decl, None, None, _("'$' not for alias arguments"))
2281 2283 return (name, ('symbol', name), None, None)
2282 2284
2283 2285 if isvalidfunc(tree):
2284 2286 # "name(arg, ....) = ...." style
2285 2287 name = getfuncname(tree)
2286 2288 if name.startswith('$'):
2287 2289 return (decl, None, None, _("'$' not for alias arguments"))
2288 2290 args = []
2289 2291 for arg in getfuncargs(tree):
2290 2292 if not isvalidsymbol(arg):
2291 2293 return (decl, None, None, _("invalid argument list"))
2292 2294 args.append(getsymbol(arg))
2293 2295 if len(args) != len(set(args)):
2294 2296 return (name, None, None,
2295 2297 _("argument names collide with each other"))
2296 2298 return (name, ('func', ('symbol', name)), args, None)
2297 2299
2298 2300 return (decl, None, None, _("invalid format"))
2299 2301 except error.ParseError, inst:
2300 2302 return (decl, None, None, parseerrordetail(inst))
2301 2303
2302 2304 def _parsealiasdefn(defn, args):
2303 2305 """Parse alias definition ``defn``
2304 2306
2305 2307 This function also replaces alias argument references in the
2306 2308 specified definition by ``_aliasarg(ARGNAME)``.
2307 2309
2308 2310 ``args`` is a list of alias argument names, or None if the alias
2309 2311 is declared as a symbol.
2310 2312
2311 2313 This returns "tree" as parsing result.
2312 2314
2313 2315 >>> args = ['$1', '$2', 'foo']
2314 2316 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2315 2317 (or
2316 2318 (func
2317 2319 ('symbol', '_aliasarg')
2318 2320 ('string', '$1'))
2319 2321 (func
2320 2322 ('symbol', '_aliasarg')
2321 2323 ('string', 'foo')))
2322 2324 >>> try:
2323 2325 ... _parsealiasdefn('$1 or $bar', args)
2324 2326 ... except error.ParseError, inst:
2325 2327 ... print parseerrordetail(inst)
2326 2328 at 6: '$' not for alias arguments
2327 2329 >>> args = ['$1', '$10', 'foo']
2328 2330 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2329 2331 (or
2330 2332 (func
2331 2333 ('symbol', '_aliasarg')
2332 2334 ('string', '$10'))
2333 2335 ('symbol', 'foobar'))
2334 2336 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2335 2337 (or
2336 2338 ('string', '$1')
2337 2339 ('string', 'foo'))
2338 2340 """
2339 2341 def tokenizedefn(program, lookup=None):
2340 2342 if args:
2341 2343 argset = set(args)
2342 2344 else:
2343 2345 argset = set()
2344 2346
2345 2347 for t, value, pos in _tokenizealias(program, lookup=lookup):
2346 2348 if t == 'symbol':
2347 2349 if value in argset:
2348 2350 # emulate tokenization of "_aliasarg('ARGNAME')":
2349 2351 # "_aliasarg()" is an unknown symbol only used separate
2350 2352 # alias argument placeholders from regular strings.
2351 2353 yield ('symbol', '_aliasarg', pos)
2352 2354 yield ('(', None, pos)
2353 2355 yield ('string', value, pos)
2354 2356 yield (')', None, pos)
2355 2357 continue
2356 2358 elif value.startswith('$'):
2357 2359 raise error.ParseError(_("'$' not for alias arguments"),
2358 2360 pos)
2359 2361 yield (t, value, pos)
2360 2362
2361 2363 p = parser.parser(tokenizedefn, elements)
2362 2364 tree, pos = p.parse(defn)
2363 2365 if pos != len(defn):
2364 2366 raise error.ParseError(_('invalid token'), pos)
2365 2367 return tree
2366 2368
2367 2369 class revsetalias(object):
2368 2370 # whether its own `error` information has already been shown or not.
2369 2371 # this avoids showing the same warning multiple times at each `findaliases`.
2370 2372 warned = False
2371 2373
2372 2374 def __init__(self, name, value):
2373 2375 '''Aliases like:
2374 2376
2375 2377 h = heads(default)
2376 2378 b($1) = ancestors($1) - ancestors(default)
2377 2379 '''
2378 2380 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2379 2381 if self.error:
2380 2382 self.error = _('failed to parse the declaration of revset alias'
2381 2383 ' "%s": %s') % (self.name, self.error)
2382 2384 return
2383 2385
2384 2386 try:
2385 2387 self.replacement = _parsealiasdefn(value, self.args)
2386 2388 # Check for placeholder injection
2387 2389 _checkaliasarg(self.replacement, self.args)
2388 2390 except error.ParseError, inst:
2389 2391 self.error = _('failed to parse the definition of revset alias'
2390 2392 ' "%s": %s') % (self.name, parseerrordetail(inst))
2391 2393
2392 2394 def _getalias(aliases, tree):
2393 2395 """If tree looks like an unexpanded alias, return it. Return None
2394 2396 otherwise.
2395 2397 """
2396 2398 if isinstance(tree, tuple) and tree:
2397 2399 if tree[0] == 'symbol' and len(tree) == 2:
2398 2400 name = tree[1]
2399 2401 alias = aliases.get(name)
2400 2402 if alias and alias.args is None and alias.tree == tree:
2401 2403 return alias
2402 2404 if tree[0] == 'func' and len(tree) > 1:
2403 2405 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2404 2406 name = tree[1][1]
2405 2407 alias = aliases.get(name)
2406 2408 if alias and alias.args is not None and alias.tree == tree[:2]:
2407 2409 return alias
2408 2410 return None
2409 2411
2410 2412 def _expandargs(tree, args):
2411 2413 """Replace _aliasarg instances with the substitution value of the
2412 2414 same name in args, recursively.
2413 2415 """
2414 2416 if not tree or not isinstance(tree, tuple):
2415 2417 return tree
2416 2418 arg = _getaliasarg(tree)
2417 2419 if arg is not None:
2418 2420 return args[arg]
2419 2421 return tuple(_expandargs(t, args) for t in tree)
2420 2422
2421 2423 def _expandaliases(aliases, tree, expanding, cache):
2422 2424 """Expand aliases in tree, recursively.
2423 2425
2424 2426 'aliases' is a dictionary mapping user defined aliases to
2425 2427 revsetalias objects.
2426 2428 """
2427 2429 if not isinstance(tree, tuple):
2428 2430 # Do not expand raw strings
2429 2431 return tree
2430 2432 alias = _getalias(aliases, tree)
2431 2433 if alias is not None:
2432 2434 if alias.error:
2433 2435 raise util.Abort(alias.error)
2434 2436 if alias in expanding:
2435 2437 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2436 2438 'detected') % alias.name)
2437 2439 expanding.append(alias)
2438 2440 if alias.name not in cache:
2439 2441 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2440 2442 expanding, cache)
2441 2443 result = cache[alias.name]
2442 2444 expanding.pop()
2443 2445 if alias.args is not None:
2444 2446 l = getlist(tree[2])
2445 2447 if len(l) != len(alias.args):
2446 2448 raise error.ParseError(
2447 2449 _('invalid number of arguments: %s') % len(l))
2448 2450 l = [_expandaliases(aliases, a, [], cache) for a in l]
2449 2451 result = _expandargs(result, dict(zip(alias.args, l)))
2450 2452 else:
2451 2453 result = tuple(_expandaliases(aliases, t, expanding, cache)
2452 2454 for t in tree)
2453 2455 return result
2454 2456
2455 2457 def findaliases(ui, tree, showwarning=None):
2456 2458 _checkaliasarg(tree)
2457 2459 aliases = {}
2458 2460 for k, v in ui.configitems('revsetalias'):
2459 2461 alias = revsetalias(k, v)
2460 2462 aliases[alias.name] = alias
2461 2463 tree = _expandaliases(aliases, tree, [], {})
2462 2464 if showwarning:
2463 2465 # warn about problematic (but not referred) aliases
2464 2466 for name, alias in sorted(aliases.iteritems()):
2465 2467 if alias.error and not alias.warned:
2466 2468 showwarning(_('warning: %s\n') % (alias.error))
2467 2469 alias.warned = True
2468 2470 return tree
2469 2471
2470 2472 def foldconcat(tree):
2471 2473 """Fold elements to be concatenated by `##`
2472 2474 """
2473 2475 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2474 2476 return tree
2475 2477 if tree[0] == '_concat':
2476 2478 pending = [tree]
2477 2479 l = []
2478 2480 while pending:
2479 2481 e = pending.pop()
2480 2482 if e[0] == '_concat':
2481 2483 pending.extend(reversed(e[1:]))
2482 2484 elif e[0] in ('string', 'symbol'):
2483 2485 l.append(e[1])
2484 2486 else:
2485 2487 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2486 2488 raise error.ParseError(msg)
2487 2489 return ('string', ''.join(l))
2488 2490 else:
2489 2491 return tuple(foldconcat(t) for t in tree)
2490 2492
2491 2493 def parse(spec, lookup=None):
2492 2494 p = parser.parser(tokenize, elements)
2493 2495 return p.parse(spec, lookup=lookup)
2494 2496
2495 2497 def posttreebuilthook(tree, repo):
2496 2498 # hook for extensions to execute code on the optimized tree
2497 2499 pass
2498 2500
2499 2501 def match(ui, spec, repo=None):
2500 2502 if not spec:
2501 2503 raise error.ParseError(_("empty query"))
2502 2504 lookup = None
2503 2505 if repo:
2504 2506 lookup = repo.__contains__
2505 2507 tree, pos = parse(spec, lookup)
2506 2508 if (pos != len(spec)):
2507 2509 raise error.ParseError(_("invalid token"), pos)
2508 2510 if ui:
2509 2511 tree = findaliases(ui, tree, showwarning=ui.warn)
2510 2512 tree = foldconcat(tree)
2511 2513 weight, tree = optimize(tree, True)
2512 2514 posttreebuilthook(tree, repo)
2513 2515 def mfunc(repo, subset=None):
2514 2516 if subset is None:
2515 2517 subset = fullreposet(repo)
2516 2518 if util.safehasattr(subset, 'isascending'):
2517 2519 result = getset(repo, subset, tree)
2518 2520 else:
2519 2521 result = getset(repo, baseset(subset), tree)
2520 2522 return result
2521 2523 return mfunc
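
# A minimal usage sketch (assuming `ui` and `repo` objects are at hand):
#
#   m = match(ui, 'draft()')
#   revs = m(repo)          # a smartset of all draft changesets
#   for r in revs:
#       ...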
2522 2524
2523 2525 def formatspec(expr, *args):
2524 2526 '''
2525 2527 This is a convenience function for using revsets internally, and
2526 2528 escapes arguments appropriately. Aliases are intentionally ignored
2527 2529 so that intended expression behavior isn't accidentally subverted.
2528 2530
2529 2531 Supported arguments:
2530 2532
2531 2533 %r = revset expression, parenthesized
2532 2534 %d = int(arg), no quoting
2533 2535 %s = string(arg), escaped and single-quoted
2534 2536 %b = arg.branch(), escaped and single-quoted
2535 2537 %n = hex(arg), single-quoted
2536 2538 %% = a literal '%'
2537 2539
2538 2540 Prefixing the type with 'l' specifies a parenthesized list of that type.
2539 2541
2540 2542 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2541 2543 '(10 or 11):: and ((this()) or (that()))'
2542 2544 >>> formatspec('%d:: and not %d::', 10, 20)
2543 2545 '10:: and not 20::'
2544 2546 >>> formatspec('%ld or %ld', [], [1])
2545 2547 "_list('') or 1"
2546 2548 >>> formatspec('keyword(%s)', 'foo\\xe9')
2547 2549 "keyword('foo\\\\xe9')"
2548 2550 >>> b = lambda: 'default'
2549 2551 >>> b.branch = b
2550 2552 >>> formatspec('branch(%b)', b)
2551 2553 "branch('default')"
2552 2554 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2553 2555 "root(_list('a\\x00b\\x00c\\x00d'))"
2554 2556 '''
2555 2557
2556 2558 def quote(s):
2557 2559 return repr(str(s))
2558 2560
2559 2561 def argtype(c, arg):
2560 2562 if c == 'd':
2561 2563 return str(int(arg))
2562 2564 elif c == 's':
2563 2565 return quote(arg)
2564 2566 elif c == 'r':
2565 2567 parse(arg) # make sure syntax errors are confined
2566 2568 return '(%s)' % arg
2567 2569 elif c == 'n':
2568 2570 return quote(node.hex(arg))
2569 2571 elif c == 'b':
2570 2572 return quote(arg.branch())
2571 2573
2572 2574 def listexp(s, t):
2573 2575 l = len(s)
2574 2576 if l == 0:
2575 2577 return "_list('')"
2576 2578 elif l == 1:
2577 2579 return argtype(t, s[0])
2578 2580 elif t == 'd':
2579 2581 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2580 2582 elif t == 's':
2581 2583 return "_list('%s')" % "\0".join(s)
2582 2584 elif t == 'n':
2583 2585 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2584 2586 elif t == 'b':
2585 2587 return "_list('%s')" % "\0".join(a.branch() for a in s)
2586 2588
2587 2589 m = l // 2
2588 2590 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2589 2591
2590 2592 ret = ''
2591 2593 pos = 0
2592 2594 arg = 0
2593 2595 while pos < len(expr):
2594 2596 c = expr[pos]
2595 2597 if c == '%':
2596 2598 pos += 1
2597 2599 d = expr[pos]
2598 2600 if d == '%':
2599 2601 ret += d
2600 2602 elif d in 'dsnbr':
2601 2603 ret += argtype(d, args[arg])
2602 2604 arg += 1
2603 2605 elif d == 'l':
2604 2606 # a list of some type
2605 2607 pos += 1
2606 2608 d = expr[pos]
2607 2609 ret += listexp(list(args[arg]), d)
2608 2610 arg += 1
2609 2611 else:
2610 2612 raise util.Abort('unexpected revspec format character %s' % d)
2611 2613 else:
2612 2614 ret += c
2613 2615 pos += 1
2614 2616
2615 2617 return ret
2616 2618
2617 2619 def prettyformat(tree):
2618 2620 def _prettyformat(tree, level, lines):
2619 2621 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2620 2622 lines.append((level, str(tree)))
2621 2623 else:
2622 2624 lines.append((level, '(%s' % tree[0]))
2623 2625 for s in tree[1:]:
2624 2626 _prettyformat(s, level + 1, lines)
2625 2627 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2626 2628
2627 2629 lines = []
2628 2630 _prettyformat(tree, 0, lines)
2629 2631 output = '\n'.join((' '*l + s) for l, s in lines)
2630 2632 return output
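
# For example:
#
#   >>> print prettyformat(('and', ('symbol', 'a'), ('symbol', 'b')))
#   (and
#     ('symbol', 'a')
#     ('symbol', 'b'))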
2631 2633
2632 2634 def depth(tree):
2633 2635 if isinstance(tree, tuple):
2634 2636 return max(map(depth, tree)) + 1
2635 2637 else:
2636 2638 return 0
2637 2639
2638 2640 def funcsused(tree):
2639 2641 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2640 2642 return set()
2641 2643 else:
2642 2644 funcs = set()
2643 2645 for s in tree[1:]:
2644 2646 funcs |= funcsused(s)
2645 2647 if tree[0] == 'func':
2646 2648 funcs.add(tree[1][1])
2647 2649 return funcs
2648 2650
2649 2651 class abstractsmartset(object):
2650 2652
2651 2653 def __nonzero__(self):
2652 2654 """True if the smartset is not empty"""
2653 2655 raise NotImplementedError()
2654 2656
2655 2657 def __contains__(self, rev):
2656 2658 """provide fast membership testing"""
2657 2659 raise NotImplementedError()
2658 2660
2659 2661 def __iter__(self):
2660 2662 """iterate the set in the order it is supposed to be iterated"""
2661 2663 raise NotImplementedError()
2662 2664
2663 2665 # Attributes containing a function to perform a fast iteration in a given
2664 2666 # direction. A smartset can have none, one, or both defined.
2665 2667 #
2666 2668 # Default value is None instead of a function returning None to avoid
2667 2669 # initializing an iterator just for testing if a fast method exists.
2668 2670 fastasc = None
2669 2671 fastdesc = None
2670 2672
2671 2673 def isascending(self):
2672 2674 """True if the set will iterate in ascending order"""
2673 2675 raise NotImplementedError()
2674 2676
2675 2677 def isdescending(self):
2676 2678 """True if the set will iterate in descending order"""
2677 2679 raise NotImplementedError()
2678 2680
2679 2681 def min(self):
2680 2682 """return the minimum element in the set"""
2681 2683 if self.fastasc is not None:
2682 2684 for r in self.fastasc():
2683 2685 return r
2684 2686 raise ValueError('arg is an empty sequence')
2685 2687 return min(self)
2686 2688
2687 2689 def max(self):
2688 2690 """return the maximum element in the set"""
2689 2691 if self.fastdesc is not None:
2690 2692 for r in self.fastdesc():
2691 2693 return r
2692 2694 raise ValueError('arg is an empty sequence')
2693 2695 return max(self)
2694 2696
2695 2697 def first(self):
2696 2698 """return the first element in the set (user iteration perspective)
2697 2699
2698 2700 Return None if the set is empty"""
2699 2701 raise NotImplementedError()
2700 2702
2701 2703 def last(self):
2702 2704 """return the last element in the set (user iteration perspective)
2703 2705
2704 2706 Return None if the set is empty"""
2705 2707 raise NotImplementedError()
2706 2708
2707 2709 def __len__(self):
2708 2710 """return the length of the smartsets
2709 2711
2710 2712 This can be expensive on smartset that could be lazy otherwise."""
2711 2713 raise NotImplementedError()
2712 2714
2713 2715 def reverse(self):
2714 2716 """reverse the expected iteration order"""
2715 2717 raise NotImplementedError()
2716 2718
2717 2719 def sort(self, reverse=True):
2718 2720 """get the set to iterate in an ascending or descending order"""
2719 2721 raise NotImplementedError()
2720 2722
2721 2723 def __and__(self, other):
2722 2724 """Returns a new object with the intersection of the two collections.
2723 2725
2724 2726 This is part of the mandatory API for smartset."""
2725 2727 if isinstance(other, fullreposet):
2726 2728 return self
2727 2729 return self.filter(other.__contains__, cache=False)
2728 2730
2729 2731 def __add__(self, other):
2730 2732 """Returns a new object with the union of the two collections.
2731 2733
2732 2734 This is part of the mandatory API for smartset."""
2733 2735 return addset(self, other)
2734 2736
2735 2737 def __sub__(self, other):
2736 2738 """Returns a new object with the substraction of the two collections.
2737 2739
2738 2740 This is part of the mandatory API for smartset."""
2739 2741 c = other.__contains__
2740 2742 return self.filter(lambda r: not c(r), cache=False)
2741 2743
2742 2744 def filter(self, condition, cache=True):
2743 2745 """Returns this smartset filtered by condition as a new smartset.
2744 2746
2745 2747 `condition` is a callable which takes a revision number and returns a
2746 2748 boolean.
2747 2749
2748 2750 This is part of the mandatory API for smartset."""
2749 2751 # builtin functions cannot be cached, but they do not need to be
2750 2752 if cache and util.safehasattr(condition, 'func_code'):
2751 2753 condition = util.cachefunc(condition)
2752 2754 return filteredset(self, condition)
2753 2755
2754 2756 class baseset(abstractsmartset):
2755 2757 """Basic data structure that represents a revset and contains the basic
2756 2758 operations that it should be able to perform.
2757 2759
2758 2760 Every method in this class should be implemented by any smartset class.
2759 2761 """
2760 2762 def __init__(self, data=()):
2761 2763 if not isinstance(data, list):
2762 2764 data = list(data)
2763 2765 self._list = data
2764 2766 self._ascending = None
2765 2767
2766 2768 @util.propertycache
2767 2769 def _set(self):
2768 2770 return set(self._list)
2769 2771
2770 2772 @util.propertycache
2771 2773 def _asclist(self):
2772 2774 asclist = self._list[:]
2773 2775 asclist.sort()
2774 2776 return asclist
2775 2777
2776 2778 def __iter__(self):
2777 2779 if self._ascending is None:
2778 2780 return iter(self._list)
2779 2781 elif self._ascending:
2780 2782 return iter(self._asclist)
2781 2783 else:
2782 2784 return reversed(self._asclist)
2783 2785
2784 2786 def fastasc(self):
2785 2787 return iter(self._asclist)
2786 2788
2787 2789 def fastdesc(self):
2788 2790 return reversed(self._asclist)
2789 2791
2790 2792 @util.propertycache
2791 2793 def __contains__(self):
2792 2794 return self._set.__contains__
2793 2795
2794 2796 def __nonzero__(self):
2795 2797 return bool(self._list)
2796 2798
2797 2799 def sort(self, reverse=False):
2798 2800 self._ascending = not bool(reverse)
2799 2801
2800 2802 def reverse(self):
2801 2803 if self._ascending is None:
2802 2804 self._list.reverse()
2803 2805 else:
2804 2806 self._ascending = not self._ascending
2805 2807
2806 2808 def __len__(self):
2807 2809 return len(self._list)
2808 2810
2809 2811 def isascending(self):
2810 2812 """Returns True if the collection is ascending order, False if not.
2811 2813
2812 2814 This is part of the mandatory API for smartset."""
2813 2815 if len(self) <= 1:
2814 2816 return True
2815 2817 return self._ascending is not None and self._ascending
2816 2818
2817 2819 def isdescending(self):
2818 2820 """Returns True if the collection is descending order, False if not.
2819 2821
2820 2822 This is part of the mandatory API for smartset."""
2821 2823 if len(self) <= 1:
2822 2824 return True
2823 2825 return self._ascending is not None and not self._ascending
2824 2826
2825 2827 def first(self):
2826 2828 if self:
2827 2829 if self._ascending is None:
2828 2830 return self._list[0]
2829 2831 elif self._ascending:
2830 2832 return self._asclist[0]
2831 2833 else:
2832 2834 return self._asclist[-1]
2833 2835 return None
2834 2836
2835 2837 def last(self):
2836 2838 if self:
2837 2839 if self._ascending is None:
2838 2840 return self._list[-1]
2839 2841 elif self._ascending:
2840 2842 return self._asclist[-1]
2841 2843 else:
2842 2844 return self._asclist[0]
2843 2845 return None
2844 2846
2845 2847 def __repr__(self):
2846 2848 d = {None: '', False: '-', True: '+'}[self._ascending]
2847 2849 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2848 2850
2849 2851 class filteredset(abstractsmartset):
2850 2852 """Duck type for baseset class which iterates lazily over the revisions in
2851 2853 the subset and contains a function which tests for membership in the
2852 2854 revset
2853 2855 """
2854 2856 def __init__(self, subset, condition=lambda x: True):
2855 2857 """
2856 2858 condition: a function that decides whether a revision in the subset
2857 2859 belongs to the revset or not.
2858 2860 """
2859 2861 self._subset = subset
2860 2862 self._condition = condition
2861 2863 self._cache = {}
2862 2864
2863 2865 def __contains__(self, x):
2864 2866 c = self._cache
2865 2867 if x not in c:
2866 2868 v = c[x] = x in self._subset and self._condition(x)
2867 2869 return v
2868 2870 return c[x]
2869 2871
2870 2872 def __iter__(self):
2871 2873 return self._iterfilter(self._subset)
2872 2874
2873 2875 def _iterfilter(self, it):
2874 2876 cond = self._condition
2875 2877 for x in it:
2876 2878 if cond(x):
2877 2879 yield x
2878 2880
2879 2881 @property
2880 2882 def fastasc(self):
2881 2883 it = self._subset.fastasc
2882 2884 if it is None:
2883 2885 return None
2884 2886 return lambda: self._iterfilter(it())
2885 2887
2886 2888 @property
2887 2889 def fastdesc(self):
2888 2890 it = self._subset.fastdesc
2889 2891 if it is None:
2890 2892 return None
2891 2893 return lambda: self._iterfilter(it())
2892 2894
2893 2895 def __nonzero__(self):
2894 2896 for r in self:
2895 2897 return True
2896 2898 return False
2897 2899
2898 2900 def __len__(self):
2899 2901 # Basic implementation to be changed in future patches.
2900 2902 l = baseset([r for r in self])
2901 2903 return len(l)
2902 2904
2903 2905 def sort(self, reverse=False):
2904 2906 self._subset.sort(reverse=reverse)
2905 2907
2906 2908 def reverse(self):
2907 2909 self._subset.reverse()
2908 2910
2909 2911 def isascending(self):
2910 2912 return self._subset.isascending()
2911 2913
2912 2914 def isdescending(self):
2913 2915 return self._subset.isdescending()
2914 2916
2915 2917 def first(self):
2916 2918 for x in self:
2917 2919 return x
2918 2920 return None
2919 2921
2920 2922 def last(self):
2921 2923 it = None
2922 2924 if self._subset.isascending():
2923 2925 it = self.fastdesc
2924 2926 elif self._subset.isdescending():
2925 2927 it = self.fastasc
2926 2928 if it is None:
2927 2929 # slowly consume everything. This needs improvement
2928 2930 it = lambda: reversed(list(self))
2929 2931 for x in it():
2930 2932 return x
2931 2933 return None
2932 2934
2933 2935 def __repr__(self):
2934 2936 return '<%s %r>' % (type(self).__name__, self._subset)
2935 2937
2936 2938 class addset(abstractsmartset):
2937 2939 """Represent the addition of two sets
2938 2940
2939 2941 Wrapper structure for lazily adding two structures without losing much
2940 2942 performance on the __contains__ method
2941 2943
2942 2944 If the ascending attribute is set, that means the two structures are
2943 2945 ordered in either an ascending or descending way. Therefore, we can add
2944 2946 them maintaining the order by iterating over both at the same time
2945 2947 """
2946 2948 def __init__(self, revs1, revs2, ascending=None):
2947 2949 self._r1 = revs1
2948 2950 self._r2 = revs2
2949 2951 self._iter = None
2950 2952 self._ascending = ascending
2951 2953 self._genlist = None
2952 2954 self._asclist = None
2953 2955
2954 2956 def __len__(self):
2955 2957 return len(self._list)
2956 2958
2957 2959 def __nonzero__(self):
2958 2960 return bool(self._r1) or bool(self._r2)
2959 2961
2960 2962 @util.propertycache
2961 2963 def _list(self):
2962 2964 if not self._genlist:
2963 2965 self._genlist = baseset(self._iterator())
2964 2966 return self._genlist
2965 2967
2966 2968 def _iterator(self):
2967 2969 """Iterate over both collections without repeating elements
2968 2970
2969 2971 If the ascending attribute is not set, iterate over the first one and
2970 2972 then over the second one checking for membership on the first one so we
2971 2973 don't yield any duplicates.
2972 2974
2973 2975 If the ascending attribute is set, iterate over both collections at the
2974 2976 same time, yielding only one value at a time in the given order.
2975 2977 """
2976 2978 if self._ascending is None:
2977 2979 def gen():
2978 2980 for r in self._r1:
2979 2981 yield r
2980 2982 inr1 = self._r1.__contains__
2981 2983 for r in self._r2:
2982 2984 if not inr1(r):
2983 2985 yield r
2984 2986 gen = gen()
2985 2987 else:
2986 2988 iter1 = iter(self._r1)
2987 2989 iter2 = iter(self._r2)
2988 2990 gen = self._iterordered(self._ascending, iter1, iter2)
2989 2991 return gen
2990 2992
2991 2993 def __iter__(self):
2992 2994 if self._ascending is None:
2993 2995 if self._genlist:
2994 2996 return iter(self._genlist)
2995 2997 return iter(self._iterator())
2996 2998 self._trysetasclist()
2997 2999 if self._ascending:
2998 3000 it = self.fastasc
2999 3001 else:
3000 3002 it = self.fastdesc
3001 3003 if it is None:
3002 3004 # consume the gen and try again
3003 3005 self._list
3004 3006 return iter(self)
3005 3007 return it()
3006 3008
3007 3009 def _trysetasclist(self):
3008 3010 """populate the _asclist attribute if possible and necessary"""
3009 3011 if self._genlist is not None and self._asclist is None:
3010 3012 self._asclist = sorted(self._genlist)
3011 3013
3012 3014 @property
3013 3015 def fastasc(self):
3014 3016 self._trysetasclist()
3015 3017 if self._asclist is not None:
3016 3018 return self._asclist.__iter__
3017 3019 iter1 = self._r1.fastasc
3018 3020 iter2 = self._r2.fastasc
3019 3021 if None in (iter1, iter2):
3020 3022 return None
3021 3023 return lambda: self._iterordered(True, iter1(), iter2())
3022 3024
3023 3025 @property
3024 3026 def fastdesc(self):
3025 3027 self._trysetasclist()
3026 3028 if self._asclist is not None:
3027 3029 return self._asclist.__reversed__
3028 3030 iter1 = self._r1.fastdesc
3029 3031 iter2 = self._r2.fastdesc
3030 3032 if None in (iter1, iter2):
3031 3033 return None
3032 3034 return lambda: self._iterordered(False, iter1(), iter2())
3033 3035
3034 3036 def _iterordered(self, ascending, iter1, iter2):
3035 3037 """produce an ordered iteration from two iterators with the same order
3036 3038
3037 3039 The ascending parameter is used to indicate the iteration direction.
3038 3040 """
3039 3041 choice = max
3040 3042 if ascending:
3041 3043 choice = min
3042 3044
3043 3045 val1 = None
3044 3046 val2 = None
3045 3047
3049 3051 try:
3050 3052 # Consume both iterators in an ordered way until one is
3051 3053 # empty
3052 3054 while True:
3053 3055 if val1 is None:
3054 3056 val1 = iter1.next()
3055 3057 if val2 is None:
3056 3058 val2 = iter2.next()
3057 3059 next = choice(val1, val2)
3058 3060 yield next
3059 3061 if val1 == next:
3060 3062 val1 = None
3061 3063 if val2 == next:
3062 3064 val2 = None
3063 3065 except StopIteration:
3064 3066 # Flush any remaining values and consume the other one
3065 3067 it = iter2
3066 3068 if val1 is not None:
3067 3069 yield val1
3068 3070 it = iter1
3069 3071 elif val2 is not None:
3070 3072 # might have been equality and both are empty
3071 3073 yield val2
3072 3074 for val in it:
3073 3075 yield val
3074 3076
3075 3077 def __contains__(self, x):
3076 3078 return x in self._r1 or x in self._r2
3077 3079
3078 3080 def sort(self, reverse=False):
3079 3081 """Sort the added set
3080 3082
3081 3083 For this we use the cached list with all the generated values, and if we
3082 3084 know they are ascending or descending we can sort them in a smart way.
3083 3085 """
3084 3086 self._ascending = not reverse
3085 3087
3086 3088 def isascending(self):
3087 3089 return self._ascending is not None and self._ascending
3088 3090
3089 3091 def isdescending(self):
3090 3092 return self._ascending is not None and not self._ascending
3091 3093
3092 3094 def reverse(self):
3093 3095 if self._ascending is None:
3094 3096 self._list.reverse()
3095 3097 else:
3096 3098 self._ascending = not self._ascending
3097 3099
3098 3100 def first(self):
3099 3101 for x in self:
3100 3102 return x
3101 3103 return None
3102 3104
3103 3105 def last(self):
3104 3106 self.reverse()
3105 3107 val = self.first()
3106 3108 self.reverse()
3107 3109 return val
3108 3110
3109 3111 def __repr__(self):
3110 3112 d = {None: '', False: '-', True: '+'}[self._ascending]
3111 3113 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3112 3114
3113 3115 class generatorset(abstractsmartset):
3114 3116 """Wrap a generator for lazy iteration
3115 3117
3116 3118 Wrapper structure for generators that provides lazy membership and can
3117 3119 be iterated more than once.
3118 3120 When asked for membership it generates values until either it finds the
3119 3121 requested one or has gone through all the elements in the generator.
3120 3122 """
3121 3123 def __init__(self, gen, iterasc=None):
3122 3124 """
3123 3125 gen: a generator producing the values for the generatorset.
3124 3126 """
3125 3127 self._gen = gen
3126 3128 self._asclist = None
3127 3129 self._cache = {}
3128 3130 self._genlist = []
3129 3131 self._finished = False
3130 3132 self._ascending = True
3131 3133 if iterasc is not None:
3132 3134 if iterasc:
3133 3135 self.fastasc = self._iterator
3134 3136 self.__contains__ = self._asccontains
3135 3137 else:
3136 3138 self.fastdesc = self._iterator
3137 3139 self.__contains__ = self._desccontains
3138 3140
3139 3141 def __nonzero__(self):
3140 3142 # Do not use 'for r in self' because it will enforce the iteration
3141 3143 # order (default ascending), possibly unrolling a whole descending
3142 3144 # iterator.
3143 3145 if self._genlist:
3144 3146 return True
3145 3147 for r in self._consumegen():
3146 3148 return True
3147 3149 return False
3148 3150
3149 3151 def __contains__(self, x):
3150 3152 if x in self._cache:
3151 3153 return self._cache[x]
3152 3154
3153 3155 # Use new values only, as existing values would be cached.
3154 3156 for l in self._consumegen():
3155 3157 if l == x:
3156 3158 return True
3157 3159
3158 3160 self._cache[x] = False
3159 3161 return False
3160 3162
3161 3163 def _asccontains(self, x):
3162 3164 """version of contains optimised for ascending generator"""
3163 3165 if x in self._cache:
3164 3166 return self._cache[x]
3165 3167
3166 3168 # Use new values only, as existing values would be cached.
3167 3169 for l in self._consumegen():
3168 3170 if l == x:
3169 3171 return True
3170 3172 if l > x:
3171 3173 break
3172 3174
3173 3175 self._cache[x] = False
3174 3176 return False
3175 3177
3176 3178 def _desccontains(self, x):
3177 3179 """version of contains optimised for descending generator"""
3178 3180 if x in self._cache:
3179 3181 return self._cache[x]
3180 3182
3181 3183 # Use new values only, as existing values would be cached.
3182 3184 for l in self._consumegen():
3183 3185 if l == x:
3184 3186 return True
3185 3187 if l < x:
3186 3188 break
3187 3189
3188 3190 self._cache[x] = False
3189 3191 return False
3190 3192
3191 3193 def __iter__(self):
3192 3194 if self._ascending:
3193 3195 it = self.fastasc
3194 3196 else:
3195 3197 it = self.fastdesc
3196 3198 if it is not None:
3197 3199 return it()
3198 3200 # we need to consume the iterator
3199 3201 for x in self._consumegen():
3200 3202 pass
3201 3203 # recall the same code
3202 3204 return iter(self)
3203 3205
3204 3206 def _iterator(self):
3205 3207 if self._finished:
3206 3208 return iter(self._genlist)
3207 3209
3208 3210 # We have to use this complex iteration strategy to allow multiple
3209 3211 # iterations at the same time. We need to be able to pick up revisions
3210 3212 # consumed from _consumegen and added to genlist by another instance.
3211 3213 #
3212 3214 # Getting rid of it would provide about a 15% speed-up on this
3213 3215 # iteration.
3214 3216 genlist = self._genlist
3215 3217 nextrev = self._consumegen().next
3216 3218 _len = len # cache global lookup
3217 3219 def gen():
3218 3220 i = 0
3219 3221 while True:
3220 3222 if i < _len(genlist):
3221 3223 yield genlist[i]
3222 3224 else:
3223 3225 yield nextrev()
3224 3226 i += 1
3225 3227 return gen()
3226 3228
3227 3229 def _consumegen(self):
3228 3230 cache = self._cache
3229 3231 genlist = self._genlist.append
3230 3232 for item in self._gen:
3231 3233 cache[item] = True
3232 3234 genlist(item)
3233 3235 yield item
3234 3236 if not self._finished:
3235 3237 self._finished = True
3236 3238 asc = self._genlist[:]
3237 3239 asc.sort()
3238 3240 self._asclist = asc
3239 3241 self.fastasc = asc.__iter__
3240 3242 self.fastdesc = asc.__reversed__
3241 3243
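# Once the generator is exhausted, fastasc/fastdesc above become plain
# iterators over the sorted _asclist. A small sketch with made-up values:
#
#   >>> s = generatorset(iter([3, 1, 2]))    # no iterasc hint
#   >>> len(s)                               # forces full consumption
#   3
#   >>> list(s.fastasc()), list(s.fastdesc())
#   ([1, 2, 3], [3, 2, 1])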
3242 3244 def __len__(self):
3243 3245 for x in self._consumegen():
3244 3246 pass
3245 3247 return len(self._genlist)
3246 3248
3247 3249 def sort(self, reverse=False):
3248 3250 self._ascending = not reverse
3249 3251
3250 3252 def reverse(self):
3251 3253 self._ascending = not self._ascending
3252 3254
3253 3255 def isascending(self):
3254 3256 return self._ascending
3255 3257
3256 3258 def isdescending(self):
3257 3259 return not self._ascending
3258 3260
3259 3261 def first(self):
3260 3262 if self._ascending:
3261 3263 it = self.fastasc
3262 3264 else:
3263 3265 it = self.fastdesc
3264 3266 if it is None:
3265 3267 # we need to consume all and try again
3266 3268 for x in self._consumegen():
3267 3269 pass
3268 3270 return self.first()
3269 3271 if self:
3270 3272 return it().next()
3271 3273 return None
3272 3274
3273 3275 def last(self):
3274 3276 if self._ascending:
3275 3277 it = self.fastdesc
3276 3278 else:
3277 3279 it = self.fastasc
3278 3280 if it is None:
3279 3281 # we need to consume all and try again
3280 3282 for x in self._consumegen():
3281 3283 pass
3282 3284 return self.last()
3283 3285 if self:
3284 3286 return it().next()
3285 3287 return None
3286 3288
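# A quick sketch of first()/last() on an ascending generatorset (made-up
# values); when there is no descending fast path yet, last() consumes
# everything and retries:
#
#   >>> s = generatorset(iter([1, 4, 7]), iterasc=True)
#   >>> s.first(), s.last()
#   (1, 7)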
3287 3289 def __repr__(self):
3288 3290 d = {False: '-', True: '+'}[self._ascending]
3289 3291 return '<%s%s>' % (type(self).__name__, d)
3290 3292
3291 3293 class spanset(abstractsmartset):
3292 3294 """Duck type for baseset class which represents a range of revisions and
3293 3295 can work lazily and without having all the range in memory
3294 3296
3295 3297 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3296 3298 notable points:
3297 3299 - when x < y it will be automatically descending,
3298 3300 - revision filtered with this repoview will be skipped.
3299 3301
3300 3302 """
3301 3303 def __init__(self, repo, start=0, end=None):
3302 3304 """
3303 3305 start: first revision included in the set
3304 3306 (defaults to 0)
3305 3307 end: first revision excluded (last + 1)
3306 3308 (defaults to len(repo))
3307 3309
3308 3310 Spanset will be descending if `end` < `start`.
3309 3311 """
3310 3312 if end is None:
3311 3313 end = len(repo)
3312 3314 self._ascending = start <= end
3313 3315 if not self._ascending:
3314 3316 start, end = end + 1, start + 1
3315 3317 self._start = start
3316 3318 self._end = end
3317 3319 self._hiddenrevs = repo.changelog.filteredrevs
3318 3320
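# A hedged sketch of the direction handling above; `repo` is assumed to be a
# repository object with at least six visible revisions:
#
#   >>> list(spanset(repo, 2, 5))    # ascending, like xrange(2, 5)
#   [2, 3, 4]
#   >>> list(spanset(repo, 5, 2))    # start > end: automatically descending
#   [5, 4, 3]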
3319 3321 def sort(self, reverse=False):
3320 3322 self._ascending = not reverse
3321 3323
3322 3324 def reverse(self):
3323 3325 self._ascending = not self._ascending
3324 3326
3325 3327 def _iterfilter(self, iterrange):
3326 3328 s = self._hiddenrevs
3327 3329 for r in iterrange:
3328 3330 if r not in s:
3329 3331 yield r
3330 3332
3331 3333 def __iter__(self):
3332 3334 if self._ascending:
3333 3335 return self.fastasc()
3334 3336 else:
3335 3337 return self.fastdesc()
3336 3338
3337 3339 def fastasc(self):
3338 3340 iterrange = xrange(self._start, self._end)
3339 3341 if self._hiddenrevs:
3340 3342 return self._iterfilter(iterrange)
3341 3343 return iter(iterrange)
3342 3344
3343 3345 def fastdesc(self):
3344 3346 iterrange = xrange(self._end - 1, self._start - 1, -1)
3345 3347 if self._hiddenrevs:
3346 3348 return self._iterfilter(iterrange)
3347 3349 return iter(iterrange)
3348 3350
3349 3351 def __contains__(self, rev):
3350 3352 hidden = self._hiddenrevs
3351 3353 return ((self._start <= rev < self._end)
3352 3354 and not (hidden and rev in hidden))
3353 3355
3354 3356 def __nonzero__(self):
3355 3357 for r in self:
3356 3358 return True
3357 3359 return False
3358 3360
3359 3361 def __len__(self):
3360 3362 if not self._hiddenrevs:
3361 3363 return abs(self._end - self._start)
3362 3364 else:
3363 3365 count = 0
3364 3366 start = self._start
3365 3367 end = self._end
3366 3368 for rev in self._hiddenrevs:
3367 3369 if start <= rev < end: # start <= end always holds after __init__
3368 3370 count += 1
3369 3371 return abs(self._end - self._start) - count
3370 3372
3371 3373 def isascending(self):
3372 3374 return self._ascending
3373 3375
3374 3376 def isdescending(self):
3375 3377 return not self._ascending
3376 3378
3377 3379 def first(self):
3378 3380 if self._ascending:
3379 3381 it = self.fastasc
3380 3382 else:
3381 3383 it = self.fastdesc
3382 3384 for x in it():
3383 3385 return x
3384 3386 return None
3385 3387
3386 3388 def last(self):
3387 3389 if self._ascending:
3388 3390 it = self.fastdesc
3389 3391 else:
3390 3392 it = self.fastasc
3391 3393 for x in it():
3392 3394 return x
3393 3395 return None
3394 3396
3395 3397 def __repr__(self):
3396 3398 d = {False: '-', True: '+'}[self._ascending]
3397 3399 return '<%s%s %d:%d>' % (type(self).__name__, d,
3398 3400 self._start, self._end - 1)
3399 3401
3400 3402 class fullreposet(spanset):
3401 3403 """a set containing all revisions in the repo
3402 3404
3403 3405 This class exists to host the special optimizations and magic needed to
3404 3406 handle virtual revisions such as "null".
3405 3407 """
3406 3408
3407 3409 def __init__(self, repo):
3408 3410 super(fullreposet, self).__init__(repo)
3409 3411
3410 3412 def __contains__(self, rev):
3411 3413 # assumes the given rev is valid
3412 3414 hidden = self._hiddenrevs
3413 3415 return not (hidden and rev in hidden)
3414 3416
3415 3417 def __and__(self, other):
3416 3418 """As self contains the whole repo, all of the other set should also be
3417 3419 in self. Therefore `self & other = other`.
3418 3420
3419 3421 This boldly assumes the other contains valid revs only.
3420 3422 """
3421 3423 # other is not a smartset, make it so
3422 3424 if not util.safehasattr(other, 'isascending'):
3423 3425 # filter out hidden revisions
3424 3426 # (this boldly assumes all smartsets are pure)
3425 3427 #
3426 3428 # `other` was used with "&", so let's assume it is a set-like
3427 3429 # object.
3428 3430 other = baseset(other - self._hiddenrevs)
3429 3431
3430 3432 other.sort(reverse=self.isdescending())
3431 3433 return other
3432 3434
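# A hedged sketch of the shortcut above; `repo` is assumed to be a repository
# with no hidden revisions, and the listed revs are made up:
#
#   >>> rs = fullreposet(repo)
#   >>> other = baseset([5, 2, 8])
#   >>> list(rs & other)    # the other operand, re-sorted to match rs
#   [2, 5, 8]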
3433 3435 def prettyformatset(revs):
3434 3436 lines = []
3435 3437 rs = repr(revs)
3436 3438 p = 0
3437 3439 while p < len(rs):
3438 3440 q = rs.find('<', p + 1)
3439 3441 if q < 0:
3440 3442 q = len(rs)
3441 3443 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3442 3444 assert l >= 0
3443 3445 lines.append((l, rs[p:q].rstrip()))
3444 3446 p = q
3445 3447 return '\n'.join(' ' * l + s for l, s in lines)
3446 3448
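# A sketch of the kind of output prettyformatset produces; the repr below is
# hypothetical and depends on which smartset classes make up the set:
#
#   given a set whose repr() is '<filteredset <spanset+ 0:9>>', the result is
#
#       <filteredset
#        <spanset+ 0:9>>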
3447 3449 # tell hggettext to extract docstrings from these functions:
3448 3450 i18nfunctions = symbols.values()