_revancestors: use 'next' to remove the verbose try except clauses...
Pierre-Yves David
r25143:91c49621 default
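The change below replaces two try/except StopIteration clauses in _revancestors() with the next() builtin, which accepts a default value and returns it when the iterator is exhausted. The following toy sketch is not part of the changeset; it uses made-up revision numbers and a local heap purely to contrast the two idioms the commit message refers to:

    import heapq

    revs = [7, 3, 5]              # made-up revision numbers for illustration

    # Old idiom: call the iterator's .next() method (Python 2 spelling) and
    # guard against StopIteration by hand.
    h = []
    irevs = iter(sorted(revs, reverse=True))
    try:
        inputrev = irevs.next()
        heapq.heappush(h, -inputrev)
    except StopIteration:
        inputrev = None           # nothing to seed the heap with

    # New idiom: the next() builtin takes a default, so the empty-iterator
    # case becomes a plain None check and the try/except disappears.
    h = []
    irevs = iter(sorted(revs, reverse=True))
    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(h, -inputrev)

None is a safe exhaustion sentinel here because revision numbers are always integers, so the None check cannot collide with a real value.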
@@ -1,3509 +1,3506 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revs.sort(reverse=True)
29 29 irevs = iter(revs)
30 30 h = []
31 try:
32 inputrev = irevs.next()
31
32 inputrev = next(irevs, None)
33 if inputrev is not None:
33 34 heapq.heappush(h, -inputrev)
34 except StopIteration:
35 return
36 35
37 36 seen = set()
38 37 while h:
39 38 current = -heapq.heappop(h)
40 39 if current == inputrev:
41 try:
42 inputrev = irevs.next()
40 inputrev = next(irevs, None)
41 if inputrev is not None:
43 42 heapq.heappush(h, -inputrev)
44 except StopIteration:
45 pass
46 43 if current not in seen:
47 44 seen.add(current)
48 45 yield current
49 46 for parent in cl.parentrevs(current)[:cut]:
50 47 if parent != node.nullrev:
51 48 heapq.heappush(h, -parent)
52 49
53 50 return generatorset(iterate(), iterasc=False)
54 51
55 52 def _revdescendants(repo, revs, followfirst):
56 53 """Like revlog.descendants() but supports followfirst."""
57 54 if followfirst:
58 55 cut = 1
59 56 else:
60 57 cut = None
61 58
62 59 def iterate():
63 60 cl = repo.changelog
64 61 first = min(revs)
65 62 nullrev = node.nullrev
66 63 if first == nullrev:
67 64 # Are there nodes with a null first parent and a non-null
68 65 # second one? Maybe. Do we care? Probably not.
69 66 for i in cl:
70 67 yield i
71 68 else:
72 69 seen = set(revs)
73 70 for i in cl.revs(first + 1):
74 71 for x in cl.parentrevs(i)[:cut]:
75 72 if x != nullrev and x in seen:
76 73 seen.add(i)
77 74 yield i
78 75 break
79 76
80 77 return generatorset(iterate(), iterasc=True)
81 78
82 79 def _revsbetween(repo, roots, heads):
83 80 """Return all paths between roots and heads, inclusive of both endpoint
84 81 sets."""
85 82 if not roots:
86 83 return baseset()
87 84 parentrevs = repo.changelog.parentrevs
88 85 visit = list(heads)
89 86 reachable = set()
90 87 seen = {}
91 88 minroot = min(roots)
92 89 roots = set(roots)
93 90 # open-code the post-order traversal due to the tiny size of
94 91 # sys.getrecursionlimit()
95 92 while visit:
96 93 rev = visit.pop()
97 94 if rev in roots:
98 95 reachable.add(rev)
99 96 parents = parentrevs(rev)
100 97 seen[rev] = parents
101 98 for parent in parents:
102 99 if parent >= minroot and parent not in seen:
103 100 visit.append(parent)
104 101 if not reachable:
105 102 return baseset()
106 103 for rev in sorted(seen):
107 104 for parent in seen[rev]:
108 105 if parent in reachable:
109 106 reachable.add(rev)
110 107 return baseset(sorted(reachable))
111 108
112 109 elements = {
113 110 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
114 111 "##": (20, None, ("_concat", 20)),
115 112 "~": (18, None, ("ancestor", 18)),
116 113 "^": (18, None, ("parent", 18), ("parentpost", 18)),
117 114 "-": (5, ("negate", 19), ("minus", 5)),
118 115 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
119 116 ("dagrangepost", 17)),
120 117 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
121 118 ("dagrangepost", 17)),
122 119 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
123 120 "not": (10, ("not", 10)),
124 121 "!": (10, ("not", 10)),
125 122 "and": (5, None, ("and", 5)),
126 123 "&": (5, None, ("and", 5)),
127 124 "%": (5, None, ("only", 5), ("onlypost", 5)),
128 125 "or": (4, None, ("or", 4)),
129 126 "|": (4, None, ("or", 4)),
130 127 "+": (4, None, ("or", 4)),
131 128 ",": (2, None, ("list", 2)),
132 129 ")": (0, None, None),
133 130 "symbol": (0, ("symbol",), None),
134 131 "string": (0, ("string",), None),
135 132 "end": (0, None, None),
136 133 }
137 134
138 135 keywords = set(['and', 'or', 'not'])
139 136
140 137 # default set of valid characters for the initial letter of symbols
141 138 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
142 139 if c.isalnum() or c in '._@' or ord(c) > 127)
143 140
144 141 # default set of valid characters for non-initial letters of symbols
145 142 _symletters = set(c for c in [chr(i) for i in xrange(256)]
146 143 if c.isalnum() or c in '-._/@' or ord(c) > 127)
147 144
148 145 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
149 146 '''
150 147 Parse a revset statement into a stream of tokens
151 148
152 149 ``syminitletters`` is the set of valid characters for the initial
153 150 letter of symbols.
154 151
155 152 By default, character ``c`` is recognized as valid for initial
156 153 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
157 154
158 155 ``symletters`` is the set of valid characters for non-initial
159 156 letters of symbols.
160 157
161 158 By default, character ``c`` is recognized as valid for non-initial
162 159 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
163 160
164 161 Check that @ is a valid unquoted token character (issue3686):
165 162 >>> list(tokenize("@::"))
166 163 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
167 164
168 165 '''
169 166 if syminitletters is None:
170 167 syminitletters = _syminitletters
171 168 if symletters is None:
172 169 symletters = _symletters
173 170
174 171 pos, l = 0, len(program)
175 172 while pos < l:
176 173 c = program[pos]
177 174 if c.isspace(): # skip inter-token whitespace
178 175 pass
179 176 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
180 177 yield ('::', None, pos)
181 178 pos += 1 # skip ahead
182 179 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
183 180 yield ('..', None, pos)
184 181 pos += 1 # skip ahead
185 182 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
186 183 yield ('##', None, pos)
187 184 pos += 1 # skip ahead
188 185 elif c in "():,-|&+!~^%": # handle simple operators
189 186 yield (c, None, pos)
190 187 elif (c in '"\'' or c == 'r' and
191 188 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
192 189 if c == 'r':
193 190 pos += 1
194 191 c = program[pos]
195 192 decode = lambda x: x
196 193 else:
197 194 decode = lambda x: x.decode('string-escape')
198 195 pos += 1
199 196 s = pos
200 197 while pos < l: # find closing quote
201 198 d = program[pos]
202 199 if d == '\\': # skip over escaped characters
203 200 pos += 2
204 201 continue
205 202 if d == c:
206 203 yield ('string', decode(program[s:pos]), s)
207 204 break
208 205 pos += 1
209 206 else:
210 207 raise error.ParseError(_("unterminated string"), s)
211 208 # gather up a symbol/keyword
212 209 elif c in syminitletters:
213 210 s = pos
214 211 pos += 1
215 212 while pos < l: # find end of symbol
216 213 d = program[pos]
217 214 if d not in symletters:
218 215 break
219 216 if d == '.' and program[pos - 1] == '.': # special case for ..
220 217 pos -= 1
221 218 break
222 219 pos += 1
223 220 sym = program[s:pos]
224 221 if sym in keywords: # operator keywords
225 222 yield (sym, None, s)
226 223 elif '-' in sym:
227 224 # some jerk gave us foo-bar-baz, try to check if it's a symbol
228 225 if lookup and lookup(sym):
229 226 # looks like a real symbol
230 227 yield ('symbol', sym, s)
231 228 else:
232 229 # looks like an expression
233 230 parts = sym.split('-')
234 231 for p in parts[:-1]:
235 232 if p: # possible consecutive -
236 233 yield ('symbol', p, s)
237 234 s += len(p)
238 235 yield ('-', None, pos)
239 236 s += 1
240 237 if parts[-1]: # possible trailing -
241 238 yield ('symbol', parts[-1], s)
242 239 else:
243 240 yield ('symbol', sym, s)
244 241 pos -= 1
245 242 else:
246 243 raise error.ParseError(_("syntax error in revset '%s'") %
247 244 program, pos)
248 245 pos += 1
249 246 yield ('end', None, pos)
250 247
251 248 def parseerrordetail(inst):
252 249 """Compose error message from specified ParseError object
253 250 """
254 251 if len(inst.args) > 1:
255 252 return _('at %s: %s') % (inst.args[1], inst.args[0])
256 253 else:
257 254 return inst.args[0]
258 255
259 256 # helpers
260 257
261 258 def getstring(x, err):
262 259 if x and (x[0] == 'string' or x[0] == 'symbol'):
263 260 return x[1]
264 261 raise error.ParseError(err)
265 262
266 263 def getlist(x):
267 264 if not x:
268 265 return []
269 266 if x[0] == 'list':
270 267 return getlist(x[1]) + [x[2]]
271 268 return [x]
272 269
273 270 def getargs(x, min, max, err):
274 271 l = getlist(x)
275 272 if len(l) < min or (max >= 0 and len(l) > max):
276 273 raise error.ParseError(err)
277 274 return l
278 275
279 276 def isvalidsymbol(tree):
280 277 """Examine whether specified ``tree`` is valid ``symbol`` or not
281 278 """
282 279 return tree[0] == 'symbol' and len(tree) > 1
283 280
284 281 def getsymbol(tree):
285 282 """Get symbol name from valid ``symbol`` in ``tree``
286 283
287 284 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
288 285 """
289 286 return tree[1]
290 287
291 288 def isvalidfunc(tree):
292 289 """Examine whether specified ``tree`` is valid ``func`` or not
293 290 """
294 291 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
295 292
296 293 def getfuncname(tree):
297 294 """Get function name from valid ``func`` in ``tree``
298 295
299 296 This assumes that ``tree`` is already examined by ``isvalidfunc``.
300 297 """
301 298 return getsymbol(tree[1])
302 299
303 300 def getfuncargs(tree):
304 301 """Get list of function arguments from valid ``func`` in ``tree``
305 302
306 303 This assumes that ``tree`` is already examined by ``isvalidfunc``.
307 304 """
308 305 if len(tree) > 2:
309 306 return getlist(tree[2])
310 307 else:
311 308 return []
312 309
313 310 def getset(repo, subset, x):
314 311 if not x:
315 312 raise error.ParseError(_("missing argument"))
316 313 s = methods[x[0]](repo, subset, *x[1:])
317 314 if util.safehasattr(s, 'isascending'):
318 315 return s
319 316 return baseset(s)
320 317
321 318 def _getrevsource(repo, r):
322 319 extra = repo[r].extra()
323 320 for label in ('source', 'transplant_source', 'rebase_source'):
324 321 if label in extra:
325 322 try:
326 323 return repo[extra[label]].rev()
327 324 except error.RepoLookupError:
328 325 pass
329 326 return None
330 327
331 328 # operator methods
332 329
333 330 def stringset(repo, subset, x):
334 331 x = repo[x].rev()
335 332 if x in subset:
336 333 return baseset([x])
337 334 return baseset()
338 335
339 336 def rangeset(repo, subset, x, y):
340 337 m = getset(repo, fullreposet(repo), x)
341 338 n = getset(repo, fullreposet(repo), y)
342 339
343 340 if not m or not n:
344 341 return baseset()
345 342 m, n = m.first(), n.last()
346 343
347 344 if m < n:
348 345 r = spanset(repo, m, n + 1)
349 346 else:
350 347 r = spanset(repo, m, n - 1)
351 348 return r & subset
352 349
353 350 def dagrange(repo, subset, x, y):
354 351 r = fullreposet(repo)
355 352 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
356 353 return xs & subset
357 354
358 355 def andset(repo, subset, x, y):
359 356 return getset(repo, getset(repo, subset, x), y)
360 357
361 358 def orset(repo, subset, x, y):
362 359 xl = getset(repo, subset, x)
363 360 yl = getset(repo, subset, y)
364 361 return xl + yl
365 362
366 363 def notset(repo, subset, x):
367 364 return subset - getset(repo, subset, x)
368 365
369 366 def listset(repo, subset, a, b):
370 367 raise error.ParseError(_("can't use a list in this context"))
371 368
372 369 def func(repo, subset, a, b):
373 370 if a[0] == 'symbol' and a[1] in symbols:
374 371 return symbols[a[1]](repo, subset, b)
375 372 raise error.UnknownIdentifier(a[1], symbols.keys())
376 373
377 374 # functions
378 375
379 376 def adds(repo, subset, x):
380 377 """``adds(pattern)``
381 378 Changesets that add a file matching pattern.
382 379
383 380 The pattern without explicit kind like ``glob:`` is expected to be
384 381 relative to the current directory and match against a file or a
385 382 directory.
386 383 """
387 384 # i18n: "adds" is a keyword
388 385 pat = getstring(x, _("adds requires a pattern"))
389 386 return checkstatus(repo, subset, pat, 1)
390 387
391 388 def ancestor(repo, subset, x):
392 389 """``ancestor(*changeset)``
393 390 A greatest common ancestor of the changesets.
394 391
395 392 Accepts 0 or more changesets.
396 393 Will return empty list when passed no args.
397 394 Greatest common ancestor of a single changeset is that changeset.
398 395 """
399 396 # i18n: "ancestor" is a keyword
400 397 l = getlist(x)
401 398 rl = fullreposet(repo)
402 399 anc = None
403 400
404 401 # (getset(repo, rl, i) for i in l) generates a list of lists
405 402 for revs in (getset(repo, rl, i) for i in l):
406 403 for r in revs:
407 404 if anc is None:
408 405 anc = repo[r]
409 406 else:
410 407 anc = anc.ancestor(repo[r])
411 408
412 409 if anc is not None and anc.rev() in subset:
413 410 return baseset([anc.rev()])
414 411 return baseset()
415 412
416 413 def _ancestors(repo, subset, x, followfirst=False):
417 414 heads = getset(repo, fullreposet(repo), x)
418 415 if not heads:
419 416 return baseset()
420 417 s = _revancestors(repo, heads, followfirst)
421 418 return subset & s
422 419
423 420 def ancestors(repo, subset, x):
424 421 """``ancestors(set)``
425 422 Changesets that are ancestors of a changeset in set.
426 423 """
427 424 return _ancestors(repo, subset, x)
428 425
429 426 def _firstancestors(repo, subset, x):
430 427 # ``_firstancestors(set)``
431 428 # Like ``ancestors(set)`` but follows only the first parents.
432 429 return _ancestors(repo, subset, x, followfirst=True)
433 430
434 431 def ancestorspec(repo, subset, x, n):
435 432 """``set~n``
436 433 Changesets that are the Nth ancestor (first parents only) of a changeset
437 434 in set.
438 435 """
439 436 try:
440 437 n = int(n[1])
441 438 except (TypeError, ValueError):
442 439 raise error.ParseError(_("~ expects a number"))
443 440 ps = set()
444 441 cl = repo.changelog
445 442 for r in getset(repo, fullreposet(repo), x):
446 443 for i in range(n):
447 444 r = cl.parentrevs(r)[0]
448 445 ps.add(r)
449 446 return subset & ps
450 447
451 448 def author(repo, subset, x):
452 449 """``author(string)``
453 450 Alias for ``user(string)``.
454 451 """
455 452 # i18n: "author" is a keyword
456 453 n = encoding.lower(getstring(x, _("author requires a string")))
457 454 kind, pattern, matcher = _substringmatcher(n)
458 455 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
459 456
460 457 def bisect(repo, subset, x):
461 458 """``bisect(string)``
462 459 Changesets marked in the specified bisect status:
463 460
464 461 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
465 462 - ``goods``, ``bads`` : csets topologically good/bad
466 463 - ``range`` : csets taking part in the bisection
467 464 - ``pruned`` : csets that are goods, bads or skipped
468 465 - ``untested`` : csets whose fate is yet unknown
469 466 - ``ignored`` : csets ignored due to DAG topology
470 467 - ``current`` : the cset currently being bisected
471 468 """
472 469 # i18n: "bisect" is a keyword
473 470 status = getstring(x, _("bisect requires a string")).lower()
474 471 state = set(hbisect.get(repo, status))
475 472 return subset & state
476 473
477 474 # Backward-compatibility
478 475 # - no help entry so that we do not advertise it any more
479 476 def bisected(repo, subset, x):
480 477 return bisect(repo, subset, x)
481 478
482 479 def bookmark(repo, subset, x):
483 480 """``bookmark([name])``
484 481 The named bookmark or all bookmarks.
485 482
486 483 If `name` starts with `re:`, the remainder of the name is treated as
487 484 a regular expression. To match a bookmark that actually starts with `re:`,
488 485 use the prefix `literal:`.
489 486 """
490 487 # i18n: "bookmark" is a keyword
491 488 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
492 489 if args:
493 490 bm = getstring(args[0],
494 491 # i18n: "bookmark" is a keyword
495 492 _('the argument to bookmark must be a string'))
496 493 kind, pattern, matcher = _stringmatcher(bm)
497 494 bms = set()
498 495 if kind == 'literal':
499 496 bmrev = repo._bookmarks.get(pattern, None)
500 497 if not bmrev:
501 498 raise error.RepoLookupError(_("bookmark '%s' does not exist")
502 499 % bm)
503 500 bms.add(repo[bmrev].rev())
504 501 else:
505 502 matchrevs = set()
506 503 for name, bmrev in repo._bookmarks.iteritems():
507 504 if matcher(name):
508 505 matchrevs.add(bmrev)
509 506 if not matchrevs:
510 507 raise error.RepoLookupError(_("no bookmarks exist"
511 508 " that match '%s'") % pattern)
512 509 for bmrev in matchrevs:
513 510 bms.add(repo[bmrev].rev())
514 511 else:
515 512 bms = set([repo[r].rev()
516 513 for r in repo._bookmarks.values()])
517 514 bms -= set([node.nullrev])
518 515 return subset & bms
519 516
520 517 def branch(repo, subset, x):
521 518 """``branch(string or set)``
522 519 All changesets belonging to the given branch or the branches of the given
523 520 changesets.
524 521
525 522 If `string` starts with `re:`, the remainder of the name is treated as
526 523 a regular expression. To match a branch that actually starts with `re:`,
527 524 use the prefix `literal:`.
528 525 """
529 526 getbi = repo.revbranchcache().branchinfo
530 527
531 528 try:
532 529 b = getstring(x, '')
533 530 except error.ParseError:
534 531 # not a string, but another revspec, e.g. tip()
535 532 pass
536 533 else:
537 534 kind, pattern, matcher = _stringmatcher(b)
538 535 if kind == 'literal':
539 536 # note: falls through to the revspec case if no branch with
540 537 # this name exists
541 538 if pattern in repo.branchmap():
542 539 return subset.filter(lambda r: matcher(getbi(r)[0]))
543 540 else:
544 541 return subset.filter(lambda r: matcher(getbi(r)[0]))
545 542
546 543 s = getset(repo, fullreposet(repo), x)
547 544 b = set()
548 545 for r in s:
549 546 b.add(getbi(r)[0])
550 547 c = s.__contains__
551 548 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
552 549
553 550 def bumped(repo, subset, x):
554 551 """``bumped()``
555 552 Mutable changesets marked as successors of public changesets.
556 553
557 554 Only non-public and non-obsolete changesets can be `bumped`.
558 555 """
559 556 # i18n: "bumped" is a keyword
560 557 getargs(x, 0, 0, _("bumped takes no arguments"))
561 558 bumped = obsmod.getrevs(repo, 'bumped')
562 559 return subset & bumped
563 560
564 561 def bundle(repo, subset, x):
565 562 """``bundle()``
566 563 Changesets in the bundle.
567 564
568 565 Bundle must be specified by the -R option."""
569 566
570 567 try:
571 568 bundlerevs = repo.changelog.bundlerevs
572 569 except AttributeError:
573 570 raise util.Abort(_("no bundle provided - specify with -R"))
574 571 return subset & bundlerevs
575 572
576 573 def checkstatus(repo, subset, pat, field):
577 574 hasset = matchmod.patkind(pat) == 'set'
578 575
579 576 mcache = [None]
580 577 def matches(x):
581 578 c = repo[x]
582 579 if not mcache[0] or hasset:
583 580 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
584 581 m = mcache[0]
585 582 fname = None
586 583 if not m.anypats() and len(m.files()) == 1:
587 584 fname = m.files()[0]
588 585 if fname is not None:
589 586 if fname not in c.files():
590 587 return False
591 588 else:
592 589 for f in c.files():
593 590 if m(f):
594 591 break
595 592 else:
596 593 return False
597 594 files = repo.status(c.p1().node(), c.node())[field]
598 595 if fname is not None:
599 596 if fname in files:
600 597 return True
601 598 else:
602 599 for f in files:
603 600 if m(f):
604 601 return True
605 602
606 603 return subset.filter(matches)
607 604
608 605 def _children(repo, narrow, parentset):
609 606 cs = set()
610 607 if not parentset:
611 608 return baseset(cs)
612 609 pr = repo.changelog.parentrevs
613 610 minrev = min(parentset)
614 611 for r in narrow:
615 612 if r <= minrev:
616 613 continue
617 614 for p in pr(r):
618 615 if p in parentset:
619 616 cs.add(r)
620 617 return baseset(cs)
621 618
622 619 def children(repo, subset, x):
623 620 """``children(set)``
624 621 Child changesets of changesets in set.
625 622 """
626 623 s = getset(repo, fullreposet(repo), x)
627 624 cs = _children(repo, subset, s)
628 625 return subset & cs
629 626
630 627 def closed(repo, subset, x):
631 628 """``closed()``
632 629 Changeset is closed.
633 630 """
634 631 # i18n: "closed" is a keyword
635 632 getargs(x, 0, 0, _("closed takes no arguments"))
636 633 return subset.filter(lambda r: repo[r].closesbranch())
637 634
638 635 def contains(repo, subset, x):
639 636 """``contains(pattern)``
640 637 The revision's manifest contains a file matching pattern (but might not
641 638 modify it). See :hg:`help patterns` for information about file patterns.
642 639
643 640 The pattern without explicit kind like ``glob:`` is expected to be
644 641 relative to the current directory and match against a file exactly
645 642 for efficiency.
646 643 """
647 644 # i18n: "contains" is a keyword
648 645 pat = getstring(x, _("contains requires a pattern"))
649 646
650 647 def matches(x):
651 648 if not matchmod.patkind(pat):
652 649 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
653 650 if pats in repo[x]:
654 651 return True
655 652 else:
656 653 c = repo[x]
657 654 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
658 655 for f in c.manifest():
659 656 if m(f):
660 657 return True
661 658 return False
662 659
663 660 return subset.filter(matches)
664 661
665 662 def converted(repo, subset, x):
666 663 """``converted([id])``
667 664 Changesets converted from the given identifier in the old repository if
668 665 present, or all converted changesets if no identifier is specified.
669 666 """
670 667
671 668 # There is exactly no chance of resolving the revision, so do a simple
672 669 # string compare and hope for the best
673 670
674 671 rev = None
675 672 # i18n: "converted" is a keyword
676 673 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
677 674 if l:
678 675 # i18n: "converted" is a keyword
679 676 rev = getstring(l[0], _('converted requires a revision'))
680 677
681 678 def _matchvalue(r):
682 679 source = repo[r].extra().get('convert_revision', None)
683 680 return source is not None and (rev is None or source.startswith(rev))
684 681
685 682 return subset.filter(lambda r: _matchvalue(r))
686 683
687 684 def date(repo, subset, x):
688 685 """``date(interval)``
689 686 Changesets within the interval, see :hg:`help dates`.
690 687 """
691 688 # i18n: "date" is a keyword
692 689 ds = getstring(x, _("date requires a string"))
693 690 dm = util.matchdate(ds)
694 691 return subset.filter(lambda x: dm(repo[x].date()[0]))
695 692
696 693 def desc(repo, subset, x):
697 694 """``desc(string)``
698 695 Search commit message for string. The match is case-insensitive.
699 696 """
700 697 # i18n: "desc" is a keyword
701 698 ds = encoding.lower(getstring(x, _("desc requires a string")))
702 699
703 700 def matches(x):
704 701 c = repo[x]
705 702 return ds in encoding.lower(c.description())
706 703
707 704 return subset.filter(matches)
708 705
709 706 def _descendants(repo, subset, x, followfirst=False):
710 707 roots = getset(repo, fullreposet(repo), x)
711 708 if not roots:
712 709 return baseset()
713 710 s = _revdescendants(repo, roots, followfirst)
714 711
715 712 # Both sets need to be ascending in order to lazily return the union
716 713 # in the correct order.
717 714 base = subset & roots
718 715 desc = subset & s
719 716 result = base + desc
720 717 if subset.isascending():
721 718 result.sort()
722 719 elif subset.isdescending():
723 720 result.sort(reverse=True)
724 721 else:
725 722 result = subset & result
726 723 return result
727 724
728 725 def descendants(repo, subset, x):
729 726 """``descendants(set)``
730 727 Changesets which are descendants of changesets in set.
731 728 """
732 729 return _descendants(repo, subset, x)
733 730
734 731 def _firstdescendants(repo, subset, x):
735 732 # ``_firstdescendants(set)``
736 733 # Like ``descendants(set)`` but follows only the first parents.
737 734 return _descendants(repo, subset, x, followfirst=True)
738 735
739 736 def destination(repo, subset, x):
740 737 """``destination([set])``
741 738 Changesets that were created by a graft, transplant or rebase operation,
742 739 with the given revisions specified as the source. Omitting the optional set
743 740 is the same as passing all().
744 741 """
745 742 if x is not None:
746 743 sources = getset(repo, fullreposet(repo), x)
747 744 else:
748 745 sources = fullreposet(repo)
749 746
750 747 dests = set()
751 748
752 749 # subset contains all of the possible destinations that can be returned, so
753 750 # iterate over them and see if their source(s) were provided in the arg set.
754 751 # Even if the immediate src of r is not in the arg set, src's source (or
755 752 # further back) may be. Scanning back further than the immediate src allows
756 753 # transitive transplants and rebases to yield the same results as transitive
757 754 # grafts.
758 755 for r in subset:
759 756 src = _getrevsource(repo, r)
760 757 lineage = None
761 758
762 759 while src is not None:
763 760 if lineage is None:
764 761 lineage = list()
765 762
766 763 lineage.append(r)
767 764
768 765 # The visited lineage is a match if the current source is in the arg
769 766 # set. Since every candidate dest is visited by way of iterating
770 767 # subset, any dests further back in the lineage will be tested by a
771 768 # different iteration over subset. Likewise, if the src was already
772 769 # selected, the current lineage can be selected without going back
773 770 # further.
774 771 if src in sources or src in dests:
775 772 dests.update(lineage)
776 773 break
777 774
778 775 r = src
779 776 src = _getrevsource(repo, r)
780 777
781 778 return subset.filter(dests.__contains__)
782 779
783 780 def divergent(repo, subset, x):
784 781 """``divergent()``
785 782 Final successors of changesets with an alternative set of final successors.
786 783 """
787 784 # i18n: "divergent" is a keyword
788 785 getargs(x, 0, 0, _("divergent takes no arguments"))
789 786 divergent = obsmod.getrevs(repo, 'divergent')
790 787 return subset & divergent
791 788
792 789 def draft(repo, subset, x):
793 790 """``draft()``
794 791 Changeset in draft phase."""
795 792 # i18n: "draft" is a keyword
796 793 getargs(x, 0, 0, _("draft takes no arguments"))
797 794 phase = repo._phasecache.phase
798 795 target = phases.draft
799 796 condition = lambda r: phase(repo, r) == target
800 797 return subset.filter(condition, cache=False)
801 798
802 799 def extinct(repo, subset, x):
803 800 """``extinct()``
804 801 Obsolete changesets with obsolete descendants only.
805 802 """
806 803 # i18n: "extinct" is a keyword
807 804 getargs(x, 0, 0, _("extinct takes no arguments"))
808 805 extincts = obsmod.getrevs(repo, 'extinct')
809 806 return subset & extincts
810 807
811 808 def extra(repo, subset, x):
812 809 """``extra(label, [value])``
813 810 Changesets with the given label in the extra metadata, with the given
814 811 optional value.
815 812
816 813 If `value` starts with `re:`, the remainder of the value is treated as
817 814 a regular expression. To match a value that actually starts with `re:`,
818 815 use the prefix `literal:`.
819 816 """
820 817
821 818 # i18n: "extra" is a keyword
822 819 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
823 820 # i18n: "extra" is a keyword
824 821 label = getstring(l[0], _('first argument to extra must be a string'))
825 822 value = None
826 823
827 824 if len(l) > 1:
828 825 # i18n: "extra" is a keyword
829 826 value = getstring(l[1], _('second argument to extra must be a string'))
830 827 kind, value, matcher = _stringmatcher(value)
831 828
832 829 def _matchvalue(r):
833 830 extra = repo[r].extra()
834 831 return label in extra and (value is None or matcher(extra[label]))
835 832
836 833 return subset.filter(lambda r: _matchvalue(r))
837 834
838 835 def filelog(repo, subset, x):
839 836 """``filelog(pattern)``
840 837 Changesets connected to the specified filelog.
841 838
842 839 For performance reasons, visits only revisions mentioned in the file-level
843 840 filelog, rather than filtering through all changesets (much faster, but
844 841 doesn't include deletes or duplicate changes). For a slower, more accurate
845 842 result, use ``file()``.
846 843
847 844 The pattern without explicit kind like ``glob:`` is expected to be
848 845 relative to the current directory and match against a file exactly
849 846 for efficiency.
850 847
851 848 If some linkrev points to revisions filtered by the current repoview, we'll
852 849 work around it to return a non-filtered value.
853 850 """
854 851
855 852 # i18n: "filelog" is a keyword
856 853 pat = getstring(x, _("filelog requires a pattern"))
857 854 s = set()
858 855 cl = repo.changelog
859 856
860 857 if not matchmod.patkind(pat):
861 858 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
862 859 files = [f]
863 860 else:
864 861 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
865 862 files = (f for f in repo[None] if m(f))
866 863
867 864 for f in files:
868 865 backrevref = {} # final value for: filerev -> changerev
869 866 lowestchild = {} # lowest known filerev child of a filerev
870 867 delayed = [] # filerev with filtered linkrev, for post-processing
871 868 lowesthead = None # cache for manifest content of all head revisions
872 869 fl = repo.file(f)
873 870 for fr in list(fl):
874 871 rev = fl.linkrev(fr)
875 872 if rev not in cl:
876 873 # changerev pointed in linkrev is filtered
877 874 # record it for post processing.
878 875 delayed.append((fr, rev))
879 876 continue
880 877 for p in fl.parentrevs(fr):
881 878 if 0 <= p and p not in lowestchild:
882 879 lowestchild[p] = fr
883 880 backrevref[fr] = rev
884 881 s.add(rev)
885 882
886 883 # Post-processing of all filerevs we skipped because they were
887 884 # filtered. If such filerevs have known and unfiltered children, this
888 885 # means they have an unfiltered appearance out there. We'll use linkrev
889 886 # adjustment to find one of these appearances. The lowest known child
890 887 # will be used as a starting point because it is the best upper-bound we
891 888 # have.
892 889 #
893 890 # This approach will fail when an unfiltered but linkrev-shadowed
894 891 # appearance exists in a head changeset without unfiltered filerev
895 892 # children anywhere.
896 893 while delayed:
897 894 # must be a descending iteration. To slowly fill lowest child
898 895 # information that is of potential use by the next item.
899 896 fr, rev = delayed.pop()
900 897 lkr = rev
901 898
902 899 child = lowestchild.get(fr)
903 900
904 901 if child is None:
905 902 # search for existence of this file revision in a head revision.
906 903 # There are three possibilities:
907 904 # - the revision exists in a head and we can find an
908 905 # introduction from there,
909 906 # - the revision does not exist in a head because it has been
910 907 # changed since its introduction: we would have found a child
911 908 # and be in the other 'else' clause,
912 909 # - all versions of the revision are hidden.
913 910 if lowesthead is None:
914 911 lowesthead = {}
915 912 for h in repo.heads():
916 913 fnode = repo[h].manifest().get(f)
917 914 if fnode is not None:
918 915 lowesthead[fl.rev(fnode)] = h
919 916 headrev = lowesthead.get(fr)
920 917 if headrev is None:
921 918 # content is nowhere unfiltered
922 919 continue
923 920 rev = repo[headrev][f].introrev()
924 921 else:
925 922 # the lowest known child is a good upper bound
926 923 childcrev = backrevref[child]
927 924 # XXX this does not guarantee returning the lowest
928 925 # introduction of this revision, but this gives a
929 926 # result which is a good start and will fit in most
930 927 # cases. We probably need to fix the multiple
931 928 # introductions case properly (report each
932 929 # introduction, even for identical file revisions)
933 930 # once and for all at some point anyway.
934 931 for p in repo[childcrev][f].parents():
935 932 if p.filerev() == fr:
936 933 rev = p.rev()
937 934 break
938 935 if rev == lkr: # no shadowed entry found
939 936 # XXX This should never happen unless some manifest points
940 937 # to biggish file revisions (like a revision that uses a
941 938 # parent that never appears in the manifest ancestors)
942 939 continue
943 940
944 941 # Fill the data for the next iteration.
945 942 for p in fl.parentrevs(fr):
946 943 if 0 <= p and p not in lowestchild:
947 944 lowestchild[p] = fr
948 945 backrevref[fr] = rev
949 946 s.add(rev)
950 947
951 948 return subset & s
952 949
953 950 def first(repo, subset, x):
954 951 """``first(set, [n])``
955 952 An alias for limit().
956 953 """
957 954 return limit(repo, subset, x)
958 955
959 956 def _follow(repo, subset, x, name, followfirst=False):
960 957 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
961 958 c = repo['.']
962 959 if l:
963 960 x = getstring(l[0], _("%s expected a filename") % name)
964 961 if x in c:
965 962 cx = c[x]
966 963 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
967 964 # include the revision responsible for the most recent version
968 965 s.add(cx.introrev())
969 966 else:
970 967 return baseset()
971 968 else:
972 969 s = _revancestors(repo, baseset([c.rev()]), followfirst)
973 970
974 971 return subset & s
975 972
976 973 def follow(repo, subset, x):
977 974 """``follow([file])``
978 975 An alias for ``::.`` (ancestors of the working directory's first parent).
979 976 If a filename is specified, the history of the given file is followed,
980 977 including copies.
981 978 """
982 979 return _follow(repo, subset, x, 'follow')
983 980
984 981 def _followfirst(repo, subset, x):
985 982 # ``followfirst([file])``
986 983 # Like ``follow([file])`` but follows only the first parent of
987 984 # every revision or file revision.
988 985 return _follow(repo, subset, x, '_followfirst', followfirst=True)
989 986
990 987 def getall(repo, subset, x):
991 988 """``all()``
992 989 All changesets, the same as ``0:tip``.
993 990 """
994 991 # i18n: "all" is a keyword
995 992 getargs(x, 0, 0, _("all takes no arguments"))
996 993 return subset & spanset(repo) # drop "null" if any
997 994
998 995 def grep(repo, subset, x):
999 996 """``grep(regex)``
1000 997 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1001 998 to ensure special escape characters are handled correctly. Unlike
1002 999 ``keyword(string)``, the match is case-sensitive.
1003 1000 """
1004 1001 try:
1005 1002 # i18n: "grep" is a keyword
1006 1003 gr = re.compile(getstring(x, _("grep requires a string")))
1007 1004 except re.error, e:
1008 1005 raise error.ParseError(_('invalid match pattern: %s') % e)
1009 1006
1010 1007 def matches(x):
1011 1008 c = repo[x]
1012 1009 for e in c.files() + [c.user(), c.description()]:
1013 1010 if gr.search(e):
1014 1011 return True
1015 1012 return False
1016 1013
1017 1014 return subset.filter(matches)
1018 1015
1019 1016 def _matchfiles(repo, subset, x):
1020 1017 # _matchfiles takes a revset list of prefixed arguments:
1021 1018 #
1022 1019 # [p:foo, i:bar, x:baz]
1023 1020 #
1024 1021 # builds a match object from them and filters subset. Allowed
1025 1022 # prefixes are 'p:' for regular patterns, 'i:' for include
1026 1023 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1027 1024 # a revision identifier, or the empty string to reference the
1028 1025 # working directory, from which the match object is
1029 1026 # initialized. Use 'd:' to set the default matching mode, default
1030 1027 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1031 1028
1032 1029 # i18n: "_matchfiles" is a keyword
1033 1030 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1034 1031 pats, inc, exc = [], [], []
1035 1032 rev, default = None, None
1036 1033 for arg in l:
1037 1034 # i18n: "_matchfiles" is a keyword
1038 1035 s = getstring(arg, _("_matchfiles requires string arguments"))
1039 1036 prefix, value = s[:2], s[2:]
1040 1037 if prefix == 'p:':
1041 1038 pats.append(value)
1042 1039 elif prefix == 'i:':
1043 1040 inc.append(value)
1044 1041 elif prefix == 'x:':
1045 1042 exc.append(value)
1046 1043 elif prefix == 'r:':
1047 1044 if rev is not None:
1048 1045 # i18n: "_matchfiles" is a keyword
1049 1046 raise error.ParseError(_('_matchfiles expected at most one '
1050 1047 'revision'))
1051 1048 if value != '': # empty means working directory; leave rev as None
1052 1049 rev = value
1053 1050 elif prefix == 'd:':
1054 1051 if default is not None:
1055 1052 # i18n: "_matchfiles" is a keyword
1056 1053 raise error.ParseError(_('_matchfiles expected at most one '
1057 1054 'default mode'))
1058 1055 default = value
1059 1056 else:
1060 1057 # i18n: "_matchfiles" is a keyword
1061 1058 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1062 1059 if not default:
1063 1060 default = 'glob'
1064 1061
1065 1062 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1066 1063 exclude=exc, ctx=repo[rev], default=default)
1067 1064
1068 1065 def matches(x):
1069 1066 for f in repo[x].files():
1070 1067 if m(f):
1071 1068 return True
1072 1069 return False
1073 1070
1074 1071 return subset.filter(matches)
1075 1072
1076 1073 def hasfile(repo, subset, x):
1077 1074 """``file(pattern)``
1078 1075 Changesets affecting files matched by pattern.
1079 1076
1080 1077 For a faster but less accurate result, consider using ``filelog()``
1081 1078 instead.
1082 1079
1083 1080 This predicate uses ``glob:`` as the default kind of pattern.
1084 1081 """
1085 1082 # i18n: "file" is a keyword
1086 1083 pat = getstring(x, _("file requires a pattern"))
1087 1084 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1088 1085
1089 1086 def head(repo, subset, x):
1090 1087 """``head()``
1091 1088 Changeset is a named branch head.
1092 1089 """
1093 1090 # i18n: "head" is a keyword
1094 1091 getargs(x, 0, 0, _("head takes no arguments"))
1095 1092 hs = set()
1096 1093 for b, ls in repo.branchmap().iteritems():
1097 1094 hs.update(repo[h].rev() for h in ls)
1098 1095 return baseset(hs).filter(subset.__contains__)
1099 1096
1100 1097 def heads(repo, subset, x):
1101 1098 """``heads(set)``
1102 1099 Members of set with no children in set.
1103 1100 """
1104 1101 s = getset(repo, subset, x)
1105 1102 ps = parents(repo, subset, x)
1106 1103 return s - ps
1107 1104
1108 1105 def hidden(repo, subset, x):
1109 1106 """``hidden()``
1110 1107 Hidden changesets.
1111 1108 """
1112 1109 # i18n: "hidden" is a keyword
1113 1110 getargs(x, 0, 0, _("hidden takes no arguments"))
1114 1111 hiddenrevs = repoview.filterrevs(repo, 'visible')
1115 1112 return subset & hiddenrevs
1116 1113
1117 1114 def keyword(repo, subset, x):
1118 1115 """``keyword(string)``
1119 1116 Search commit message, user name, and names of changed files for
1120 1117 string. The match is case-insensitive.
1121 1118 """
1122 1119 # i18n: "keyword" is a keyword
1123 1120 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1124 1121
1125 1122 def matches(r):
1126 1123 c = repo[r]
1127 1124 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1128 1125 c.description()])
1129 1126
1130 1127 return subset.filter(matches)
1131 1128
1132 1129 def limit(repo, subset, x):
1133 1130 """``limit(set, [n])``
1134 1131 First n members of set, defaulting to 1.
1135 1132 """
1136 1133 # i18n: "limit" is a keyword
1137 1134 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1138 1135 try:
1139 1136 lim = 1
1140 1137 if len(l) == 2:
1141 1138 # i18n: "limit" is a keyword
1142 1139 lim = int(getstring(l[1], _("limit requires a number")))
1143 1140 except (TypeError, ValueError):
1144 1141 # i18n: "limit" is a keyword
1145 1142 raise error.ParseError(_("limit expects a number"))
1146 1143 ss = subset
1147 1144 os = getset(repo, fullreposet(repo), l[0])
1148 1145 result = []
1149 1146 it = iter(os)
1150 1147 for x in xrange(lim):
1151 1148 try:
1152 1149 y = it.next()
1153 1150 if y in ss:
1154 1151 result.append(y)
1155 1152 except (StopIteration):
1156 1153 break
1157 1154 return baseset(result)
1158 1155
1159 1156 def last(repo, subset, x):
1160 1157 """``last(set, [n])``
1161 1158 Last n members of set, defaulting to 1.
1162 1159 """
1163 1160 # i18n: "last" is a keyword
1164 1161 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1165 1162 try:
1166 1163 lim = 1
1167 1164 if len(l) == 2:
1168 1165 # i18n: "last" is a keyword
1169 1166 lim = int(getstring(l[1], _("last requires a number")))
1170 1167 except (TypeError, ValueError):
1171 1168 # i18n: "last" is a keyword
1172 1169 raise error.ParseError(_("last expects a number"))
1173 1170 ss = subset
1174 1171 os = getset(repo, fullreposet(repo), l[0])
1175 1172 os.reverse()
1176 1173 result = []
1177 1174 it = iter(os)
1178 1175 for x in xrange(lim):
1179 1176 try:
1180 1177 y = it.next()
1181 1178 if y in ss:
1182 1179 result.append(y)
1183 1180 except (StopIteration):
1184 1181 break
1185 1182 return baseset(result)
1186 1183
1187 1184 def maxrev(repo, subset, x):
1188 1185 """``max(set)``
1189 1186 Changeset with highest revision number in set.
1190 1187 """
1191 1188 os = getset(repo, fullreposet(repo), x)
1192 1189 if os:
1193 1190 m = os.max()
1194 1191 if m in subset:
1195 1192 return baseset([m])
1196 1193 return baseset()
1197 1194
1198 1195 def merge(repo, subset, x):
1199 1196 """``merge()``
1200 1197 Changeset is a merge changeset.
1201 1198 """
1202 1199 # i18n: "merge" is a keyword
1203 1200 getargs(x, 0, 0, _("merge takes no arguments"))
1204 1201 cl = repo.changelog
1205 1202 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1206 1203
1207 1204 def branchpoint(repo, subset, x):
1208 1205 """``branchpoint()``
1209 1206 Changesets with more than one child.
1210 1207 """
1211 1208 # i18n: "branchpoint" is a keyword
1212 1209 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1213 1210 cl = repo.changelog
1214 1211 if not subset:
1215 1212 return baseset()
1216 1213 baserev = min(subset)
1217 1214 parentscount = [0]*(len(repo) - baserev)
1218 1215 for r in cl.revs(start=baserev + 1):
1219 1216 for p in cl.parentrevs(r):
1220 1217 if p >= baserev:
1221 1218 parentscount[p - baserev] += 1
1222 1219 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1223 1220
1224 1221 def minrev(repo, subset, x):
1225 1222 """``min(set)``
1226 1223 Changeset with lowest revision number in set.
1227 1224 """
1228 1225 os = getset(repo, fullreposet(repo), x)
1229 1226 if os:
1230 1227 m = os.min()
1231 1228 if m in subset:
1232 1229 return baseset([m])
1233 1230 return baseset()
1234 1231
1235 1232 def modifies(repo, subset, x):
1236 1233 """``modifies(pattern)``
1237 1234 Changesets modifying files matched by pattern.
1238 1235
1239 1236 The pattern without explicit kind like ``glob:`` is expected to be
1240 1237 relative to the current directory and match against a file or a
1241 1238 directory.
1242 1239 """
1243 1240 # i18n: "modifies" is a keyword
1244 1241 pat = getstring(x, _("modifies requires a pattern"))
1245 1242 return checkstatus(repo, subset, pat, 0)
1246 1243
1247 1244 def named(repo, subset, x):
1248 1245 """``named(namespace)``
1249 1246 The changesets in a given namespace.
1250 1247
1251 1248 If `namespace` starts with `re:`, the remainder of the string is treated as
1252 1249 a regular expression. To match a namespace that actually starts with `re:`,
1253 1250 use the prefix `literal:`.
1254 1251 """
1255 1252 # i18n: "named" is a keyword
1256 1253 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1257 1254
1258 1255 ns = getstring(args[0],
1259 1256 # i18n: "named" is a keyword
1260 1257 _('the argument to named must be a string'))
1261 1258 kind, pattern, matcher = _stringmatcher(ns)
1262 1259 namespaces = set()
1263 1260 if kind == 'literal':
1264 1261 if pattern not in repo.names:
1265 1262 raise error.RepoLookupError(_("namespace '%s' does not exist")
1266 1263 % ns)
1267 1264 namespaces.add(repo.names[pattern])
1268 1265 else:
1269 1266 for name, ns in repo.names.iteritems():
1270 1267 if matcher(name):
1271 1268 namespaces.add(ns)
1272 1269 if not namespaces:
1273 1270 raise error.RepoLookupError(_("no namespace exists"
1274 1271 " that match '%s'") % pattern)
1275 1272
1276 1273 names = set()
1277 1274 for ns in namespaces:
1278 1275 for name in ns.listnames(repo):
1279 1276 if name not in ns.deprecated:
1280 1277 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1281 1278
1282 1279 names -= set([node.nullrev])
1283 1280 return subset & names
1284 1281
1285 1282 def node_(repo, subset, x):
1286 1283 """``id(string)``
1287 1284 Revision non-ambiguously specified by the given hex string prefix.
1288 1285 """
1289 1286 # i18n: "id" is a keyword
1290 1287 l = getargs(x, 1, 1, _("id requires one argument"))
1291 1288 # i18n: "id" is a keyword
1292 1289 n = getstring(l[0], _("id requires a string"))
1293 1290 if len(n) == 40:
1294 1291 try:
1295 1292 rn = repo.changelog.rev(node.bin(n))
1296 1293 except (LookupError, TypeError):
1297 1294 rn = None
1298 1295 else:
1299 1296 rn = None
1300 1297 pm = repo.changelog._partialmatch(n)
1301 1298 if pm is not None:
1302 1299 rn = repo.changelog.rev(pm)
1303 1300
1304 1301 if rn is None:
1305 1302 return baseset()
1306 1303 result = baseset([rn])
1307 1304 return result & subset
1308 1305
1309 1306 def obsolete(repo, subset, x):
1310 1307 """``obsolete()``
1311 1308 Mutable changeset with a newer version."""
1312 1309 # i18n: "obsolete" is a keyword
1313 1310 getargs(x, 0, 0, _("obsolete takes no arguments"))
1314 1311 obsoletes = obsmod.getrevs(repo, 'obsolete')
1315 1312 return subset & obsoletes
1316 1313
1317 1314 def only(repo, subset, x):
1318 1315 """``only(set, [set])``
1319 1316 Changesets that are ancestors of the first set that are not ancestors
1320 1317 of any other head in the repo. If a second set is specified, the result
1321 1318 is ancestors of the first set that are not ancestors of the second set
1322 1319 (i.e. ::<set1> - ::<set2>).
1323 1320 """
1324 1321 cl = repo.changelog
1325 1322 # i18n: "only" is a keyword
1326 1323 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1327 1324 include = getset(repo, fullreposet(repo), args[0])
1328 1325 if len(args) == 1:
1329 1326 if not include:
1330 1327 return baseset()
1331 1328
1332 1329 descendants = set(_revdescendants(repo, include, False))
1333 1330 exclude = [rev for rev in cl.headrevs()
1334 1331 if not rev in descendants and not rev in include]
1335 1332 else:
1336 1333 exclude = getset(repo, fullreposet(repo), args[1])
1337 1334
1338 1335 results = set(cl.findmissingrevs(common=exclude, heads=include))
1339 1336 return subset & results
1340 1337
1341 1338 def origin(repo, subset, x):
1342 1339 """``origin([set])``
1343 1340 Changesets that were specified as a source for the grafts, transplants or
1344 1341 rebases that created the given revisions. Omitting the optional set is the
1345 1342 same as passing all(). If a changeset created by these operations is itself
1346 1343 specified as a source for one of these operations, only the source changeset
1347 1344 for the first operation is selected.
1348 1345 """
1349 1346 if x is not None:
1350 1347 dests = getset(repo, fullreposet(repo), x)
1351 1348 else:
1352 1349 dests = fullreposet(repo)
1353 1350
1354 1351 def _firstsrc(rev):
1355 1352 src = _getrevsource(repo, rev)
1356 1353 if src is None:
1357 1354 return None
1358 1355
1359 1356 while True:
1360 1357 prev = _getrevsource(repo, src)
1361 1358
1362 1359 if prev is None:
1363 1360 return src
1364 1361 src = prev
1365 1362
1366 1363 o = set([_firstsrc(r) for r in dests])
1367 1364 o -= set([None])
1368 1365 return subset & o
1369 1366
1370 1367 def outgoing(repo, subset, x):
1371 1368 """``outgoing([path])``
1372 1369 Changesets not found in the specified destination repository, or the
1373 1370 default push location.
1374 1371 """
1375 1372 # Avoid cycles.
1376 1373 import discovery
1377 1374 import hg
1378 1375 # i18n: "outgoing" is a keyword
1379 1376 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1380 1377 # i18n: "outgoing" is a keyword
1381 1378 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1382 1379 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1383 1380 dest, branches = hg.parseurl(dest)
1384 1381 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1385 1382 if revs:
1386 1383 revs = [repo.lookup(rev) for rev in revs]
1387 1384 other = hg.peer(repo, {}, dest)
1388 1385 repo.ui.pushbuffer()
1389 1386 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1390 1387 repo.ui.popbuffer()
1391 1388 cl = repo.changelog
1392 1389 o = set([cl.rev(r) for r in outgoing.missing])
1393 1390 return subset & o
1394 1391
1395 1392 def p1(repo, subset, x):
1396 1393 """``p1([set])``
1397 1394 First parent of changesets in set, or the working directory.
1398 1395 """
1399 1396 if x is None:
1400 1397 p = repo[x].p1().rev()
1401 1398 if p >= 0:
1402 1399 return subset & baseset([p])
1403 1400 return baseset()
1404 1401
1405 1402 ps = set()
1406 1403 cl = repo.changelog
1407 1404 for r in getset(repo, fullreposet(repo), x):
1408 1405 ps.add(cl.parentrevs(r)[0])
1409 1406 ps -= set([node.nullrev])
1410 1407 return subset & ps
1411 1408
1412 1409 def p2(repo, subset, x):
1413 1410 """``p2([set])``
1414 1411 Second parent of changesets in set, or the working directory.
1415 1412 """
1416 1413 if x is None:
1417 1414 ps = repo[x].parents()
1418 1415 try:
1419 1416 p = ps[1].rev()
1420 1417 if p >= 0:
1421 1418 return subset & baseset([p])
1422 1419 return baseset()
1423 1420 except IndexError:
1424 1421 return baseset()
1425 1422
1426 1423 ps = set()
1427 1424 cl = repo.changelog
1428 1425 for r in getset(repo, fullreposet(repo), x):
1429 1426 ps.add(cl.parentrevs(r)[1])
1430 1427 ps -= set([node.nullrev])
1431 1428 return subset & ps
1432 1429
1433 1430 def parents(repo, subset, x):
1434 1431 """``parents([set])``
1435 1432 The set of all parents for all changesets in set, or the working directory.
1436 1433 """
1437 1434 if x is None:
1438 1435 ps = set(p.rev() for p in repo[x].parents())
1439 1436 else:
1440 1437 ps = set()
1441 1438 cl = repo.changelog
1442 1439 for r in getset(repo, fullreposet(repo), x):
1443 1440 ps.update(cl.parentrevs(r))
1444 1441 ps -= set([node.nullrev])
1445 1442 return subset & ps
1446 1443
1447 1444 def parentspec(repo, subset, x, n):
1448 1445 """``set^0``
1449 1446 The set.
1450 1447 ``set^1`` (or ``set^``), ``set^2``
1451 1448 First or second parent, respectively, of all changesets in set.
1452 1449 """
1453 1450 try:
1454 1451 n = int(n[1])
1455 1452 if n not in (0, 1, 2):
1456 1453 raise ValueError
1457 1454 except (TypeError, ValueError):
1458 1455 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1459 1456 ps = set()
1460 1457 cl = repo.changelog
1461 1458 for r in getset(repo, fullreposet(repo), x):
1462 1459 if n == 0:
1463 1460 ps.add(r)
1464 1461 elif n == 1:
1465 1462 ps.add(cl.parentrevs(r)[0])
1466 1463 elif n == 2:
1467 1464 parents = cl.parentrevs(r)
1468 1465 if len(parents) > 1:
1469 1466 ps.add(parents[1])
1470 1467 return subset & ps
1471 1468
1472 1469 def present(repo, subset, x):
1473 1470 """``present(set)``
1474 1471 An empty set, if any revision in set isn't found; otherwise,
1475 1472 all revisions in set.
1476 1473
1477 1474 If any of specified revisions is not present in the local repository,
1478 1475 the query is normally aborted. But this predicate allows the query
1479 1476 to continue even in such cases.
1480 1477 """
1481 1478 try:
1482 1479 return getset(repo, subset, x)
1483 1480 except error.RepoLookupError:
1484 1481 return baseset()
1485 1482
1486 1483 def public(repo, subset, x):
1487 1484 """``public()``
1488 1485 Changeset in public phase."""
1489 1486 # i18n: "public" is a keyword
1490 1487 getargs(x, 0, 0, _("public takes no arguments"))
1491 1488 phase = repo._phasecache.phase
1492 1489 target = phases.public
1493 1490 condition = lambda r: phase(repo, r) == target
1494 1491 return subset.filter(condition, cache=False)
1495 1492
1496 1493 def remote(repo, subset, x):
1497 1494 """``remote([id [,path]])``
1498 1495 Local revision that corresponds to the given identifier in a
1499 1496 remote repository, if present. Here, the '.' identifier is a
1500 1497 synonym for the current local branch.
1501 1498 """
1502 1499
1503 1500 import hg # avoid start-up nasties
1504 1501 # i18n: "remote" is a keyword
1505 1502 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1506 1503
1507 1504 q = '.'
1508 1505 if len(l) > 0:
1509 1506 # i18n: "remote" is a keyword
1510 1507 q = getstring(l[0], _("remote requires a string id"))
1511 1508 if q == '.':
1512 1509 q = repo['.'].branch()
1513 1510
1514 1511 dest = ''
1515 1512 if len(l) > 1:
1516 1513 # i18n: "remote" is a keyword
1517 1514 dest = getstring(l[1], _("remote requires a repository path"))
1518 1515 dest = repo.ui.expandpath(dest or 'default')
1519 1516 dest, branches = hg.parseurl(dest)
1520 1517 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1521 1518 if revs:
1522 1519 revs = [repo.lookup(rev) for rev in revs]
1523 1520 other = hg.peer(repo, {}, dest)
1524 1521 n = other.lookup(q)
1525 1522 if n in repo:
1526 1523 r = repo[n].rev()
1527 1524 if r in subset:
1528 1525 return baseset([r])
1529 1526 return baseset()
1530 1527
1531 1528 def removes(repo, subset, x):
1532 1529 """``removes(pattern)``
1533 1530 Changesets which remove files matching pattern.
1534 1531
1535 1532 The pattern without explicit kind like ``glob:`` is expected to be
1536 1533 relative to the current directory and match against a file or a
1537 1534 directory.
1538 1535 """
1539 1536 # i18n: "removes" is a keyword
1540 1537 pat = getstring(x, _("removes requires a pattern"))
1541 1538 return checkstatus(repo, subset, pat, 2)
1542 1539
1543 1540 def rev(repo, subset, x):
1544 1541 """``rev(number)``
1545 1542 Revision with the given numeric identifier.
1546 1543 """
1547 1544 # i18n: "rev" is a keyword
1548 1545 l = getargs(x, 1, 1, _("rev requires one argument"))
1549 1546 try:
1550 1547 # i18n: "rev" is a keyword
1551 1548 l = int(getstring(l[0], _("rev requires a number")))
1552 1549 except (TypeError, ValueError):
1553 1550 # i18n: "rev" is a keyword
1554 1551 raise error.ParseError(_("rev expects a number"))
1555 1552 if l not in repo.changelog and l != node.nullrev:
1556 1553 return baseset()
1557 1554 return subset & baseset([l])
1558 1555
1559 1556 def matching(repo, subset, x):
1560 1557 """``matching(revision [, field])``
1561 1558 Changesets in which a given set of fields match the set of fields in the
1562 1559 selected revision or set.
1563 1560
1564 1561 To match more than one field pass the list of fields to match separated
1565 1562 by spaces (e.g. ``author description``).
1566 1563
1567 1564 Valid fields are most regular revision fields and some special fields.
1568 1565
1569 1566 Regular revision fields are ``description``, ``author``, ``branch``,
1570 1567 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1571 1568 and ``diff``.
1572 1569 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1573 1570 contents of the revision. Two revisions matching their ``diff`` will
1574 1571 also match their ``files``.
1575 1572
1576 1573 Special fields are ``summary`` and ``metadata``:
1577 1574 ``summary`` matches the first line of the description.
1578 1575 ``metadata`` is equivalent to matching ``description user date``
1579 1576 (i.e. it matches the main metadata fields).
1580 1577
1581 1578 ``metadata`` is the default field which is used when no fields are
1582 1579 specified. You can match more than one field at a time.
1583 1580 """
1584 1581 # i18n: "matching" is a keyword
1585 1582 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1586 1583
1587 1584 revs = getset(repo, fullreposet(repo), l[0])
1588 1585
1589 1586 fieldlist = ['metadata']
1590 1587 if len(l) > 1:
1591 1588 fieldlist = getstring(l[1],
1592 1589 # i18n: "matching" is a keyword
1593 1590 _("matching requires a string "
1594 1591 "as its second argument")).split()
1595 1592
1596 1593 # Make sure that there are no repeated fields,
1597 1594 # expand the 'special' 'metadata' field type
1598 1595 # and check the 'files' whenever we check the 'diff'
1599 1596 fields = []
1600 1597 for field in fieldlist:
1601 1598 if field == 'metadata':
1602 1599 fields += ['user', 'description', 'date']
1603 1600 elif field == 'diff':
1604 1601 # a revision matching the diff must also match the files
1605 1602 # since matching the diff is very costly, make sure to
1606 1603 # also match the files first
1607 1604 fields += ['files', 'diff']
1608 1605 else:
1609 1606 if field == 'author':
1610 1607 field = 'user'
1611 1608 fields.append(field)
1612 1609 fields = set(fields)
1613 1610 if 'summary' in fields and 'description' in fields:
1614 1611 # If a revision matches its description it also matches its summary
1615 1612 fields.discard('summary')
1616 1613
1617 1614 # We may want to match more than one field
1618 1615 # Not all fields take the same amount of time to be matched
1619 1616 # Sort the selected fields in order of increasing matching cost
1620 1617 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1621 1618 'files', 'description', 'substate', 'diff']
1622 1619 def fieldkeyfunc(f):
1623 1620 try:
1624 1621 return fieldorder.index(f)
1625 1622 except ValueError:
1626 1623 # assume an unknown field is very costly
1627 1624 return len(fieldorder)
1628 1625 fields = list(fields)
1629 1626 fields.sort(key=fieldkeyfunc)
1630 1627
1631 1628 # Each field will be matched with its own "getfield" function
1632 1629 # which will be added to the getfieldfuncs array of functions
1633 1630 getfieldfuncs = []
1634 1631 _funcs = {
1635 1632 'user': lambda r: repo[r].user(),
1636 1633 'branch': lambda r: repo[r].branch(),
1637 1634 'date': lambda r: repo[r].date(),
1638 1635 'description': lambda r: repo[r].description(),
1639 1636 'files': lambda r: repo[r].files(),
1640 1637 'parents': lambda r: repo[r].parents(),
1641 1638 'phase': lambda r: repo[r].phase(),
1642 1639 'substate': lambda r: repo[r].substate,
1643 1640 'summary': lambda r: repo[r].description().splitlines()[0],
1644 1641 'diff': lambda r: list(repo[r].diff(git=True)),
1645 1642 }
1646 1643 for info in fields:
1647 1644 getfield = _funcs.get(info, None)
1648 1645 if getfield is None:
1649 1646 raise error.ParseError(
1650 1647 # i18n: "matching" is a keyword
1651 1648 _("unexpected field name passed to matching: %s") % info)
1652 1649 getfieldfuncs.append(getfield)
1653 1650 # convert the getfield array of functions into a "getinfo" function
1654 1651 # which returns an array of field values (or a single value if there
1655 1652 # is only one field to match)
1656 1653 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1657 1654
1658 1655 def matches(x):
1659 1656 for rev in revs:
1660 1657 target = getinfo(rev)
1661 1658 match = True
1662 1659 for n, f in enumerate(getfieldfuncs):
1663 1660 if target[n] != f(x):
1664 1661 match = False
1665 1662 if match:
1666 1663 return True
1667 1664 return False
1668 1665
1669 1666 return subset.filter(matches)
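
# Editor's sketch (not part of the original module): the documented semantics
# above can be exercised with a revset string such as
#
#   matching(tip, 'author date')
#
# e.g. via ``hg log -r "matching(tip, 'author date')"``, which selects
# changesets whose author and date equal those of tip.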
1670 1667
1671 1668 def reverse(repo, subset, x):
1672 1669 """``reverse(set)``
1673 1670 Reverse order of set.
1674 1671 """
1675 1672 l = getset(repo, subset, x)
1676 1673 l.reverse()
1677 1674 return l
1678 1675
1679 1676 def roots(repo, subset, x):
1680 1677 """``roots(set)``
1681 1678 Changesets in set with no parent changeset in set.
1682 1679 """
1683 1680 s = getset(repo, fullreposet(repo), x)
1684 1681 subset = subset & s # baseset([r for r in s if r in subset])
1685 1682 cs = _children(repo, subset, s)
1686 1683 return subset - cs
1687 1684
1688 1685 def secret(repo, subset, x):
1689 1686 """``secret()``
1690 1687 Changesets in the secret phase."""
1691 1688 # i18n: "secret" is a keyword
1692 1689 getargs(x, 0, 0, _("secret takes no arguments"))
1693 1690 phase = repo._phasecache.phase
1694 1691 target = phases.secret
1695 1692 condition = lambda r: phase(repo, r) == target
1696 1693 return subset.filter(condition, cache=False)
1697 1694
1698 1695 def sort(repo, subset, x):
1699 1696 """``sort(set[, [-]key...])``
1700 1697 Sort set by keys. The default sort order is ascending, specify a key
1701 1698 as ``-key`` to sort in descending order.
1702 1699
1703 1700 The keys can be:
1704 1701
1705 1702 - ``rev`` for the revision number,
1706 1703 - ``branch`` for the branch name,
1707 1704 - ``desc`` for the commit message (description),
1708 1705 - ``user`` for user name (``author`` can be used as an alias),
1709 1706 - ``date`` for the commit date
1710 1707 """
1711 1708 # i18n: "sort" is a keyword
1712 1709 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1713 1710 keys = "rev"
1714 1711 if len(l) == 2:
1715 1712 # i18n: "sort" is a keyword
1716 1713 keys = getstring(l[1], _("sort spec must be a string"))
1717 1714
1718 1715 s = l[0]
1719 1716 keys = keys.split()
1720 1717 l = []
1721 1718 def invert(s):
1722 1719 return "".join(chr(255 - ord(c)) for c in s)
1723 1720 revs = getset(repo, subset, s)
1724 1721 if keys == ["rev"]:
1725 1722 revs.sort()
1726 1723 return revs
1727 1724 elif keys == ["-rev"]:
1728 1725 revs.sort(reverse=True)
1729 1726 return revs
1730 1727 for r in revs:
1731 1728 c = repo[r]
1732 1729 e = []
1733 1730 for k in keys:
1734 1731 if k == 'rev':
1735 1732 e.append(r)
1736 1733 elif k == '-rev':
1737 1734 e.append(-r)
1738 1735 elif k == 'branch':
1739 1736 e.append(c.branch())
1740 1737 elif k == '-branch':
1741 1738 e.append(invert(c.branch()))
1742 1739 elif k == 'desc':
1743 1740 e.append(c.description())
1744 1741 elif k == '-desc':
1745 1742 e.append(invert(c.description()))
1746 1743 elif k in 'user author':
1747 1744 e.append(c.user())
1748 1745 elif k in '-user -author':
1749 1746 e.append(invert(c.user()))
1750 1747 elif k == 'date':
1751 1748 e.append(c.date()[0])
1752 1749 elif k == '-date':
1753 1750 e.append(-c.date()[0])
1754 1751 else:
1755 1752 raise error.ParseError(_("unknown sort key %r") % k)
1756 1753 e.append(r)
1757 1754 l.append(e)
1758 1755 l.sort()
1759 1756 return baseset([e[-1] for e in l])
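
# Editor's note (not in the original module): the ``invert`` helper above maps
# every character c to chr(255 - ord(c)), so comparing inverted strings
# reverses their lexicographic order; a single ascending sort of the key
# tuples then honours a mix of ascending and descending keys. For example
# (assuming byte strings, as in this Python 2 module):
#
#   >>> invert = lambda s: "".join(chr(255 - ord(c)) for c in s)
#   >>> 'apple' < 'banana', invert('apple') < invert('banana')
#   (True, False)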
1760 1757
1761 1758 def subrepo(repo, subset, x):
1762 1759 """``subrepo([pattern])``
1763 1760 Changesets that add, modify or remove the given subrepo. If no subrepo
1764 1761 pattern is named, any subrepo changes are returned.
1765 1762 """
1766 1763 # i18n: "subrepo" is a keyword
1767 1764 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1768 1765 if len(args) != 0:
1769 1766 pat = getstring(args[0], _("subrepo requires a pattern"))
1770 1767
1771 1768 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1772 1769
1773 1770 def submatches(names):
1774 1771 k, p, m = _stringmatcher(pat)
1775 1772 for name in names:
1776 1773 if m(name):
1777 1774 yield name
1778 1775
1779 1776 def matches(x):
1780 1777 c = repo[x]
1781 1778 s = repo.status(c.p1().node(), c.node(), match=m)
1782 1779
1783 1780 if len(args) == 0:
1784 1781 return s.added or s.modified or s.removed
1785 1782
1786 1783 if s.added:
1787 1784 return util.any(submatches(c.substate.keys()))
1788 1785
1789 1786 if s.modified:
1790 1787 subs = set(c.p1().substate.keys())
1791 1788 subs.update(c.substate.keys())
1792 1789
1793 1790 for path in submatches(subs):
1794 1791 if c.p1().substate.get(path) != c.substate.get(path):
1795 1792 return True
1796 1793
1797 1794 if s.removed:
1798 1795 return util.any(submatches(c.p1().substate.keys()))
1799 1796
1800 1797 return False
1801 1798
1802 1799 return subset.filter(matches)
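
# Editor's sketch (not part of the original module): per the docstring above,
# ``subrepo()`` selects any changeset that touches .hgsubstate, while
# ``subrepo('re:^vendor/')`` (a hypothetical subrepo path) narrows that to
# changesets adding, modifying or removing subrepos whose path matches the
# regular expression.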
1803 1800
1804 1801 def _stringmatcher(pattern):
1805 1802 """
1806 1803 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1807 1804 returns the matcher name, pattern, and matcher function.
1808 1805 missing or unknown prefixes are treated as literal matches.
1809 1806
1810 1807 helper for tests:
1811 1808 >>> def test(pattern, *tests):
1812 1809 ... kind, pattern, matcher = _stringmatcher(pattern)
1813 1810 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1814 1811
1815 1812 exact matching (no prefix):
1816 1813 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1817 1814 ('literal', 'abcdefg', [False, False, True])
1818 1815
1819 1816 regex matching ('re:' prefix)
1820 1817 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1821 1818 ('re', 'a.+b', [False, False, True])
1822 1819
1823 1820 force exact matches ('literal:' prefix)
1824 1821 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1825 1822 ('literal', 're:foobar', [False, True])
1826 1823
1827 1824 unknown prefixes are ignored and treated as literals
1828 1825 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1829 1826 ('literal', 'foo:bar', [False, False, True])
1830 1827 """
1831 1828 if pattern.startswith('re:'):
1832 1829 pattern = pattern[3:]
1833 1830 try:
1834 1831 regex = re.compile(pattern)
1835 1832 except re.error, e:
1836 1833 raise error.ParseError(_('invalid regular expression: %s')
1837 1834 % e)
1838 1835 return 're', pattern, regex.search
1839 1836 elif pattern.startswith('literal:'):
1840 1837 pattern = pattern[8:]
1841 1838 return 'literal', pattern, pattern.__eq__
1842 1839
1843 1840 def _substringmatcher(pattern):
1844 1841 kind, pattern, matcher = _stringmatcher(pattern)
1845 1842 if kind == 'literal':
1846 1843 matcher = lambda s: pattern in s
1847 1844 return kind, pattern, matcher
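
# Editor's note (not in the original module): _substringmatcher downgrades a
# 'literal' match to a substring test, which is what predicates documented as
# "contains string" (e.g. user()) rely on. A minimal illustration:
#
#   >>> kind, pattern, m = _substringmatcher('foo')
#   >>> m('buffoon'), m('bar')
#   (True, False)
#   >>> kind, pattern, m = _substringmatcher('re:foo$')
#   >>> bool(m('buffoon')), bool(m('tofoo'))
#   (False, True)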
1848 1845
1849 1846 def tag(repo, subset, x):
1850 1847 """``tag([name])``
1851 1848 The specified tag by name, or all tagged revisions if no name is given.
1852 1849
1853 1850 If `name` starts with `re:`, the remainder of the name is treated as
1854 1851 a regular expression. To match a tag that actually starts with `re:`,
1855 1852 use the prefix `literal:`.
1856 1853 """
1857 1854 # i18n: "tag" is a keyword
1858 1855 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1859 1856 cl = repo.changelog
1860 1857 if args:
1861 1858 pattern = getstring(args[0],
1862 1859 # i18n: "tag" is a keyword
1863 1860 _('the argument to tag must be a string'))
1864 1861 kind, pattern, matcher = _stringmatcher(pattern)
1865 1862 if kind == 'literal':
1866 1863 # avoid resolving all tags
1867 1864 tn = repo._tagscache.tags.get(pattern, None)
1868 1865 if tn is None:
1869 1866 raise error.RepoLookupError(_("tag '%s' does not exist")
1870 1867 % pattern)
1871 1868 s = set([repo[tn].rev()])
1872 1869 else:
1873 1870 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1874 1871 else:
1875 1872 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1876 1873 return subset & s
1877 1874
1878 1875 def tagged(repo, subset, x):
1879 1876 return tag(repo, subset, x)
1880 1877
1881 1878 def unstable(repo, subset, x):
1882 1879 """``unstable()``
1883 1880 Non-obsolete changesets with obsolete ancestors.
1884 1881 """
1885 1882 # i18n: "unstable" is a keyword
1886 1883 getargs(x, 0, 0, _("unstable takes no arguments"))
1887 1884 unstables = obsmod.getrevs(repo, 'unstable')
1888 1885 return subset & unstables
1889 1886
1890 1887
1891 1888 def user(repo, subset, x):
1892 1889 """``user(string)``
1893 1890 User name contains string. The match is case-insensitive.
1894 1891
1895 1892 If `string` starts with `re:`, the remainder of the string is treated as
1896 1893 a regular expression. To match a user that actually contains `re:`, use
1897 1894 the prefix `literal:`.
1898 1895 """
1899 1896 return author(repo, subset, x)
1900 1897
1901 1898 # experimental
1902 1899 def wdir(repo, subset, x):
1903 1900 # i18n: "wdir" is a keyword
1904 1901 getargs(x, 0, 0, _("wdir takes no arguments"))
1905 1902 if None in subset:
1906 1903 return baseset([None])
1907 1904 return baseset()
1908 1905
1909 1906 # for internal use
1910 1907 def _list(repo, subset, x):
1911 1908 s = getstring(x, "internal error")
1912 1909 if not s:
1913 1910 return baseset()
1914 1911 ls = [repo[r].rev() for r in s.split('\0')]
1915 1912 s = subset
1916 1913 return baseset([r for r in ls if r in s])
1917 1914
1918 1915 # for internal use
1919 1916 def _intlist(repo, subset, x):
1920 1917 s = getstring(x, "internal error")
1921 1918 if not s:
1922 1919 return baseset()
1923 1920 ls = [int(r) for r in s.split('\0')]
1924 1921 s = subset
1925 1922 return baseset([r for r in ls if r in s])
1926 1923
1927 1924 # for internal use
1928 1925 def _hexlist(repo, subset, x):
1929 1926 s = getstring(x, "internal error")
1930 1927 if not s:
1931 1928 return baseset()
1932 1929 cl = repo.changelog
1933 1930 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1934 1931 s = subset
1935 1932 return baseset([r for r in ls if r in s])
1936 1933
1937 1934 symbols = {
1938 1935 "adds": adds,
1939 1936 "all": getall,
1940 1937 "ancestor": ancestor,
1941 1938 "ancestors": ancestors,
1942 1939 "_firstancestors": _firstancestors,
1943 1940 "author": author,
1944 1941 "bisect": bisect,
1945 1942 "bisected": bisected,
1946 1943 "bookmark": bookmark,
1947 1944 "branch": branch,
1948 1945 "branchpoint": branchpoint,
1949 1946 "bumped": bumped,
1950 1947 "bundle": bundle,
1951 1948 "children": children,
1952 1949 "closed": closed,
1953 1950 "contains": contains,
1954 1951 "converted": converted,
1955 1952 "date": date,
1956 1953 "desc": desc,
1957 1954 "descendants": descendants,
1958 1955 "_firstdescendants": _firstdescendants,
1959 1956 "destination": destination,
1960 1957 "divergent": divergent,
1961 1958 "draft": draft,
1962 1959 "extinct": extinct,
1963 1960 "extra": extra,
1964 1961 "file": hasfile,
1965 1962 "filelog": filelog,
1966 1963 "first": first,
1967 1964 "follow": follow,
1968 1965 "_followfirst": _followfirst,
1969 1966 "grep": grep,
1970 1967 "head": head,
1971 1968 "heads": heads,
1972 1969 "hidden": hidden,
1973 1970 "id": node_,
1974 1971 "keyword": keyword,
1975 1972 "last": last,
1976 1973 "limit": limit,
1977 1974 "_matchfiles": _matchfiles,
1978 1975 "max": maxrev,
1979 1976 "merge": merge,
1980 1977 "min": minrev,
1981 1978 "modifies": modifies,
1982 1979 "named": named,
1983 1980 "obsolete": obsolete,
1984 1981 "only": only,
1985 1982 "origin": origin,
1986 1983 "outgoing": outgoing,
1987 1984 "p1": p1,
1988 1985 "p2": p2,
1989 1986 "parents": parents,
1990 1987 "present": present,
1991 1988 "public": public,
1992 1989 "remote": remote,
1993 1990 "removes": removes,
1994 1991 "rev": rev,
1995 1992 "reverse": reverse,
1996 1993 "roots": roots,
1997 1994 "sort": sort,
1998 1995 "secret": secret,
1999 1996 "subrepo": subrepo,
2000 1997 "matching": matching,
2001 1998 "tag": tag,
2002 1999 "tagged": tagged,
2003 2000 "user": user,
2004 2001 "unstable": unstable,
2005 2002 "wdir": wdir,
2006 2003 "_list": _list,
2007 2004 "_intlist": _intlist,
2008 2005 "_hexlist": _hexlist,
2009 2006 }
2010 2007
2011 2008 # symbols which can't be used for a DoS attack for any given input
2012 2009 # (e.g. those which accept regexes as plain strings shouldn't be included)
2013 2010 # functions that just return a lot of changesets (like all) don't count here
2014 2011 safesymbols = set([
2015 2012 "adds",
2016 2013 "all",
2017 2014 "ancestor",
2018 2015 "ancestors",
2019 2016 "_firstancestors",
2020 2017 "author",
2021 2018 "bisect",
2022 2019 "bisected",
2023 2020 "bookmark",
2024 2021 "branch",
2025 2022 "branchpoint",
2026 2023 "bumped",
2027 2024 "bundle",
2028 2025 "children",
2029 2026 "closed",
2030 2027 "converted",
2031 2028 "date",
2032 2029 "desc",
2033 2030 "descendants",
2034 2031 "_firstdescendants",
2035 2032 "destination",
2036 2033 "divergent",
2037 2034 "draft",
2038 2035 "extinct",
2039 2036 "extra",
2040 2037 "file",
2041 2038 "filelog",
2042 2039 "first",
2043 2040 "follow",
2044 2041 "_followfirst",
2045 2042 "head",
2046 2043 "heads",
2047 2044 "hidden",
2048 2045 "id",
2049 2046 "keyword",
2050 2047 "last",
2051 2048 "limit",
2052 2049 "_matchfiles",
2053 2050 "max",
2054 2051 "merge",
2055 2052 "min",
2056 2053 "modifies",
2057 2054 "obsolete",
2058 2055 "only",
2059 2056 "origin",
2060 2057 "outgoing",
2061 2058 "p1",
2062 2059 "p2",
2063 2060 "parents",
2064 2061 "present",
2065 2062 "public",
2066 2063 "remote",
2067 2064 "removes",
2068 2065 "rev",
2069 2066 "reverse",
2070 2067 "roots",
2071 2068 "sort",
2072 2069 "secret",
2073 2070 "matching",
2074 2071 "tag",
2075 2072 "tagged",
2076 2073 "user",
2077 2074 "unstable",
2078 2075 "wdir",
2079 2076 "_list",
2080 2077 "_intlist",
2081 2078 "_hexlist",
2082 2079 ])
2083 2080
2084 2081 methods = {
2085 2082 "range": rangeset,
2086 2083 "dagrange": dagrange,
2087 2084 "string": stringset,
2088 2085 "symbol": stringset,
2089 2086 "and": andset,
2090 2087 "or": orset,
2091 2088 "not": notset,
2092 2089 "list": listset,
2093 2090 "func": func,
2094 2091 "ancestor": ancestorspec,
2095 2092 "parent": parentspec,
2096 2093 "parentpost": p1,
2097 2094 }
2098 2095
2099 2096 def optimize(x, small):
2100 2097 if x is None:
2101 2098 return 0, x
2102 2099
2103 2100 smallbonus = 1
2104 2101 if small:
2105 2102 smallbonus = .5
2106 2103
2107 2104 op = x[0]
2108 2105 if op == 'minus':
2109 2106 return optimize(('and', x[1], ('not', x[2])), small)
2110 2107 elif op == 'only':
2111 2108 return optimize(('func', ('symbol', 'only'),
2112 2109 ('list', x[1], x[2])), small)
2113 2110 elif op == 'onlypost':
2114 2111 return optimize(('func', ('symbol', 'only'), x[1]), small)
2115 2112 elif op == 'dagrangepre':
2116 2113 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2117 2114 elif op == 'dagrangepost':
2118 2115 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2119 2116 elif op == 'rangepre':
2120 2117 return optimize(('range', ('string', '0'), x[1]), small)
2121 2118 elif op == 'rangepost':
2122 2119 return optimize(('range', x[1], ('string', 'tip')), small)
2123 2120 elif op == 'negate':
2124 2121 return optimize(('string',
2125 2122 '-' + getstring(x[1], _("can't negate that"))), small)
2126 2123 elif op in 'string symbol negate':
2127 2124 return smallbonus, x # single revisions are small
2128 2125 elif op == 'and':
2129 2126 wa, ta = optimize(x[1], True)
2130 2127 wb, tb = optimize(x[2], True)
2131 2128
2132 2129 # (::x and not ::y)/(not ::y and ::x) have a fast path
2133 2130 def isonly(revs, bases):
2134 2131 return (
2135 2132 revs[0] == 'func'
2136 2133 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2137 2134 and bases[0] == 'not'
2138 2135 and bases[1][0] == 'func'
2139 2136 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2140 2137
2141 2138 w = min(wa, wb)
2142 2139 if isonly(ta, tb):
2143 2140 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2144 2141 if isonly(tb, ta):
2145 2142 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2146 2143
2147 2144 if wa > wb:
2148 2145 return w, (op, tb, ta)
2149 2146 return w, (op, ta, tb)
2150 2147 elif op == 'or':
2151 2148 wa, ta = optimize(x[1], False)
2152 2149 wb, tb = optimize(x[2], False)
2153 2150 if wb < wa:
2154 2151 wb, wa = wa, wb
2155 2152 return max(wa, wb), (op, ta, tb)
2156 2153 elif op == 'not':
2157 2154 o = optimize(x[1], not small)
2158 2155 return o[0], (op, o[1])
2159 2156 elif op == 'parentpost':
2160 2157 o = optimize(x[1], small)
2161 2158 return o[0], (op, o[1])
2162 2159 elif op == 'group':
2163 2160 return optimize(x[1], small)
2164 2161 elif op in 'dagrange range list parent ancestorspec':
2165 2162 if op == 'parent':
2166 2163 # x^:y means (x^) : y, not x ^ (:y)
2167 2164 post = ('parentpost', x[1])
2168 2165 if x[2][0] == 'dagrangepre':
2169 2166 return optimize(('dagrange', post, x[2][1]), small)
2170 2167 elif x[2][0] == 'rangepre':
2171 2168 return optimize(('range', post, x[2][1]), small)
2172 2169
2173 2170 wa, ta = optimize(x[1], small)
2174 2171 wb, tb = optimize(x[2], small)
2175 2172 return wa + wb, (op, ta, tb)
2176 2173 elif op == 'func':
2177 2174 f = getstring(x[1], _("not a symbol"))
2178 2175 wa, ta = optimize(x[2], small)
2179 2176 if f in ("author branch closed date desc file grep keyword "
2180 2177 "outgoing user"):
2181 2178 w = 10 # slow
2182 2179 elif f in "modifies adds removes":
2183 2180 w = 30 # slower
2184 2181 elif f == "contains":
2185 2182 w = 100 # very slow
2186 2183 elif f == "ancestor":
2187 2184 w = 1 * smallbonus
2188 2185 elif f in "reverse limit first _intlist":
2189 2186 w = 0
2190 2187 elif f in "sort":
2191 2188 w = 10 # assume most sorts look at changelog
2192 2189 else:
2193 2190 w = 1
2194 2191 return w + wa, (op, x[1], ta)
2195 2192 return 1, x
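
# Editor's sketch (not part of the original module): the 'and' fast path above
# rewrites "::x and not ::y" style trees into the cheaper only() form. Using
# the tree shapes produced elsewhere in this module:
#
#   >>> w, t = optimize(('and',
#   ...                  ('func', ('symbol', 'ancestors'), ('symbol', 'x')),
#   ...                  ('not',
#   ...                   ('func', ('symbol', 'ancestors'), ('symbol', 'y')))),
#   ...                 False)
#   >>> t
#   ('func', ('symbol', 'only'), ('list', ('symbol', 'x'), ('symbol', 'y')))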
2196 2193
2197 2194 _aliasarg = ('func', ('symbol', '_aliasarg'))
2198 2195 def _getaliasarg(tree):
2199 2196 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2200 2197 return X, None otherwise.
2201 2198 """
2202 2199 if (len(tree) == 3 and tree[:2] == _aliasarg
2203 2200 and tree[2][0] == 'string'):
2204 2201 return tree[2][1]
2205 2202 return None
2206 2203
2207 2204 def _checkaliasarg(tree, known=None):
2208 2205 """Check tree contains no _aliasarg construct or only ones which
2209 2206 """Check that tree contains no _aliasarg construct, or only ones whose
2210 2207 value is in known. Used to avoid alias placeholder injection.
2211 2208 if isinstance(tree, tuple):
2212 2209 arg = _getaliasarg(tree)
2213 2210 if arg is not None and (not known or arg not in known):
2214 2211 raise error.UnknownIdentifier('_aliasarg', [])
2215 2212 for t in tree:
2216 2213 _checkaliasarg(t, known)
2217 2214
2218 2215 # the set of valid characters for the initial letter of symbols in
2219 2216 # alias declarations and definitions
2220 2217 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2221 2218 if c.isalnum() or c in '._@$' or ord(c) > 127)
2222 2219
2223 2220 def _tokenizealias(program, lookup=None):
2224 2221 """Parse alias declaration/definition into a stream of tokens
2225 2222
2226 2223 This allows symbol names to also use ``$`` as an initial letter
2227 2224 (for backward compatibility), and callers of this function should
2228 2225 examine whether ``$`` is also used for unexpected symbols or not.
2229 2226 """
2230 2227 return tokenize(program, lookup=lookup,
2231 2228 syminitletters=_aliassyminitletters)
2232 2229
2233 2230 def _parsealiasdecl(decl):
2234 2231 """Parse alias declaration ``decl``
2235 2232
2236 2233 This returns ``(name, tree, args, errorstr)`` tuple:
2237 2234
2238 2235 - ``name``: of declared alias (may be ``decl`` itself at error)
2239 2236 - ``tree``: parse result (or ``None`` at error)
2240 2237 - ``args``: list of alias argument names (or None for symbol declaration)
2241 2238 - ``errorstr``: detail about detected error (or None)
2242 2239
2243 2240 >>> _parsealiasdecl('foo')
2244 2241 ('foo', ('symbol', 'foo'), None, None)
2245 2242 >>> _parsealiasdecl('$foo')
2246 2243 ('$foo', None, None, "'$' not for alias arguments")
2247 2244 >>> _parsealiasdecl('foo::bar')
2248 2245 ('foo::bar', None, None, 'invalid format')
2249 2246 >>> _parsealiasdecl('foo bar')
2250 2247 ('foo bar', None, None, 'at 4: invalid token')
2251 2248 >>> _parsealiasdecl('foo()')
2252 2249 ('foo', ('func', ('symbol', 'foo')), [], None)
2253 2250 >>> _parsealiasdecl('$foo()')
2254 2251 ('$foo()', None, None, "'$' not for alias arguments")
2255 2252 >>> _parsealiasdecl('foo($1, $2)')
2256 2253 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2257 2254 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2258 2255 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2259 2256 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2260 2257 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2261 2258 >>> _parsealiasdecl('foo(bar($1, $2))')
2262 2259 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2263 2260 >>> _parsealiasdecl('foo("string")')
2264 2261 ('foo("string")', None, None, 'invalid argument list')
2265 2262 >>> _parsealiasdecl('foo($1, $2')
2266 2263 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2267 2264 >>> _parsealiasdecl('foo("string')
2268 2265 ('foo("string', None, None, 'at 5: unterminated string')
2269 2266 >>> _parsealiasdecl('foo($1, $2, $1)')
2270 2267 ('foo', None, None, 'argument names collide with each other')
2271 2268 """
2272 2269 p = parser.parser(_tokenizealias, elements)
2273 2270 try:
2274 2271 tree, pos = p.parse(decl)
2275 2272 if (pos != len(decl)):
2276 2273 raise error.ParseError(_('invalid token'), pos)
2277 2274
2278 2275 if isvalidsymbol(tree):
2279 2276 # "name = ...." style
2280 2277 name = getsymbol(tree)
2281 2278 if name.startswith('$'):
2282 2279 return (decl, None, None, _("'$' not for alias arguments"))
2283 2280 return (name, ('symbol', name), None, None)
2284 2281
2285 2282 if isvalidfunc(tree):
2286 2283 # "name(arg, ....) = ...." style
2287 2284 name = getfuncname(tree)
2288 2285 if name.startswith('$'):
2289 2286 return (decl, None, None, _("'$' not for alias arguments"))
2290 2287 args = []
2291 2288 for arg in getfuncargs(tree):
2292 2289 if not isvalidsymbol(arg):
2293 2290 return (decl, None, None, _("invalid argument list"))
2294 2291 args.append(getsymbol(arg))
2295 2292 if len(args) != len(set(args)):
2296 2293 return (name, None, None,
2297 2294 _("argument names collide with each other"))
2298 2295 return (name, ('func', ('symbol', name)), args, None)
2299 2296
2300 2297 return (decl, None, None, _("invalid format"))
2301 2298 except error.ParseError, inst:
2302 2299 return (decl, None, None, parseerrordetail(inst))
2303 2300
2304 2301 def _parsealiasdefn(defn, args):
2305 2302 """Parse alias definition ``defn``
2306 2303
2307 2304 This function also replaces alias argument references in the
2308 2305 specified definition by ``_aliasarg(ARGNAME)``.
2309 2306
2310 2307 ``args`` is a list of alias argument names, or None if the alias
2311 2308 is declared as a symbol.
2312 2309
2313 2310 This returns "tree" as parsing result.
2314 2311
2315 2312 >>> args = ['$1', '$2', 'foo']
2316 2313 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2317 2314 (or
2318 2315 (func
2319 2316 ('symbol', '_aliasarg')
2320 2317 ('string', '$1'))
2321 2318 (func
2322 2319 ('symbol', '_aliasarg')
2323 2320 ('string', 'foo')))
2324 2321 >>> try:
2325 2322 ... _parsealiasdefn('$1 or $bar', args)
2326 2323 ... except error.ParseError, inst:
2327 2324 ... print parseerrordetail(inst)
2328 2325 at 6: '$' not for alias arguments
2329 2326 >>> args = ['$1', '$10', 'foo']
2330 2327 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2331 2328 (or
2332 2329 (func
2333 2330 ('symbol', '_aliasarg')
2334 2331 ('string', '$10'))
2335 2332 ('symbol', 'foobar'))
2336 2333 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2337 2334 (or
2338 2335 ('string', '$1')
2339 2336 ('string', 'foo'))
2340 2337 """
2341 2338 def tokenizedefn(program, lookup=None):
2342 2339 if args:
2343 2340 argset = set(args)
2344 2341 else:
2345 2342 argset = set()
2346 2343
2347 2344 for t, value, pos in _tokenizealias(program, lookup=lookup):
2348 2345 if t == 'symbol':
2349 2346 if value in argset:
2350 2347 # emulate tokenization of "_aliasarg('ARGNAME')":
2351 2348 # "_aliasarg()" is an unknown symbol only used to separate
2352 2349 # alias argument placeholders from regular strings.
2353 2350 yield ('symbol', '_aliasarg', pos)
2354 2351 yield ('(', None, pos)
2355 2352 yield ('string', value, pos)
2356 2353 yield (')', None, pos)
2357 2354 continue
2358 2355 elif value.startswith('$'):
2359 2356 raise error.ParseError(_("'$' not for alias arguments"),
2360 2357 pos)
2361 2358 yield (t, value, pos)
2362 2359
2363 2360 p = parser.parser(tokenizedefn, elements)
2364 2361 tree, pos = p.parse(defn)
2365 2362 if pos != len(defn):
2366 2363 raise error.ParseError(_('invalid token'), pos)
2367 2364 return tree
2368 2365
2369 2366 class revsetalias(object):
2370 2367 # whether the alias's own `error` information has already been shown or not.
2371 2368 # this avoids showing the same warning multiple times at each `findaliases`.
2372 2369 warned = False
2373 2370
2374 2371 def __init__(self, name, value):
2375 2372 '''Aliases like:
2376 2373
2377 2374 h = heads(default)
2378 2375 b($1) = ancestors($1) - ancestors(default)
2379 2376 '''
2380 2377 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2381 2378 if self.error:
2382 2379 self.error = _('failed to parse the declaration of revset alias'
2383 2380 ' "%s": %s') % (self.name, self.error)
2384 2381 return
2385 2382
2386 2383 try:
2387 2384 self.replacement = _parsealiasdefn(value, self.args)
2388 2385 # Check for placeholder injection
2389 2386 _checkaliasarg(self.replacement, self.args)
2390 2387 except error.ParseError, inst:
2391 2388 self.error = _('failed to parse the definition of revset alias'
2392 2389 ' "%s": %s') % (self.name, parseerrordetail(inst))
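
# Editor's note (not in the original module): a minimal illustration of the
# declaration/definition split handled above, using the alias shown in the
# class docstring:
#
#   >>> a = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
#   >>> a.name, a.args, a.error
#   ('b', ['$1'], None)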
2393 2390
2394 2391 def _getalias(aliases, tree):
2395 2392 """If tree looks like an unexpanded alias, return it. Return None
2396 2393 otherwise.
2397 2394 """
2398 2395 if isinstance(tree, tuple) and tree:
2399 2396 if tree[0] == 'symbol' and len(tree) == 2:
2400 2397 name = tree[1]
2401 2398 alias = aliases.get(name)
2402 2399 if alias and alias.args is None and alias.tree == tree:
2403 2400 return alias
2404 2401 if tree[0] == 'func' and len(tree) > 1:
2405 2402 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2406 2403 name = tree[1][1]
2407 2404 alias = aliases.get(name)
2408 2405 if alias and alias.args is not None and alias.tree == tree[:2]:
2409 2406 return alias
2410 2407 return None
2411 2408
2412 2409 def _expandargs(tree, args):
2413 2410 """Replace _aliasarg instances with the substitution value of the
2414 2411 same name in args, recursively.
2415 2412 """
2416 2413 if not tree or not isinstance(tree, tuple):
2417 2414 return tree
2418 2415 arg = _getaliasarg(tree)
2419 2416 if arg is not None:
2420 2417 return args[arg]
2421 2418 return tuple(_expandargs(t, args) for t in tree)
2422 2419
2423 2420 def _expandaliases(aliases, tree, expanding, cache):
2424 2421 """Expand aliases in tree, recursively.
2425 2422
2426 2423 'aliases' is a dictionary mapping user defined aliases to
2427 2424 revsetalias objects.
2428 2425 """
2429 2426 if not isinstance(tree, tuple):
2430 2427 # Do not expand raw strings
2431 2428 return tree
2432 2429 alias = _getalias(aliases, tree)
2433 2430 if alias is not None:
2434 2431 if alias.error:
2435 2432 raise util.Abort(alias.error)
2436 2433 if alias in expanding:
2437 2434 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2438 2435 'detected') % alias.name)
2439 2436 expanding.append(alias)
2440 2437 if alias.name not in cache:
2441 2438 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2442 2439 expanding, cache)
2443 2440 result = cache[alias.name]
2444 2441 expanding.pop()
2445 2442 if alias.args is not None:
2446 2443 l = getlist(tree[2])
2447 2444 if len(l) != len(alias.args):
2448 2445 raise error.ParseError(
2449 2446 _('invalid number of arguments: %s') % len(l))
2450 2447 l = [_expandaliases(aliases, a, [], cache) for a in l]
2451 2448 result = _expandargs(result, dict(zip(alias.args, l)))
2452 2449 else:
2453 2450 result = tuple(_expandaliases(aliases, t, expanding, cache)
2454 2451 for t in tree)
2455 2452 return result
2456 2453
2457 2454 def findaliases(ui, tree, showwarning=None):
2458 2455 _checkaliasarg(tree)
2459 2456 aliases = {}
2460 2457 for k, v in ui.configitems('revsetalias'):
2461 2458 alias = revsetalias(k, v)
2462 2459 aliases[alias.name] = alias
2463 2460 tree = _expandaliases(aliases, tree, [], {})
2464 2461 if showwarning:
2465 2462 # warn about problematic (but not referenced) aliases
2466 2463 for name, alias in sorted(aliases.iteritems()):
2467 2464 if alias.error and not alias.warned:
2468 2465 showwarning(_('warning: %s\n') % (alias.error))
2469 2466 alias.warned = True
2470 2467 return tree
2471 2468
2472 2469 def foldconcat(tree):
2473 2470 """Fold elements to be concatenated by `##`
2474 2471 """
2475 2472 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2476 2473 return tree
2477 2474 if tree[0] == '_concat':
2478 2475 pending = [tree]
2479 2476 l = []
2480 2477 while pending:
2481 2478 e = pending.pop()
2482 2479 if e[0] == '_concat':
2483 2480 pending.extend(reversed(e[1:]))
2484 2481 elif e[0] in ('string', 'symbol'):
2485 2482 l.append(e[1])
2486 2483 else:
2487 2484 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2488 2485 raise error.ParseError(msg)
2489 2486 return ('string', ''.join(l))
2490 2487 else:
2491 2488 return tuple(foldconcat(t) for t in tree)
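
# Editor's note (not in the original module): a minimal illustration of the
# folding above, on a hand-built parse tree:
#
#   >>> foldconcat(('_concat', ('string', 'release-'), ('symbol', '1.0')))
#   ('string', 'release-1.0')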
2492 2489
2493 2490 def parse(spec, lookup=None):
2494 2491 p = parser.parser(tokenize, elements)
2495 2492 return p.parse(spec, lookup=lookup)
2496 2493
2497 2494 def posttreebuilthook(tree, repo):
2498 2495 # hook for extensions to execute code on the optimized tree
2499 2496 pass
2500 2497
2501 2498 def match(ui, spec, repo=None):
2502 2499 if not spec:
2503 2500 raise error.ParseError(_("empty query"))
2504 2501 lookup = None
2505 2502 if repo:
2506 2503 lookup = repo.__contains__
2507 2504 tree, pos = parse(spec, lookup)
2508 2505 if (pos != len(spec)):
2509 2506 raise error.ParseError(_("invalid token"), pos)
2510 2507 if ui:
2511 2508 tree = findaliases(ui, tree, showwarning=ui.warn)
2512 2509 tree = foldconcat(tree)
2513 2510 weight, tree = optimize(tree, True)
2514 2511 posttreebuilthook(tree, repo)
2515 2512 def mfunc(repo, subset=None):
2516 2513 if subset is None:
2517 2514 subset = fullreposet(repo)
2518 2515 if util.safehasattr(subset, 'isascending'):
2519 2516 result = getset(repo, subset, tree)
2520 2517 else:
2521 2518 result = getset(repo, baseset(subset), tree)
2522 2519 return result
2523 2520 return mfunc
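
# Editor's sketch (not part of the original module): typical use of match()
# as defined above, assuming ``ui`` and ``repo`` objects are available:
#
#   m = match(ui, 'draft() and head()', repo)
#   revs = m(repo)            # smartset over the whole repository
#   narrowed = m(repo, revs)  # restrict an existing subset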
2524 2521
2525 2522 def formatspec(expr, *args):
2526 2523 '''
2527 2524 This is a convenience function for using revsets internally, and
2528 2525 escapes arguments appropriately. Aliases are intentionally ignored
2529 2526 so that intended expression behavior isn't accidentally subverted.
2530 2527
2531 2528 Supported arguments:
2532 2529
2533 2530 %r = revset expression, parenthesized
2534 2531 %d = int(arg), no quoting
2535 2532 %s = string(arg), escaped and single-quoted
2536 2533 %b = arg.branch(), escaped and single-quoted
2537 2534 %n = hex(arg), single-quoted
2538 2535 %% = a literal '%'
2539 2536
2540 2537 Prefixing the type with 'l' specifies a parenthesized list of that type.
2541 2538
2542 2539 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2543 2540 '(10 or 11):: and ((this()) or (that()))'
2544 2541 >>> formatspec('%d:: and not %d::', 10, 20)
2545 2542 '10:: and not 20::'
2546 2543 >>> formatspec('%ld or %ld', [], [1])
2547 2544 "_list('') or 1"
2548 2545 >>> formatspec('keyword(%s)', 'foo\\xe9')
2549 2546 "keyword('foo\\\\xe9')"
2550 2547 >>> b = lambda: 'default'
2551 2548 >>> b.branch = b
2552 2549 >>> formatspec('branch(%b)', b)
2553 2550 "branch('default')"
2554 2551 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2555 2552 "root(_list('a\\x00b\\x00c\\x00d'))"
2556 2553 '''
2557 2554
2558 2555 def quote(s):
2559 2556 return repr(str(s))
2560 2557
2561 2558 def argtype(c, arg):
2562 2559 if c == 'd':
2563 2560 return str(int(arg))
2564 2561 elif c == 's':
2565 2562 return quote(arg)
2566 2563 elif c == 'r':
2567 2564 parse(arg) # make sure syntax errors are confined
2568 2565 return '(%s)' % arg
2569 2566 elif c == 'n':
2570 2567 return quote(node.hex(arg))
2571 2568 elif c == 'b':
2572 2569 return quote(arg.branch())
2573 2570
2574 2571 def listexp(s, t):
2575 2572 l = len(s)
2576 2573 if l == 0:
2577 2574 return "_list('')"
2578 2575 elif l == 1:
2579 2576 return argtype(t, s[0])
2580 2577 elif t == 'd':
2581 2578 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2582 2579 elif t == 's':
2583 2580 return "_list('%s')" % "\0".join(s)
2584 2581 elif t == 'n':
2585 2582 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2586 2583 elif t == 'b':
2587 2584 return "_list('%s')" % "\0".join(a.branch() for a in s)
2588 2585
2589 2586 m = l // 2
2590 2587 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2591 2588
2592 2589 ret = ''
2593 2590 pos = 0
2594 2591 arg = 0
2595 2592 while pos < len(expr):
2596 2593 c = expr[pos]
2597 2594 if c == '%':
2598 2595 pos += 1
2599 2596 d = expr[pos]
2600 2597 if d == '%':
2601 2598 ret += d
2602 2599 elif d in 'dsnbr':
2603 2600 ret += argtype(d, args[arg])
2604 2601 arg += 1
2605 2602 elif d == 'l':
2606 2603 # a list of some type
2607 2604 pos += 1
2608 2605 d = expr[pos]
2609 2606 ret += listexp(list(args[arg]), d)
2610 2607 arg += 1
2611 2608 else:
2612 2609 raise util.Abort('unexpected revspec format character %s' % d)
2613 2610 else:
2614 2611 ret += c
2615 2612 pos += 1
2616 2613
2617 2614 return ret
2618 2615
2619 2616 def prettyformat(tree):
2620 2617 def _prettyformat(tree, level, lines):
2621 2618 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2622 2619 lines.append((level, str(tree)))
2623 2620 else:
2624 2621 lines.append((level, '(%s' % tree[0]))
2625 2622 for s in tree[1:]:
2626 2623 _prettyformat(s, level + 1, lines)
2627 2624 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2628 2625
2629 2626 lines = []
2630 2627 _prettyformat(tree, 0, lines)
2631 2628 output = '\n'.join((' '*l + s) for l, s in lines)
2632 2629 return output
2633 2630
2634 2631 def depth(tree):
2635 2632 if isinstance(tree, tuple):
2636 2633 return max(map(depth, tree)) + 1
2637 2634 else:
2638 2635 return 0
2639 2636
2640 2637 def funcsused(tree):
2641 2638 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2642 2639 return set()
2643 2640 else:
2644 2641 funcs = set()
2645 2642 for s in tree[1:]:
2646 2643 funcs |= funcsused(s)
2647 2644 if tree[0] == 'func':
2648 2645 funcs.add(tree[1][1])
2649 2646 return funcs
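
# Editor's note (not in the original module): funcsused() collects every
# function name referenced in a parsed tree, e.g.:
#
#   >>> sorted(funcsused(('func', ('symbol', 'only'),
#   ...                   ('list', ('symbol', 'x'),
#   ...                    ('func', ('symbol', 'ancestors'), ('symbol', 'y'))))))
#   ['ancestors', 'only']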
2650 2647
2651 2648 class abstractsmartset(object):
2652 2649
2653 2650 def __nonzero__(self):
2654 2651 """True if the smartset is not empty"""
2655 2652 raise NotImplementedError()
2656 2653
2657 2654 def __contains__(self, rev):
2658 2655 """provide fast membership testing"""
2659 2656 raise NotImplementedError()
2660 2657
2661 2658 def __iter__(self):
2662 2659 """iterate the set in the order it is supposed to be iterated"""
2663 2660 raise NotImplementedError()
2664 2661
2665 2662 # Attributes containing a function to perform a fast iteration in a given
2666 2663 # direction. A smartset can have none, one, or both defined.
2667 2664 #
2668 2665 # Default value is None instead of a function returning None to avoid
2669 2666 # initializing an iterator just for testing if a fast method exists.
2670 2667 fastasc = None
2671 2668 fastdesc = None
2672 2669
2673 2670 def isascending(self):
2674 2671 """True if the set will iterate in ascending order"""
2675 2672 raise NotImplementedError()
2676 2673
2677 2674 def isdescending(self):
2678 2675 """True if the set will iterate in descending order"""
2679 2676 raise NotImplementedError()
2680 2677
2681 2678 def min(self):
2682 2679 """return the minimum element in the set"""
2683 2680 if self.fastasc is not None:
2684 2681 for r in self.fastasc():
2685 2682 return r
2686 2683 raise ValueError('arg is an empty sequence')
2687 2684 return min(self)
2688 2685
2689 2686 def max(self):
2690 2687 """return the maximum element in the set"""
2691 2688 if self.fastdesc is not None:
2692 2689 for r in self.fastdesc():
2693 2690 return r
2694 2691 raise ValueError('arg is an empty sequence')
2695 2692 return max(self)
2696 2693
2697 2694 def first(self):
2698 2695 """return the first element in the set (user iteration perspective)
2699 2696
2700 2697 Return None if the set is empty"""
2701 2698 raise NotImplementedError()
2702 2699
2703 2700 def last(self):
2704 2701 """return the last element in the set (user iteration perspective)
2705 2702
2706 2703 Return None if the set is empty"""
2707 2704 raise NotImplementedError()
2708 2705
2709 2706 def __len__(self):
2710 2707 """return the length of the smartset
2711 2708
2712 2709 This can be expensive on a smartset that could otherwise be lazy."""
2713 2710 raise NotImplementedError()
2714 2711
2715 2712 def reverse(self):
2716 2713 """reverse the expected iteration order"""
2717 2714 raise NotImplementedError()
2718 2715
2719 2716 def sort(self, reverse=True):
2720 2717 """get the set to iterate in an ascending or descending order"""
2721 2718 raise NotImplementedError()
2722 2719
2723 2720 def __and__(self, other):
2724 2721 """Returns a new object with the intersection of the two collections.
2725 2722
2726 2723 This is part of the mandatory API for smartset."""
2727 2724 if isinstance(other, fullreposet):
2728 2725 return self
2729 2726 return self.filter(other.__contains__, cache=False)
2730 2727
2731 2728 def __add__(self, other):
2732 2729 """Returns a new object with the union of the two collections.
2733 2730
2734 2731 This is part of the mandatory API for smartset."""
2735 2732 return addset(self, other)
2736 2733
2737 2734 def __sub__(self, other):
2738 2735 """Returns a new object with the subtraction of the two collections.
2739 2736
2740 2737 This is part of the mandatory API for smartset."""
2741 2738 c = other.__contains__
2742 2739 return self.filter(lambda r: not c(r), cache=False)
2743 2740
2744 2741 def filter(self, condition, cache=True):
2745 2742 """Returns this smartset filtered by condition as a new smartset.
2746 2743
2747 2744 `condition` is a callable which takes a revision number and returns a
2748 2745 boolean.
2749 2746
2750 2747 This is part of the mandatory API for smartset."""
2751 2748 # builtins cannot be cached, but they do not need to be
2752 2749 if cache and util.safehasattr(condition, 'func_code'):
2753 2750 condition = util.cachefunc(condition)
2754 2751 return filteredset(self, condition)
2755 2752
2756 2753 class baseset(abstractsmartset):
2757 2754 """Basic data structure that represents a revset and contains the basic
2758 2755 operation that it should be able to perform.
2759 2756 operations that it should be able to perform.
2760 2757 Every method in this class should be implemented by any smartset class.
2761 2758 """
2762 2759 def __init__(self, data=()):
2763 2760 if not isinstance(data, list):
2764 2761 data = list(data)
2765 2762 self._list = data
2766 2763 self._ascending = None
2767 2764
2768 2765 @util.propertycache
2769 2766 def _set(self):
2770 2767 return set(self._list)
2771 2768
2772 2769 @util.propertycache
2773 2770 def _asclist(self):
2774 2771 asclist = self._list[:]
2775 2772 asclist.sort()
2776 2773 return asclist
2777 2774
2778 2775 def __iter__(self):
2779 2776 if self._ascending is None:
2780 2777 return iter(self._list)
2781 2778 elif self._ascending:
2782 2779 return iter(self._asclist)
2783 2780 else:
2784 2781 return reversed(self._asclist)
2785 2782
2786 2783 def fastasc(self):
2787 2784 return iter(self._asclist)
2788 2785
2789 2786 def fastdesc(self):
2790 2787 return reversed(self._asclist)
2791 2788
2792 2789 @util.propertycache
2793 2790 def __contains__(self):
2794 2791 return self._set.__contains__
2795 2792
2796 2793 def __nonzero__(self):
2797 2794 return bool(self._list)
2798 2795
2799 2796 def sort(self, reverse=False):
2800 2797 self._ascending = not bool(reverse)
2801 2798
2802 2799 def reverse(self):
2803 2800 if self._ascending is None:
2804 2801 self._list.reverse()
2805 2802 else:
2806 2803 self._ascending = not self._ascending
2807 2804
2808 2805 def __len__(self):
2809 2806 return len(self._list)
2810 2807
2811 2808 def isascending(self):
2812 2809 """Returns True if the collection is in ascending order, False if not.
2813 2810
2814 2811 This is part of the mandatory API for smartset."""
2815 2812 if len(self) <= 1:
2816 2813 return True
2817 2814 return self._ascending is not None and self._ascending
2818 2815
2819 2816 def isdescending(self):
2820 2817 """Returns True if the collection is in descending order, False if not.
2821 2818
2822 2819 This is part of the mandatory API for smartset."""
2823 2820 if len(self) <= 1:
2824 2821 return True
2825 2822 return self._ascending is not None and not self._ascending
2826 2823
2827 2824 def first(self):
2828 2825 if self:
2829 2826 if self._ascending is None:
2830 2827 return self._list[0]
2831 2828 elif self._ascending:
2832 2829 return self._asclist[0]
2833 2830 else:
2834 2831 return self._asclist[-1]
2835 2832 return None
2836 2833
2837 2834 def last(self):
2838 2835 if self:
2839 2836 if self._ascending is None:
2840 2837 return self._list[-1]
2841 2838 elif self._ascending:
2842 2839 return self._asclist[-1]
2843 2840 else:
2844 2841 return self._asclist[0]
2845 2842 return None
2846 2843
2847 2844 def __repr__(self):
2848 2845 d = {None: '', False: '-', True: '+'}[self._ascending]
2849 2846 return '<%s%s %r>' % (type(self).__name__, d, self._list)
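
# Editor's note (not in the original module): a minimal illustration of the
# baseset behaviour implemented above:
#
#   >>> s = baseset([3, 1, 2])
#   >>> s.first(), s.last()        # insertion order until sort() is called
#   (3, 2)
#   >>> s.sort()
#   >>> list(s), s.min(), s.max()
#   ([1, 2, 3], 1, 3)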
2850 2847
2851 2848 class filteredset(abstractsmartset):
2852 2849 """Duck type for baseset class which iterates lazily over the revisions in
2853 2850 the subset and contains a function which tests for membership in the
2854 2851 revset
2855 2852 """
2856 2853 def __init__(self, subset, condition=lambda x: True):
2857 2854 """
2858 2855 condition: a function that decides whether a revision in the subset
2859 2856 belongs to the revset or not.
2860 2857 """
2861 2858 self._subset = subset
2862 2859 self._condition = condition
2863 2860 self._cache = {}
2864 2861
2865 2862 def __contains__(self, x):
2866 2863 c = self._cache
2867 2864 if x not in c:
2868 2865 v = c[x] = x in self._subset and self._condition(x)
2869 2866 return v
2870 2867 return c[x]
2871 2868
2872 2869 def __iter__(self):
2873 2870 return self._iterfilter(self._subset)
2874 2871
2875 2872 def _iterfilter(self, it):
2876 2873 cond = self._condition
2877 2874 for x in it:
2878 2875 if cond(x):
2879 2876 yield x
2880 2877
2881 2878 @property
2882 2879 def fastasc(self):
2883 2880 it = self._subset.fastasc
2884 2881 if it is None:
2885 2882 return None
2886 2883 return lambda: self._iterfilter(it())
2887 2884
2888 2885 @property
2889 2886 def fastdesc(self):
2890 2887 it = self._subset.fastdesc
2891 2888 if it is None:
2892 2889 return None
2893 2890 return lambda: self._iterfilter(it())
2894 2891
2895 2892 def __nonzero__(self):
2896 2893 for r in self:
2897 2894 return True
2898 2895 return False
2899 2896
2900 2897 def __len__(self):
2901 2898 # Basic implementation to be changed in future patches.
2902 2899 l = baseset([r for r in self])
2903 2900 return len(l)
2904 2901
2905 2902 def sort(self, reverse=False):
2906 2903 self._subset.sort(reverse=reverse)
2907 2904
2908 2905 def reverse(self):
2909 2906 self._subset.reverse()
2910 2907
2911 2908 def isascending(self):
2912 2909 return self._subset.isascending()
2913 2910
2914 2911 def isdescending(self):
2915 2912 return self._subset.isdescending()
2916 2913
2917 2914 def first(self):
2918 2915 for x in self:
2919 2916 return x
2920 2917 return None
2921 2918
2922 2919 def last(self):
2923 2920 it = None
2924 2921 if self._subset.isascending():
2925 2922 it = self.fastdesc
2926 2923 elif self._subset.isdescending():
2927 2924 it = self.fastasc
2928 2925 if it is None:
2929 2926 # slowly consume everything. This needs improvement
2930 2927 it = lambda: reversed(list(self))
2931 2928 for x in it():
2932 2929 return x
2933 2930 return None
2934 2931
2935 2932 def __repr__(self):
2936 2933 return '<%s %r>' % (type(self).__name__, self._subset)
2937 2934
2938 2935 def _iterordered(ascending, iter1, iter2):
2939 2936 """produce an ordered iteration from two iterators with the same order
2940 2937
2941 2938 The 'ascending' parameter is used to indicate the iteration direction.
2942 2939 """
2943 2940 choice = max
2944 2941 if ascending:
2945 2942 choice = min
2946 2943
2947 2944 val1 = None
2948 2945 val2 = None
2949 2946 try:
2950 2947 # Consume both iterators in an ordered way until one is empty
2951 2948 while True:
2952 2949 if val1 is None:
2953 2950 val1 = iter1.next()
2954 2951 if val2 is None:
2955 2952 val2 = iter2.next()
2956 2953 next = choice(val1, val2)
2957 2954 yield next
2958 2955 if val1 == next:
2959 2956 val1 = None
2960 2957 if val2 == next:
2961 2958 val2 = None
2962 2959 except StopIteration:
2963 2960 # Flush any remaining values and consume the other one
2964 2961 it = iter2
2965 2962 if val1 is not None:
2966 2963 yield val1
2967 2964 it = iter1
2968 2965 elif val2 is not None:
2969 2966 # val2 may also be None if the last values compared equal
2970 2967 yield val2
2971 2968 for val in it:
2972 2969 yield val
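
# Editor's note (not in the original module): _iterordered() merges two
# iterators that already share the requested order, dropping values present
# in both:
#
#   >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 4])))
#   [1, 2, 3, 4, 5]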
2973 2970
2974 2971 class addset(abstractsmartset):
2975 2972 """Represent the addition of two sets
2976 2973
2977 2974 Wrapper structure for lazily adding two structures without losing much
2978 2975 performance on the __contains__ method
2979 2976
2980 2977 If the ascending attribute is set, that means the two structures are
2981 2978 ordered in either an ascending or descending way. Therefore, we can add
2982 2979 them maintaining the order by iterating over both at the same time
2983 2980
2984 2981 >>> xs = baseset([0, 3, 2])
2985 2982 >>> ys = baseset([5, 2, 4])
2986 2983
2987 2984 >>> rs = addset(xs, ys)
2988 2985 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
2989 2986 (True, True, False, True, 0, 4)
2990 2987 >>> rs = addset(xs, baseset([]))
2991 2988 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
2992 2989 (True, True, False, 0, 2)
2993 2990 >>> rs = addset(baseset([]), baseset([]))
2994 2991 >>> bool(rs), 0 in rs, rs.first(), rs.last()
2995 2992 (False, False, None, None)
2996 2993
2997 2994 iterate unsorted:
2998 2995 >>> rs = addset(xs, ys)
2999 2996 >>> [x for x in rs] # without _genlist
3000 2997 [0, 3, 2, 5, 4]
3001 2998 >>> assert not rs._genlist
3002 2999 >>> len(rs)
3003 3000 5
3004 3001 >>> [x for x in rs] # with _genlist
3005 3002 [0, 3, 2, 5, 4]
3006 3003 >>> assert rs._genlist
3007 3004
3008 3005 iterate ascending:
3009 3006 >>> rs = addset(xs, ys, ascending=True)
3010 3007 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3011 3008 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3012 3009 >>> assert not rs._asclist
3013 3010 >>> len(rs)
3014 3011 5
3015 3012 >>> [x for x in rs], [x for x in rs.fastasc()]
3016 3013 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3017 3014 >>> assert rs._asclist
3018 3015
3019 3016 iterate descending:
3020 3017 >>> rs = addset(xs, ys, ascending=False)
3021 3018 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3022 3019 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3023 3020 >>> assert not rs._asclist
3024 3021 >>> len(rs)
3025 3022 5
3026 3023 >>> [x for x in rs], [x for x in rs.fastdesc()]
3027 3024 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3028 3025 >>> assert rs._asclist
3029 3026
3030 3027 iterate ascending without fastasc:
3031 3028 >>> rs = addset(xs, generatorset(ys), ascending=True)
3032 3029 >>> assert rs.fastasc is None
3033 3030 >>> [x for x in rs]
3034 3031 [0, 2, 3, 4, 5]
3035 3032
3036 3033 iterate descending without fastdesc:
3037 3034 >>> rs = addset(generatorset(xs), ys, ascending=False)
3038 3035 >>> assert rs.fastdesc is None
3039 3036 >>> [x for x in rs]
3040 3037 [5, 4, 3, 2, 0]
3041 3038 """
3042 3039 def __init__(self, revs1, revs2, ascending=None):
3043 3040 self._r1 = revs1
3044 3041 self._r2 = revs2
3045 3042 self._iter = None
3046 3043 self._ascending = ascending
3047 3044 self._genlist = None
3048 3045 self._asclist = None
3049 3046
3050 3047 def __len__(self):
3051 3048 return len(self._list)
3052 3049
3053 3050 def __nonzero__(self):
3054 3051 return bool(self._r1) or bool(self._r2)
3055 3052
3056 3053 @util.propertycache
3057 3054 def _list(self):
3058 3055 if not self._genlist:
3059 3056 self._genlist = baseset(iter(self))
3060 3057 return self._genlist
3061 3058
3062 3059 def __iter__(self):
3063 3060 """Iterate over both collections without repeating elements
3064 3061
3065 3062 If the ascending attribute is not set, iterate over the first one and
3066 3063 then over the second one checking for membership on the first one so we
3067 3064 don't yield any duplicates.
3068 3065
3069 3066 If the ascending attribute is set, iterate over both collections at the
3070 3067 same time, yielding only one value at a time in the given order.
3071 3068 """
3072 3069 if self._ascending is None:
3073 3070 if self._genlist:
3074 3071 return iter(self._genlist)
3075 3072 def arbitraryordergen():
3076 3073 for r in self._r1:
3077 3074 yield r
3078 3075 inr1 = self._r1.__contains__
3079 3076 for r in self._r2:
3080 3077 if not inr1(r):
3081 3078 yield r
3082 3079 return arbitraryordergen()
3083 3080 # try to use our own fast iterator if it exists
3084 3081 self._trysetasclist()
3085 3082 if self._ascending:
3086 3083 attr = 'fastasc'
3087 3084 else:
3088 3085 attr = 'fastdesc'
3089 3086 it = getattr(self, attr)
3090 3087 if it is not None:
3091 3088 return it()
3092 3089 # maybe only one of the two components supports fast iteration
3093 3090 # get iterator for _r1
3094 3091 iter1 = getattr(self._r1, attr)
3095 3092 if iter1 is None:
3096 3093 # let's avoid side effect (not sure it matters)
3097 3094 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3098 3095 else:
3099 3096 iter1 = iter1()
3100 3097 # get iterator for _r2
3101 3098 iter2 = getattr(self._r2, attr)
3102 3099 if iter2 is None:
3103 3100 # let's avoid side effect (not sure it matters)
3104 3101 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3105 3102 else:
3106 3103 iter2 = iter2()
3107 3104 return _iterordered(self._ascending, iter1, iter2)
3108 3105
3109 3106 def _trysetasclist(self):
3110 3107 """populate the _asclist attribute if possible and necessary"""
3111 3108 if self._genlist is not None and self._asclist is None:
3112 3109 self._asclist = sorted(self._genlist)
3113 3110
3114 3111 @property
3115 3112 def fastasc(self):
3116 3113 self._trysetasclist()
3117 3114 if self._asclist is not None:
3118 3115 return self._asclist.__iter__
3119 3116 iter1 = self._r1.fastasc
3120 3117 iter2 = self._r2.fastasc
3121 3118 if None in (iter1, iter2):
3122 3119 return None
3123 3120 return lambda: _iterordered(True, iter1(), iter2())
3124 3121
3125 3122 @property
3126 3123 def fastdesc(self):
3127 3124 self._trysetasclist()
3128 3125 if self._asclist is not None:
3129 3126 return self._asclist.__reversed__
3130 3127 iter1 = self._r1.fastdesc
3131 3128 iter2 = self._r2.fastdesc
3132 3129 if None in (iter1, iter2):
3133 3130 return None
3134 3131 return lambda: _iterordered(False, iter1(), iter2())
3135 3132
3136 3133 def __contains__(self, x):
3137 3134 return x in self._r1 or x in self._r2
3138 3135
3139 3136 def sort(self, reverse=False):
3140 3137 """Sort the added set
3141 3138
3142 3139 For this we use the cached list with all the generated values and if we
3143 3140 know they are ascending or descending we can sort them in a smart way.
3144 3141 """
3145 3142 self._ascending = not reverse
3146 3143
3147 3144 def isascending(self):
3148 3145 return self._ascending is not None and self._ascending
3149 3146
3150 3147 def isdescending(self):
3151 3148 return self._ascending is not None and not self._ascending
3152 3149
3153 3150 def reverse(self):
3154 3151 if self._ascending is None:
3155 3152 self._list.reverse()
3156 3153 else:
3157 3154 self._ascending = not self._ascending
3158 3155
3159 3156 def first(self):
3160 3157 for x in self:
3161 3158 return x
3162 3159 return None
3163 3160
3164 3161 def last(self):
3165 3162 self.reverse()
3166 3163 val = self.first()
3167 3164 self.reverse()
3168 3165 return val
3169 3166
3170 3167 def __repr__(self):
3171 3168 d = {None: '', False: '-', True: '+'}[self._ascending]
3172 3169 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3173 3170
3174 3171 class generatorset(abstractsmartset):
3175 3172 """Wrap a generator for lazy iteration
3176 3173
3177 3174 Wrapper structure for generators that provides lazy membership and can
3178 3175 be iterated more than once.
3179 3176 When asked for membership it generates values until either it finds the
3180 3177 requested one or has gone through all the elements in the generator
3181 3178 requested one or has gone through all the elements in the generator."""
3182 3179 def __init__(self, gen, iterasc=None):
3183 3180 """
3184 3181 gen: a generator producing the values for the generatorset.
3185 3182 """
3186 3183 self._gen = gen
3187 3184 self._asclist = None
3188 3185 self._cache = {}
3189 3186 self._genlist = []
3190 3187 self._finished = False
3191 3188 self._ascending = True
3192 3189 if iterasc is not None:
3193 3190 if iterasc:
3194 3191 self.fastasc = self._iterator
3195 3192 self.__contains__ = self._asccontains
3196 3193 else:
3197 3194 self.fastdesc = self._iterator
3198 3195 self.__contains__ = self._desccontains
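
# Editor's note (not in the original module): a minimal illustration of the
# lazy membership described in the class docstring:
#
#   >>> gs = generatorset(iter([0, 2, 4, 6]), iterasc=True)
#   >>> 4 in gs, 3 in gs
#   (True, False)
#   >>> list(gs)    # generated values are cached, so iteration still works
#   [0, 2, 4, 6]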
3199 3196
3200 3197 def __nonzero__(self):
3201 3198 # Do not use 'for r in self' because it will enforce the iteration
3202 3199 # order (default ascending), possibly unrolling a whole descending
3203 3200 # iterator.
3204 3201 if self._genlist:
3205 3202 return True
3206 3203 for r in self._consumegen():
3207 3204 return True
3208 3205 return False
3209 3206
3210 3207 def __contains__(self, x):
3211 3208 if x in self._cache:
3212 3209 return self._cache[x]
3213 3210
3214 3211 # Use new values only, as existing values would be cached.
3215 3212 for l in self._consumegen():
3216 3213 if l == x:
3217 3214 return True
3218 3215
3219 3216 self._cache[x] = False
3220 3217 return False
3221 3218
3222 3219 def _asccontains(self, x):
3223 3220 """version of contains optimised for ascending generator"""
3224 3221 if x in self._cache:
3225 3222 return self._cache[x]
3226 3223
3227 3224 # Use new values only, as existing values would be cached.
3228 3225 for l in self._consumegen():
3229 3226 if l == x:
3230 3227 return True
3231 3228 if l > x:
3232 3229 break
3233 3230
3234 3231 self._cache[x] = False
3235 3232 return False
3236 3233
3237 3234 def _desccontains(self, x):
3238 3235 """version of contains optimised for descending generator"""
3239 3236 if x in self._cache:
3240 3237 return self._cache[x]
3241 3238
3242 3239 # Use new values only, as existing values would be cached.
3243 3240 for l in self._consumegen():
3244 3241 if l == x:
3245 3242 return True
3246 3243 if l < x:
3247 3244 break
3248 3245
3249 3246 self._cache[x] = False
3250 3247 return False
3251 3248
3252 3249 def __iter__(self):
3253 3250 if self._ascending:
3254 3251 it = self.fastasc
3255 3252 else:
3256 3253 it = self.fastdesc
3257 3254 if it is not None:
3258 3255 return it()
3259 3256 # we need to consume the iterator
3260 3257 for x in self._consumegen():
3261 3258 pass
3262 3259 # recall the same code
3263 3260 return iter(self)
3264 3261
3265 3262 def _iterator(self):
3266 3263 if self._finished:
3267 3264 return iter(self._genlist)
3268 3265
3269 3266 # We have to use this complex iteration strategy to allow multiple
3270 3267 # iterations at the same time. We need to be able to see revisions
3271 3268 # pulled from _consumegen and appended to genlist by another instance.
3272 3269 #
3273 3270 # Getting rid of it would provide about a 15% speedup on this
3274 3271 # iteration.
3275 3272 genlist = self._genlist
3276 3273 nextrev = self._consumegen().next
3277 3274 _len = len # cache global lookup
3278 3275 def gen():
3279 3276 i = 0
3280 3277 while True:
3281 3278 if i < _len(genlist):
3282 3279 yield genlist[i]
3283 3280 else:
3284 3281 yield nextrev()
3285 3282 i += 1
3286 3283 return gen()
3287 3284
3288 3285 def _consumegen(self):
3289 3286 cache = self._cache
3290 3287 genlist = self._genlist.append
3291 3288 for item in self._gen:
3292 3289 cache[item] = True
3293 3290 genlist(item)
3294 3291 yield item
3295 3292 if not self._finished:
3296 3293 self._finished = True
3297 3294 asc = self._genlist[:]
3298 3295 asc.sort()
3299 3296 self._asclist = asc
3300 3297 self.fastasc = asc.__iter__
3301 3298 self.fastdesc = asc.__reversed__
3302 3299
3303 3300 def __len__(self):
3304 3301 for x in self._consumegen():
3305 3302 pass
3306 3303 return len(self._genlist)
3307 3304
3308 3305 def sort(self, reverse=False):
3309 3306 self._ascending = not reverse
3310 3307
3311 3308 def reverse(self):
3312 3309 self._ascending = not self._ascending
3313 3310
3314 3311 def isascending(self):
3315 3312 return self._ascending
3316 3313
3317 3314 def isdescending(self):
3318 3315 return not self._ascending
3319 3316
3320 3317 def first(self):
3321 3318 if self._ascending:
3322 3319 it = self.fastasc
3323 3320 else:
3324 3321 it = self.fastdesc
3325 3322 if it is None:
3326 3323 # we need to consume all and try again
3327 3324 for x in self._consumegen():
3328 3325 pass
3329 3326 return self.first()
3330 3327 if self:
3331 3328 return it().next()
3332 3329 return None
3333 3330
3334 3331 def last(self):
3335 3332 if self._ascending:
3336 3333 it = self.fastdesc
3337 3334 else:
3338 3335 it = self.fastasc
3339 3336 if it is None:
3340 3337 # we need to consume all and try again
3341 3338 for x in self._consumegen():
3342 3339 pass
3343 3340 return self.last()
3344 3341 if self:
3345 3342 return it().next()
3346 3343 return None
3347 3344
3348 3345 def __repr__(self):
3349 3346 d = {False: '-', True: '+'}[self._ascending]
3350 3347 return '<%s%s>' % (type(self).__name__, d)
3351 3348
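Since generatorset's laziness is easiest to see in use, here is a small sketch of the behaviour described in the class docstring above; the values are arbitrary small integers standing in for revision numbers:

    gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
    5 in gs        # True: consumes the underlying iterator only up to 5
    11 in gs       # False: exhausts the iterator and caches the miss
    list(gs)       # [0, 2, 5, 9]: already-generated values are replayed
    gs.reverse()
    list(gs)       # [9, 5, 2, 0]: served from the sorted _asclist cache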
3352 3349 class spanset(abstractsmartset):
3353 3350 """Duck type for baseset class which represents a range of revisions and
3354 3351 can work lazily and without having all the range in memory
3355 3352
3356 3353 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3357 3354 notable points:
3358 3355 - when y < x it will automatically be descending,
3359 3356 - revisions filtered by this repoview will be skipped.
3360 3357
3361 3358 """
3362 3359 def __init__(self, repo, start=0, end=None):
3363 3360 """
3364 3361 start: first revision included in the set
3365 3362 (defaults to 0)
3366 3363 end: first revision excluded (last + 1)
3367 3364 (defaults to len(repo))
3368 3365
3369 3366 Spanset will be descending if `end` < `start`.
3370 3367 """
3371 3368 if end is None:
3372 3369 end = len(repo)
3373 3370 self._ascending = start <= end
3374 3371 if not self._ascending:
3375 3372 start, end = end + 1, start + 1
3376 3373 self._start = start
3377 3374 self._end = end
3378 3375 self._hiddenrevs = repo.changelog.filteredrevs
3379 3376
3380 3377 def sort(self, reverse=False):
3381 3378 self._ascending = not reverse
3382 3379
3383 3380 def reverse(self):
3384 3381 self._ascending = not self._ascending
3385 3382
3386 3383 def _iterfilter(self, iterrange):
3387 3384 s = self._hiddenrevs
3388 3385 for r in iterrange:
3389 3386 if r not in s:
3390 3387 yield r
3391 3388
3392 3389 def __iter__(self):
3393 3390 if self._ascending:
3394 3391 return self.fastasc()
3395 3392 else:
3396 3393 return self.fastdesc()
3397 3394
3398 3395 def fastasc(self):
3399 3396 iterrange = xrange(self._start, self._end)
3400 3397 if self._hiddenrevs:
3401 3398 return self._iterfilter(iterrange)
3402 3399 return iter(iterrange)
3403 3400
3404 3401 def fastdesc(self):
3405 3402 iterrange = xrange(self._end - 1, self._start - 1, -1)
3406 3403 if self._hiddenrevs:
3407 3404 return self._iterfilter(iterrange)
3408 3405 return iter(iterrange)
3409 3406
3410 3407 def __contains__(self, rev):
3411 3408 hidden = self._hiddenrevs
3412 3409 return ((self._start <= rev < self._end)
3413 3410 and not (hidden and rev in hidden))
3414 3411
3415 3412 def __nonzero__(self):
3416 3413 for r in self:
3417 3414 return True
3418 3415 return False
3419 3416
3420 3417 def __len__(self):
3421 3418 if not self._hiddenrevs:
3422 3419 return abs(self._end - self._start)
3423 3420 else:
3424 3421 count = 0
3425 3422 start = self._start
3426 3423 end = self._end
3427 3424 for rev in self._hiddenrevs:
3428 3425 if (end < rev <= start) or (start <= rev < end):
3429 3426 count += 1
3430 3427 return abs(self._end - self._start) - count
3431 3428
3432 3429 def isascending(self):
3433 3430 return self._ascending
3434 3431
3435 3432 def isdescending(self):
3436 3433 return not self._ascending
3437 3434
3438 3435 def first(self):
3439 3436 if self._ascending:
3440 3437 it = self.fastasc
3441 3438 else:
3442 3439 it = self.fastdesc
3443 3440 for x in it():
3444 3441 return x
3445 3442 return None
3446 3443
3447 3444 def last(self):
3448 3445 if self._ascending:
3449 3446 it = self.fastdesc
3450 3447 else:
3451 3448 it = self.fastasc
3452 3449 for x in it():
3453 3450 return x
3454 3451 return None
3455 3452
3456 3453 def __repr__(self):
3457 3454 d = {False: '-', True: '+'}[self._ascending]
3458 3455 return '<%s%s %d:%d>' % (type(self).__name__, d,
3459 3456 self._start, self._end - 1)
3460 3457
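A short sketch of the spanset semantics spelled out in the two docstrings above, assuming a hypothetical `repo` with at least ten revisions and nothing filtered:

    s = spanset(repo, 0, 5)     # revisions 0..4, ascending
    list(s)                     # [0, 1, 2, 3, 4]
    4 in s, 5 in s              # (True, False): the end bound is exclusive
    d = spanset(repo, 5, 0)     # end < start, so automatically descending
    list(d)                     # [5, 4, 3, 2, 1]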
3461 3458 class fullreposet(spanset):
3462 3459 """a set containing all revisions in the repo
3463 3460
3464 3461 This class exists to host special optimizations and magic to handle virtual
3465 3462 revisions such as "null".
3466 3463 """
3467 3464
3468 3465 def __init__(self, repo):
3469 3466 super(fullreposet, self).__init__(repo)
3470 3467
3471 3468 def __contains__(self, rev):
3472 3469 # assumes the given rev is valid
3473 3470 hidden = self._hiddenrevs
3474 3471 return not (hidden and rev in hidden)
3475 3472
3476 3473 def __and__(self, other):
3477 3474 """As self contains the whole repo, all of the other set should also be
3478 3475 in self. Therefore `self & other = other`.
3479 3476
3480 3477 This boldly assumes the other contains valid revs only.
3481 3478 """
3482 3479 # other is not a smartset, make it so
3483 3480 if not util.safehasattr(other, 'isascending'):
3484 3481 # filter out hidden revisions
3485 3482 # (this boldly assumes all smartsets are pure)
3486 3483 #
3487 3484 # `other` was used with "&", let's assume this is a set-like
3488 3485 # object.
3489 3486 other = baseset(other - self._hiddenrevs)
3490 3487
3491 3488 other.sort(reverse=self.isdescending())
3492 3489 return other
3493 3490
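The `__and__` shortcut above avoids walking the whole repository; a small illustration, with `repo` again a hypothetical repository object and the revision numbers assumed to be valid, unfiltered revs:

    rs = fullreposet(repo)
    picked = baseset([7, 2, 5])
    combined = rs & picked      # returns 'picked' itself, no repo-wide scan
    list(combined)              # [2, 5, 7]: sorted to match rs's ascending order

    plain = set([7, 2, 5])      # a non-smartset operand is wrapped first:
    list(rs & plain)            # baseset(plain - hiddenrevs), then sorted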
3494 3491 def prettyformatset(revs):
3495 3492 lines = []
3496 3493 rs = repr(revs)
3497 3494 p = 0
3498 3495 while p < len(rs):
3499 3496 q = rs.find('<', p + 1)
3500 3497 if q < 0:
3501 3498 q = len(rs)
3502 3499 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3503 3500 assert l >= 0
3504 3501 lines.append((l, rs[p:q].rstrip()))
3505 3502 p = q
3506 3503 return '\n'.join(' ' * l + s for l, s in lines)
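prettyformatset has no docstring, so a quick illustration of what it produces may help: it splits a nested smartset repr at each '<' and indents by one space per nesting level. With the reprs defined above, the output looks roughly like this (the exact repr text depends on the sets involved):

    s = addset(baseset([1, 2]), baseset([3, 4]))
    print prettyformatset(s)
    # <addset
    #  <baseset [1, 2]>,
    #  <baseset [3, 4]>>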
3507 3504
3508 3505 # tell hggettext to extract docstrings from these functions:
3509 3506 i18nfunctions = symbols.values()