revset: return early when revs is empty...
Martin von Zweigbergk
r24938:6db8074f default
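What the change does: _revancestors.iterate() used to set up revqueue, inputrev and the heap unconditionally and then guard the heap setup and the later comparison with truthiness checks, because inputrev could remain None when revs was empty. The patch returns from the generator as soon as the queue is empty, so from that point on inputrev is always a real revision and the old "inputrev and current == inputrev" test can become a plain equality check. Below is a minimal standalone sketch of the resulting control flow, not Mercurial's actual API: parents(rev) is a hypothetical callback standing in for repo.changelog.parentrevs(rev), and -1 plays the role of node.nullrev.

import heapq
from collections import deque

def iterate_ancestors(revs, parents):
    # Yield every revision in 'revs' and all of its ancestors, highest first.
    # 'parents(rev)' is a hypothetical callback standing in for
    # repo.changelog.parentrevs(rev); -1 stands in for node.nullrev.
    revqueue = deque(sorted(revs, reverse=True))
    if not revqueue:
        # Early return: nothing to traverse, so no heap or 'inputrev' is needed.
        return

    h = []
    inputrev = revqueue.popleft()
    heapq.heappush(h, -inputrev)  # negate to turn heapq's min-heap into a max-heap

    seen = set()
    while h:
        current = -heapq.heappop(h)
        if current not in seen:
            if current == inputrev:  # inputrev is always bound here, no None guard
                if revqueue:
                    inputrev = revqueue.popleft()
                    heapq.heappush(h, -inputrev)
            seen.add(current)
            yield current
            for parent in parents(current):
                if parent != -1:
                    heapq.heappush(h, -parent)

With an empty input, iterate_ancestors([], parents) yields nothing without ever touching the heap, which is exactly the early return this commit adds.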
@@ -1,3448 +1,3448 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 revqueue, inputrev = None, None
29 h = []
30
31 28 revs.sort(reverse=True)
32 29 revqueue = util.deque(revs)
33 if revqueue:
34 inputrev = revqueue.popleft()
35 heapq.heappush(h, -inputrev)
30 if not revqueue:
31 return
32
33 h = []
34 inputrev = revqueue.popleft()
35 heapq.heappush(h, -inputrev)
36 36
37 37 seen = set()
38 38 while h:
39 39 current = -heapq.heappop(h)
40 40 if current not in seen:
41 if inputrev and current == inputrev:
41 if current == inputrev:
42 42 if revqueue:
43 43 inputrev = revqueue.popleft()
44 44 heapq.heappush(h, -inputrev)
45 45 seen.add(current)
46 46 yield current
47 47 for parent in cl.parentrevs(current)[:cut]:
48 48 if parent != node.nullrev:
49 49 heapq.heappush(h, -parent)
50 50
51 51 return generatorset(iterate(), iterasc=False)
52 52
53 53 def _revdescendants(repo, revs, followfirst):
54 54 """Like revlog.descendants() but supports followfirst."""
55 55 if followfirst:
56 56 cut = 1
57 57 else:
58 58 cut = None
59 59
60 60 def iterate():
61 61 cl = repo.changelog
62 62 first = min(revs)
63 63 nullrev = node.nullrev
64 64 if first == nullrev:
65 65 # Are there nodes with a null first parent and a non-null
66 66 # second one? Maybe. Do we care? Probably not.
67 67 for i in cl:
68 68 yield i
69 69 else:
70 70 seen = set(revs)
71 71 for i in cl.revs(first + 1):
72 72 for x in cl.parentrevs(i)[:cut]:
73 73 if x != nullrev and x in seen:
74 74 seen.add(i)
75 75 yield i
76 76 break
77 77
78 78 return generatorset(iterate(), iterasc=True)
79 79
80 80 def _revsbetween(repo, roots, heads):
81 81 """Return all paths between roots and heads, inclusive of both endpoint
82 82 sets."""
83 83 if not roots:
84 84 return baseset()
85 85 parentrevs = repo.changelog.parentrevs
86 86 visit = list(heads)
87 87 reachable = set()
88 88 seen = {}
89 89 minroot = min(roots)
90 90 roots = set(roots)
91 91 # open-code the post-order traversal due to the tiny size of
92 92 # sys.getrecursionlimit()
93 93 while visit:
94 94 rev = visit.pop()
95 95 if rev in roots:
96 96 reachable.add(rev)
97 97 parents = parentrevs(rev)
98 98 seen[rev] = parents
99 99 for parent in parents:
100 100 if parent >= minroot and parent not in seen:
101 101 visit.append(parent)
102 102 if not reachable:
103 103 return baseset()
104 104 for rev in sorted(seen):
105 105 for parent in seen[rev]:
106 106 if parent in reachable:
107 107 reachable.add(rev)
108 108 return baseset(sorted(reachable))
109 109
110 110 elements = {
111 111 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
112 112 "##": (20, None, ("_concat", 20)),
113 113 "~": (18, None, ("ancestor", 18)),
114 114 "^": (18, None, ("parent", 18), ("parentpost", 18)),
115 115 "-": (5, ("negate", 19), ("minus", 5)),
116 116 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
117 117 ("dagrangepost", 17)),
118 118 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
119 119 ("dagrangepost", 17)),
120 120 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
121 121 "not": (10, ("not", 10)),
122 122 "!": (10, ("not", 10)),
123 123 "and": (5, None, ("and", 5)),
124 124 "&": (5, None, ("and", 5)),
125 125 "%": (5, None, ("only", 5), ("onlypost", 5)),
126 126 "or": (4, None, ("or", 4)),
127 127 "|": (4, None, ("or", 4)),
128 128 "+": (4, None, ("or", 4)),
129 129 ",": (2, None, ("list", 2)),
130 130 ")": (0, None, None),
131 131 "symbol": (0, ("symbol",), None),
132 132 "string": (0, ("string",), None),
133 133 "end": (0, None, None),
134 134 }
135 135
136 136 keywords = set(['and', 'or', 'not'])
137 137
138 138 # default set of valid characters for the initial letter of symbols
139 139 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
140 140 if c.isalnum() or c in '._@' or ord(c) > 127)
141 141
142 142 # default set of valid characters for non-initial letters of symbols
143 143 _symletters = set(c for c in [chr(i) for i in xrange(256)]
144 144 if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 145
146 146 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
147 147 '''
148 148 Parse a revset statement into a stream of tokens
149 149
150 150 ``syminitletters`` is the set of valid characters for the initial
151 151 letter of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for initial
154 154 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
155 155
156 156 ``symletters`` is the set of valid characters for non-initial
157 157 letters of symbols.
158 158
159 159 By default, character ``c`` is recognized as valid for non-initial
160 160 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
161 161
162 162 Check that @ is a valid unquoted token character (issue3686):
163 163 >>> list(tokenize("@::"))
164 164 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
165 165
166 166 '''
167 167 if syminitletters is None:
168 168 syminitletters = _syminitletters
169 169 if symletters is None:
170 170 symletters = _symletters
171 171
172 172 pos, l = 0, len(program)
173 173 while pos < l:
174 174 c = program[pos]
175 175 if c.isspace(): # skip inter-token whitespace
176 176 pass
177 177 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
178 178 yield ('::', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
181 181 yield ('..', None, pos)
182 182 pos += 1 # skip ahead
183 183 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
184 184 yield ('##', None, pos)
185 185 pos += 1 # skip ahead
186 186 elif c in "():,-|&+!~^%": # handle simple operators
187 187 yield (c, None, pos)
188 188 elif (c in '"\'' or c == 'r' and
189 189 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
190 190 if c == 'r':
191 191 pos += 1
192 192 c = program[pos]
193 193 decode = lambda x: x
194 194 else:
195 195 decode = lambda x: x.decode('string-escape')
196 196 pos += 1
197 197 s = pos
198 198 while pos < l: # find closing quote
199 199 d = program[pos]
200 200 if d == '\\': # skip over escaped characters
201 201 pos += 2
202 202 continue
203 203 if d == c:
204 204 yield ('string', decode(program[s:pos]), s)
205 205 break
206 206 pos += 1
207 207 else:
208 208 raise error.ParseError(_("unterminated string"), s)
209 209 # gather up a symbol/keyword
210 210 elif c in syminitletters:
211 211 s = pos
212 212 pos += 1
213 213 while pos < l: # find end of symbol
214 214 d = program[pos]
215 215 if d not in symletters:
216 216 break
217 217 if d == '.' and program[pos - 1] == '.': # special case for ..
218 218 pos -= 1
219 219 break
220 220 pos += 1
221 221 sym = program[s:pos]
222 222 if sym in keywords: # operator keywords
223 223 yield (sym, None, s)
224 224 elif '-' in sym:
225 225 # some jerk gave us foo-bar-baz, try to check if it's a symbol
226 226 if lookup and lookup(sym):
227 227 # looks like a real symbol
228 228 yield ('symbol', sym, s)
229 229 else:
230 230 # looks like an expression
231 231 parts = sym.split('-')
232 232 for p in parts[:-1]:
233 233 if p: # possible consecutive -
234 234 yield ('symbol', p, s)
235 235 s += len(p)
236 236 yield ('-', None, pos)
237 237 s += 1
238 238 if parts[-1]: # possible trailing -
239 239 yield ('symbol', parts[-1], s)
240 240 else:
241 241 yield ('symbol', sym, s)
242 242 pos -= 1
243 243 else:
244 244 raise error.ParseError(_("syntax error in revset '%s'") %
245 245 program, pos)
246 246 pos += 1
247 247 yield ('end', None, pos)
248 248
249 249 def parseerrordetail(inst):
250 250 """Compose error message from specified ParseError object
251 251 """
252 252 if len(inst.args) > 1:
253 253 return _('at %s: %s') % (inst.args[1], inst.args[0])
254 254 else:
255 255 return inst.args[0]
256 256
257 257 # helpers
258 258
259 259 def getstring(x, err):
260 260 if x and (x[0] == 'string' or x[0] == 'symbol'):
261 261 return x[1]
262 262 raise error.ParseError(err)
263 263
264 264 def getlist(x):
265 265 if not x:
266 266 return []
267 267 if x[0] == 'list':
268 268 return getlist(x[1]) + [x[2]]
269 269 return [x]
270 270
271 271 def getargs(x, min, max, err):
272 272 l = getlist(x)
273 273 if len(l) < min or (max >= 0 and len(l) > max):
274 274 raise error.ParseError(err)
275 275 return l
276 276
277 277 def isvalidsymbol(tree):
278 278 """Examine whether specified ``tree`` is valid ``symbol`` or not
279 279 """
280 280 return tree[0] == 'symbol' and len(tree) > 1
281 281
282 282 def getsymbol(tree):
283 283 """Get symbol name from valid ``symbol`` in ``tree``
284 284
285 285 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
286 286 """
287 287 return tree[1]
288 288
289 289 def isvalidfunc(tree):
290 290 """Examine whether specified ``tree`` is valid ``func`` or not
291 291 """
292 292 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
293 293
294 294 def getfuncname(tree):
295 295 """Get function name from valid ``func`` in ``tree``
296 296
297 297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 298 """
299 299 return getsymbol(tree[1])
300 300
301 301 def getfuncargs(tree):
302 302 """Get list of function arguments from valid ``func`` in ``tree``
303 303
304 304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 305 """
306 306 if len(tree) > 2:
307 307 return getlist(tree[2])
308 308 else:
309 309 return []
310 310
311 311 def getset(repo, subset, x):
312 312 if not x:
313 313 raise error.ParseError(_("missing argument"))
314 314 s = methods[x[0]](repo, subset, *x[1:])
315 315 if util.safehasattr(s, 'isascending'):
316 316 return s
317 317 return baseset(s)
318 318
319 319 def _getrevsource(repo, r):
320 320 extra = repo[r].extra()
321 321 for label in ('source', 'transplant_source', 'rebase_source'):
322 322 if label in extra:
323 323 try:
324 324 return repo[extra[label]].rev()
325 325 except error.RepoLookupError:
326 326 pass
327 327 return None
328 328
329 329 # operator methods
330 330
331 331 def stringset(repo, subset, x):
332 332 x = repo[x].rev()
333 333 if x in subset:
334 334 return baseset([x])
335 335 return baseset()
336 336
337 337 def rangeset(repo, subset, x, y):
338 338 m = getset(repo, fullreposet(repo), x)
339 339 n = getset(repo, fullreposet(repo), y)
340 340
341 341 if not m or not n:
342 342 return baseset()
343 343 m, n = m.first(), n.last()
344 344
345 345 if m < n:
346 346 r = spanset(repo, m, n + 1)
347 347 else:
348 348 r = spanset(repo, m, n - 1)
349 349 return r & subset
350 350
351 351 def dagrange(repo, subset, x, y):
352 352 r = fullreposet(repo)
353 353 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
354 354 return xs & subset
355 355
356 356 def andset(repo, subset, x, y):
357 357 return getset(repo, getset(repo, subset, x), y)
358 358
359 359 def orset(repo, subset, x, y):
360 360 xl = getset(repo, subset, x)
361 361 yl = getset(repo, subset - xl, y)
362 362 return xl + yl
363 363
364 364 def notset(repo, subset, x):
365 365 return subset - getset(repo, subset, x)
366 366
367 367 def listset(repo, subset, a, b):
368 368 raise error.ParseError(_("can't use a list in this context"))
369 369
370 370 def func(repo, subset, a, b):
371 371 if a[0] == 'symbol' and a[1] in symbols:
372 372 return symbols[a[1]](repo, subset, b)
373 373 raise error.UnknownIdentifier(a[1], symbols.keys())
374 374
375 375 # functions
376 376
377 377 def adds(repo, subset, x):
378 378 """``adds(pattern)``
379 379 Changesets that add a file matching pattern.
380 380
381 381 The pattern without explicit kind like ``glob:`` is expected to be
382 382 relative to the current directory and match against a file or a
383 383 directory.
384 384 """
385 385 # i18n: "adds" is a keyword
386 386 pat = getstring(x, _("adds requires a pattern"))
387 387 return checkstatus(repo, subset, pat, 1)
388 388
389 389 def ancestor(repo, subset, x):
390 390 """``ancestor(*changeset)``
391 391 A greatest common ancestor of the changesets.
392 392
393 393 Accepts 0 or more changesets.
394 394 Will return empty list when passed no args.
395 395 Greatest common ancestor of a single changeset is that changeset.
396 396 """
397 397 # i18n: "ancestor" is a keyword
398 398 l = getlist(x)
399 399 rl = fullreposet(repo)
400 400 anc = None
401 401
402 402 # (getset(repo, rl, i) for i in l) generates a list of lists
403 403 for revs in (getset(repo, rl, i) for i in l):
404 404 for r in revs:
405 405 if anc is None:
406 406 anc = repo[r]
407 407 else:
408 408 anc = anc.ancestor(repo[r])
409 409
410 410 if anc is not None and anc.rev() in subset:
411 411 return baseset([anc.rev()])
412 412 return baseset()
413 413
414 414 def _ancestors(repo, subset, x, followfirst=False):
415 415 heads = getset(repo, fullreposet(repo), x)
416 416 if not heads:
417 417 return baseset()
418 418 s = _revancestors(repo, heads, followfirst)
419 419 return subset & s
420 420
421 421 def ancestors(repo, subset, x):
422 422 """``ancestors(set)``
423 423 Changesets that are ancestors of a changeset in set.
424 424 """
425 425 return _ancestors(repo, subset, x)
426 426
427 427 def _firstancestors(repo, subset, x):
428 428 # ``_firstancestors(set)``
429 429 # Like ``ancestors(set)`` but follows only the first parents.
430 430 return _ancestors(repo, subset, x, followfirst=True)
431 431
432 432 def ancestorspec(repo, subset, x, n):
433 433 """``set~n``
434 434 Changesets that are the Nth ancestor (first parents only) of a changeset
435 435 in set.
436 436 """
437 437 try:
438 438 n = int(n[1])
439 439 except (TypeError, ValueError):
440 440 raise error.ParseError(_("~ expects a number"))
441 441 ps = set()
442 442 cl = repo.changelog
443 443 for r in getset(repo, fullreposet(repo), x):
444 444 for i in range(n):
445 445 r = cl.parentrevs(r)[0]
446 446 ps.add(r)
447 447 return subset & ps
448 448
449 449 def author(repo, subset, x):
450 450 """``author(string)``
451 451 Alias for ``user(string)``.
452 452 """
453 453 # i18n: "author" is a keyword
454 454 n = encoding.lower(getstring(x, _("author requires a string")))
455 455 kind, pattern, matcher = _substringmatcher(n)
456 456 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
457 457
458 458 def bisect(repo, subset, x):
459 459 """``bisect(string)``
460 460 Changesets marked in the specified bisect status:
461 461
462 462 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
463 463 - ``goods``, ``bads`` : csets topologically good/bad
464 464 - ``range`` : csets taking part in the bisection
465 465 - ``pruned`` : csets that are goods, bads or skipped
466 466 - ``untested`` : csets whose fate is yet unknown
467 467 - ``ignored`` : csets ignored due to DAG topology
468 468 - ``current`` : the cset currently being bisected
469 469 """
470 470 # i18n: "bisect" is a keyword
471 471 status = getstring(x, _("bisect requires a string")).lower()
472 472 state = set(hbisect.get(repo, status))
473 473 return subset & state
474 474
475 475 # Backward-compatibility
476 476 # - no help entry so that we do not advertise it any more
477 477 def bisected(repo, subset, x):
478 478 return bisect(repo, subset, x)
479 479
480 480 def bookmark(repo, subset, x):
481 481 """``bookmark([name])``
482 482 The named bookmark or all bookmarks.
483 483
484 484 If `name` starts with `re:`, the remainder of the name is treated as
485 485 a regular expression. To match a bookmark that actually starts with `re:`,
486 486 use the prefix `literal:`.
487 487 """
488 488 # i18n: "bookmark" is a keyword
489 489 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
490 490 if args:
491 491 bm = getstring(args[0],
492 492 # i18n: "bookmark" is a keyword
493 493 _('the argument to bookmark must be a string'))
494 494 kind, pattern, matcher = _stringmatcher(bm)
495 495 bms = set()
496 496 if kind == 'literal':
497 497 bmrev = repo._bookmarks.get(pattern, None)
498 498 if not bmrev:
499 499 raise error.RepoLookupError(_("bookmark '%s' does not exist")
500 500 % bm)
501 501 bms.add(repo[bmrev].rev())
502 502 else:
503 503 matchrevs = set()
504 504 for name, bmrev in repo._bookmarks.iteritems():
505 505 if matcher(name):
506 506 matchrevs.add(bmrev)
507 507 if not matchrevs:
508 508 raise error.RepoLookupError(_("no bookmarks exist"
509 509 " that match '%s'") % pattern)
510 510 for bmrev in matchrevs:
511 511 bms.add(repo[bmrev].rev())
512 512 else:
513 513 bms = set([repo[r].rev()
514 514 for r in repo._bookmarks.values()])
515 515 bms -= set([node.nullrev])
516 516 return subset & bms
517 517
518 518 def branch(repo, subset, x):
519 519 """``branch(string or set)``
520 520 All changesets belonging to the given branch or the branches of the given
521 521 changesets.
522 522
523 523 If `string` starts with `re:`, the remainder of the name is treated as
524 524 a regular expression. To match a branch that actually starts with `re:`,
525 525 use the prefix `literal:`.
526 526 """
527 527 getbi = repo.revbranchcache().branchinfo
528 528
529 529 try:
530 530 b = getstring(x, '')
531 531 except error.ParseError:
532 532 # not a string, but another revspec, e.g. tip()
533 533 pass
534 534 else:
535 535 kind, pattern, matcher = _stringmatcher(b)
536 536 if kind == 'literal':
537 537 # note: falls through to the revspec case if no branch with
538 538 # this name exists
539 539 if pattern in repo.branchmap():
540 540 return subset.filter(lambda r: matcher(getbi(r)[0]))
541 541 else:
542 542 return subset.filter(lambda r: matcher(getbi(r)[0]))
543 543
544 544 s = getset(repo, fullreposet(repo), x)
545 545 b = set()
546 546 for r in s:
547 547 b.add(getbi(r)[0])
548 548 c = s.__contains__
549 549 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
550 550
551 551 def bumped(repo, subset, x):
552 552 """``bumped()``
553 553 Mutable changesets marked as successors of public changesets.
554 554
555 555 Only non-public and non-obsolete changesets can be `bumped`.
556 556 """
557 557 # i18n: "bumped" is a keyword
558 558 getargs(x, 0, 0, _("bumped takes no arguments"))
559 559 bumped = obsmod.getrevs(repo, 'bumped')
560 560 return subset & bumped
561 561
562 562 def bundle(repo, subset, x):
563 563 """``bundle()``
564 564 Changesets in the bundle.
565 565
566 566 Bundle must be specified by the -R option."""
567 567
568 568 try:
569 569 bundlerevs = repo.changelog.bundlerevs
570 570 except AttributeError:
571 571 raise util.Abort(_("no bundle provided - specify with -R"))
572 572 return subset & bundlerevs
573 573
574 574 def checkstatus(repo, subset, pat, field):
575 575 hasset = matchmod.patkind(pat) == 'set'
576 576
577 577 mcache = [None]
578 578 def matches(x):
579 579 c = repo[x]
580 580 if not mcache[0] or hasset:
581 581 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
582 582 m = mcache[0]
583 583 fname = None
584 584 if not m.anypats() and len(m.files()) == 1:
585 585 fname = m.files()[0]
586 586 if fname is not None:
587 587 if fname not in c.files():
588 588 return False
589 589 else:
590 590 for f in c.files():
591 591 if m(f):
592 592 break
593 593 else:
594 594 return False
595 595 files = repo.status(c.p1().node(), c.node())[field]
596 596 if fname is not None:
597 597 if fname in files:
598 598 return True
599 599 else:
600 600 for f in files:
601 601 if m(f):
602 602 return True
603 603
604 604 return subset.filter(matches)
605 605
606 606 def _children(repo, narrow, parentset):
607 607 cs = set()
608 608 if not parentset:
609 609 return baseset(cs)
610 610 pr = repo.changelog.parentrevs
611 611 minrev = min(parentset)
612 612 for r in narrow:
613 613 if r <= minrev:
614 614 continue
615 615 for p in pr(r):
616 616 if p in parentset:
617 617 cs.add(r)
618 618 return baseset(cs)
619 619
620 620 def children(repo, subset, x):
621 621 """``children(set)``
622 622 Child changesets of changesets in set.
623 623 """
624 624 s = getset(repo, fullreposet(repo), x)
625 625 cs = _children(repo, subset, s)
626 626 return subset & cs
627 627
628 628 def closed(repo, subset, x):
629 629 """``closed()``
630 630 Changeset is closed.
631 631 """
632 632 # i18n: "closed" is a keyword
633 633 getargs(x, 0, 0, _("closed takes no arguments"))
634 634 return subset.filter(lambda r: repo[r].closesbranch())
635 635
636 636 def contains(repo, subset, x):
637 637 """``contains(pattern)``
638 638 The revision's manifest contains a file matching pattern (but might not
639 639 modify it). See :hg:`help patterns` for information about file patterns.
640 640
641 641 The pattern without explicit kind like ``glob:`` is expected to be
642 642 relative to the current directory and match against a file exactly
643 643 for efficiency.
644 644 """
645 645 # i18n: "contains" is a keyword
646 646 pat = getstring(x, _("contains requires a pattern"))
647 647
648 648 def matches(x):
649 649 if not matchmod.patkind(pat):
650 650 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
651 651 if pats in repo[x]:
652 652 return True
653 653 else:
654 654 c = repo[x]
655 655 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
656 656 for f in c.manifest():
657 657 if m(f):
658 658 return True
659 659 return False
660 660
661 661 return subset.filter(matches)
662 662
663 663 def converted(repo, subset, x):
664 664 """``converted([id])``
665 665 Changesets converted from the given identifier in the old repository if
666 666 present, or all converted changesets if no identifier is specified.
667 667 """
668 668
669 669 # There is exactly no chance of resolving the revision, so do a simple
670 670 # string compare and hope for the best
671 671
672 672 rev = None
673 673 # i18n: "converted" is a keyword
674 674 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
675 675 if l:
676 676 # i18n: "converted" is a keyword
677 677 rev = getstring(l[0], _('converted requires a revision'))
678 678
679 679 def _matchvalue(r):
680 680 source = repo[r].extra().get('convert_revision', None)
681 681 return source is not None and (rev is None or source.startswith(rev))
682 682
683 683 return subset.filter(lambda r: _matchvalue(r))
684 684
685 685 def date(repo, subset, x):
686 686 """``date(interval)``
687 687 Changesets within the interval, see :hg:`help dates`.
688 688 """
689 689 # i18n: "date" is a keyword
690 690 ds = getstring(x, _("date requires a string"))
691 691 dm = util.matchdate(ds)
692 692 return subset.filter(lambda x: dm(repo[x].date()[0]))
693 693
694 694 def desc(repo, subset, x):
695 695 """``desc(string)``
696 696 Search commit message for string. The match is case-insensitive.
697 697 """
698 698 # i18n: "desc" is a keyword
699 699 ds = encoding.lower(getstring(x, _("desc requires a string")))
700 700
701 701 def matches(x):
702 702 c = repo[x]
703 703 return ds in encoding.lower(c.description())
704 704
705 705 return subset.filter(matches)
706 706
707 707 def _descendants(repo, subset, x, followfirst=False):
708 708 roots = getset(repo, fullreposet(repo), x)
709 709 if not roots:
710 710 return baseset()
711 711 s = _revdescendants(repo, roots, followfirst)
712 712
713 713 # Both sets need to be ascending in order to lazily return the union
714 714 # in the correct order.
715 715 base = subset & roots
716 716 desc = subset & s
717 717 result = base + desc
718 718 if subset.isascending():
719 719 result.sort()
720 720 elif subset.isdescending():
721 721 result.sort(reverse=True)
722 722 else:
723 723 result = subset & result
724 724 return result
725 725
726 726 def descendants(repo, subset, x):
727 727 """``descendants(set)``
728 728 Changesets which are descendants of changesets in set.
729 729 """
730 730 return _descendants(repo, subset, x)
731 731
732 732 def _firstdescendants(repo, subset, x):
733 733 # ``_firstdescendants(set)``
734 734 # Like ``descendants(set)`` but follows only the first parents.
735 735 return _descendants(repo, subset, x, followfirst=True)
736 736
737 737 def destination(repo, subset, x):
738 738 """``destination([set])``
739 739 Changesets that were created by a graft, transplant or rebase operation,
740 740 with the given revisions specified as the source. Omitting the optional set
741 741 is the same as passing all().
742 742 """
743 743 if x is not None:
744 744 sources = getset(repo, fullreposet(repo), x)
745 745 else:
746 746 sources = fullreposet(repo)
747 747
748 748 dests = set()
749 749
750 750 # subset contains all of the possible destinations that can be returned, so
751 751 # iterate over them and see if their source(s) were provided in the arg set.
752 752 # Even if the immediate src of r is not in the arg set, src's source (or
753 753 # further back) may be. Scanning back further than the immediate src allows
754 754 # transitive transplants and rebases to yield the same results as transitive
755 755 # grafts.
756 756 for r in subset:
757 757 src = _getrevsource(repo, r)
758 758 lineage = None
759 759
760 760 while src is not None:
761 761 if lineage is None:
762 762 lineage = list()
763 763
764 764 lineage.append(r)
765 765
766 766 # The visited lineage is a match if the current source is in the arg
767 767 # set. Since every candidate dest is visited by way of iterating
768 768 # subset, any dests further back in the lineage will be tested by a
769 769 # different iteration over subset. Likewise, if the src was already
770 770 # selected, the current lineage can be selected without going back
771 771 # further.
772 772 if src in sources or src in dests:
773 773 dests.update(lineage)
774 774 break
775 775
776 776 r = src
777 777 src = _getrevsource(repo, r)
778 778
779 779 return subset.filter(dests.__contains__)
780 780
781 781 def divergent(repo, subset, x):
782 782 """``divergent()``
783 783 Final successors of changesets with an alternative set of final successors.
784 784 """
785 785 # i18n: "divergent" is a keyword
786 786 getargs(x, 0, 0, _("divergent takes no arguments"))
787 787 divergent = obsmod.getrevs(repo, 'divergent')
788 788 return subset & divergent
789 789
790 790 def draft(repo, subset, x):
791 791 """``draft()``
792 792 Changeset in draft phase."""
793 793 # i18n: "draft" is a keyword
794 794 getargs(x, 0, 0, _("draft takes no arguments"))
795 795 phase = repo._phasecache.phase
796 796 target = phases.draft
797 797 condition = lambda r: phase(repo, r) == target
798 798 return subset.filter(condition, cache=False)
799 799
800 800 def extinct(repo, subset, x):
801 801 """``extinct()``
802 802 Obsolete changesets with obsolete descendants only.
803 803 """
804 804 # i18n: "extinct" is a keyword
805 805 getargs(x, 0, 0, _("extinct takes no arguments"))
806 806 extincts = obsmod.getrevs(repo, 'extinct')
807 807 return subset & extincts
808 808
809 809 def extra(repo, subset, x):
810 810 """``extra(label, [value])``
811 811 Changesets with the given label in the extra metadata, with the given
812 812 optional value.
813 813
814 814 If `value` starts with `re:`, the remainder of the value is treated as
815 815 a regular expression. To match a value that actually starts with `re:`,
816 816 use the prefix `literal:`.
817 817 """
818 818
819 819 # i18n: "extra" is a keyword
820 820 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
821 821 # i18n: "extra" is a keyword
822 822 label = getstring(l[0], _('first argument to extra must be a string'))
823 823 value = None
824 824
825 825 if len(l) > 1:
826 826 # i18n: "extra" is a keyword
827 827 value = getstring(l[1], _('second argument to extra must be a string'))
828 828 kind, value, matcher = _stringmatcher(value)
829 829
830 830 def _matchvalue(r):
831 831 extra = repo[r].extra()
832 832 return label in extra and (value is None or matcher(extra[label]))
833 833
834 834 return subset.filter(lambda r: _matchvalue(r))
835 835
836 836 def filelog(repo, subset, x):
837 837 """``filelog(pattern)``
838 838 Changesets connected to the specified filelog.
839 839
840 840 For performance reasons, visits only revisions mentioned in the file-level
841 841 filelog, rather than filtering through all changesets (much faster, but
842 842 doesn't include deletes or duplicate changes). For a slower, more accurate
843 843 result, use ``file()``.
844 844
845 845 The pattern without explicit kind like ``glob:`` is expected to be
846 846 relative to the current directory and match against a file exactly
847 847 for efficiency.
848 848
849 849 If some linkrev points to revisions filtered by the current repoview, we'll
850 850 work around it to return a non-filtered value.
851 851 """
852 852
853 853 # i18n: "filelog" is a keyword
854 854 pat = getstring(x, _("filelog requires a pattern"))
855 855 s = set()
856 856 cl = repo.changelog
857 857
858 858 if not matchmod.patkind(pat):
859 859 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
860 860 files = [f]
861 861 else:
862 862 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
863 863 files = (f for f in repo[None] if m(f))
864 864
865 865 for f in files:
866 866 backrevref = {} # final value for: filerev -> changerev
867 867 lowestchild = {} # lowest known filerev child of a filerev
868 868 delayed = [] # filerev with filtered linkrev, for post-processing
869 869 lowesthead = None # cache for manifest content of all head revisions
870 870 fl = repo.file(f)
871 871 for fr in list(fl):
872 872 rev = fl.linkrev(fr)
873 873 if rev not in cl:
874 874 # changerev pointed in linkrev is filtered
875 875 # record it for post processing.
876 876 delayed.append((fr, rev))
877 877 continue
878 878 for p in fl.parentrevs(fr):
879 879 if 0 <= p and p not in lowestchild:
880 880 lowestchild[p] = fr
881 881 backrevref[fr] = rev
882 882 s.add(rev)
883 883
884 884 # Post-processing of all filerevs we skipped because they were
885 885 # filtered. If such filerevs have known and unfiltered children, this
886 886 # means they have an unfiltered appearance out there. We'll use linkrev
887 887 # adjustment to find one of these appearances. The lowest known child
888 888 # will be used as a starting point because it is the best upper-bound we
889 889 # have.
890 890 #
891 891 # This approach will fail when an unfiltered but linkrev-shadowed
892 892 # appearance exists in a head changeset without unfiltered filerev
893 893 # children anywhere.
894 894 while delayed:
895 895 # must be a descending iteration. To slowly fill lowest child
896 896 # information that is of potential use by the next item.
897 897 fr, rev = delayed.pop()
898 898 lkr = rev
899 899
900 900 child = lowestchild.get(fr)
901 901
902 902 if child is None:
903 903 # search for existence of this file revision in a head revision.
904 904 # There are three possibilities:
905 905 # - the revision exists in a head and we can find an
906 906 # introduction from there,
907 907 # - the revision does not exist in a head because it has been
908 908 # changed since its introduction: we would have found a child
909 909 # and be in the other 'else' clause,
910 910 # - all versions of the revision are hidden.
911 911 if lowesthead is None:
912 912 lowesthead = {}
913 913 for h in repo.heads():
914 914 fnode = repo[h].manifest().get(f)
915 915 if fnode is not None:
916 916 lowesthead[fl.rev(fnode)] = h
917 917 headrev = lowesthead.get(fr)
918 918 if headrev is None:
919 919 # content is nowhere unfiltered
920 920 continue
921 921 rev = repo[headrev][f].introrev()
922 922 else:
923 923 # the lowest known child is a good upper bound
924 924 childcrev = backrevref[child]
925 925 # XXX this does not guarantee returning the lowest
926 926 # introduction of this revision, but this gives a
927 927 # result which is a good start and will fit in most
928 928 # cases. We probably need to fix the multiple
929 929 # introductions case properly (report each
930 930 # introduction, even for identical file revisions)
931 931 # once and for all at some point anyway.
932 932 for p in repo[childcrev][f].parents():
933 933 if p.filerev() == fr:
934 934 rev = p.rev()
935 935 break
936 936 if rev == lkr: # no shadowed entry found
937 937 # XXX This should never happen unless some manifest points
938 938 # to biggish file revisions (like a revision that uses a
939 939 # parent that never appears in the manifest ancestors)
940 940 continue
941 941
942 942 # Fill the data for the next iteration.
943 943 for p in fl.parentrevs(fr):
944 944 if 0 <= p and p not in lowestchild:
945 945 lowestchild[p] = fr
946 946 backrevref[fr] = rev
947 947 s.add(rev)
948 948
949 949 return subset & s
950 950
951 951 def first(repo, subset, x):
952 952 """``first(set, [n])``
953 953 An alias for limit().
954 954 """
955 955 return limit(repo, subset, x)
956 956
957 957 def _follow(repo, subset, x, name, followfirst=False):
958 958 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
959 959 c = repo['.']
960 960 if l:
961 961 x = getstring(l[0], _("%s expected a filename") % name)
962 962 if x in c:
963 963 cx = c[x]
964 964 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
965 965 # include the revision responsible for the most recent version
966 966 s.add(cx.introrev())
967 967 else:
968 968 return baseset()
969 969 else:
970 970 s = _revancestors(repo, baseset([c.rev()]), followfirst)
971 971
972 972 return subset & s
973 973
974 974 def follow(repo, subset, x):
975 975 """``follow([file])``
976 976 An alias for ``::.`` (ancestors of the working directory's first parent).
977 977 If a filename is specified, the history of the given file is followed,
978 978 including copies.
979 979 """
980 980 return _follow(repo, subset, x, 'follow')
981 981
982 982 def _followfirst(repo, subset, x):
983 983 # ``followfirst([file])``
984 984 # Like ``follow([file])`` but follows only the first parent of
985 985 # every revision or file revision.
986 986 return _follow(repo, subset, x, '_followfirst', followfirst=True)
987 987
988 988 def getall(repo, subset, x):
989 989 """``all()``
990 990 All changesets, the same as ``0:tip``.
991 991 """
992 992 # i18n: "all" is a keyword
993 993 getargs(x, 0, 0, _("all takes no arguments"))
994 994 return subset & spanset(repo) # drop "null" if any
995 995
996 996 def grep(repo, subset, x):
997 997 """``grep(regex)``
998 998 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
999 999 to ensure special escape characters are handled correctly. Unlike
1000 1000 ``keyword(string)``, the match is case-sensitive.
1001 1001 """
1002 1002 try:
1003 1003 # i18n: "grep" is a keyword
1004 1004 gr = re.compile(getstring(x, _("grep requires a string")))
1005 1005 except re.error, e:
1006 1006 raise error.ParseError(_('invalid match pattern: %s') % e)
1007 1007
1008 1008 def matches(x):
1009 1009 c = repo[x]
1010 1010 for e in c.files() + [c.user(), c.description()]:
1011 1011 if gr.search(e):
1012 1012 return True
1013 1013 return False
1014 1014
1015 1015 return subset.filter(matches)
1016 1016
1017 1017 def _matchfiles(repo, subset, x):
1018 1018 # _matchfiles takes a revset list of prefixed arguments:
1019 1019 #
1020 1020 # [p:foo, i:bar, x:baz]
1021 1021 #
1022 1022 # builds a match object from them and filters subset. Allowed
1023 1023 # prefixes are 'p:' for regular patterns, 'i:' for include
1024 1024 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1025 1025 # a revision identifier, or the empty string to reference the
1026 1026 # working directory, from which the match object is
1027 1027 # initialized. Use 'd:' to set the default matching mode, default
1028 1028 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1029 1029
1030 1030 # i18n: "_matchfiles" is a keyword
1031 1031 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1032 1032 pats, inc, exc = [], [], []
1033 1033 rev, default = None, None
1034 1034 for arg in l:
1035 1035 # i18n: "_matchfiles" is a keyword
1036 1036 s = getstring(arg, _("_matchfiles requires string arguments"))
1037 1037 prefix, value = s[:2], s[2:]
1038 1038 if prefix == 'p:':
1039 1039 pats.append(value)
1040 1040 elif prefix == 'i:':
1041 1041 inc.append(value)
1042 1042 elif prefix == 'x:':
1043 1043 exc.append(value)
1044 1044 elif prefix == 'r:':
1045 1045 if rev is not None:
1046 1046 # i18n: "_matchfiles" is a keyword
1047 1047 raise error.ParseError(_('_matchfiles expected at most one '
1048 1048 'revision'))
1049 1049 if value != '': # empty means working directory; leave rev as None
1050 1050 rev = value
1051 1051 elif prefix == 'd:':
1052 1052 if default is not None:
1053 1053 # i18n: "_matchfiles" is a keyword
1054 1054 raise error.ParseError(_('_matchfiles expected at most one '
1055 1055 'default mode'))
1056 1056 default = value
1057 1057 else:
1058 1058 # i18n: "_matchfiles" is a keyword
1059 1059 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1060 1060 if not default:
1061 1061 default = 'glob'
1062 1062
1063 1063 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1064 1064 exclude=exc, ctx=repo[rev], default=default)
1065 1065
1066 1066 def matches(x):
1067 1067 for f in repo[x].files():
1068 1068 if m(f):
1069 1069 return True
1070 1070 return False
1071 1071
1072 1072 return subset.filter(matches)
1073 1073
1074 1074 def hasfile(repo, subset, x):
1075 1075 """``file(pattern)``
1076 1076 Changesets affecting files matched by pattern.
1077 1077
1078 1078 For a faster but less accurate result, consider using ``filelog()``
1079 1079 instead.
1080 1080
1081 1081 This predicate uses ``glob:`` as the default kind of pattern.
1082 1082 """
1083 1083 # i18n: "file" is a keyword
1084 1084 pat = getstring(x, _("file requires a pattern"))
1085 1085 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1086 1086
1087 1087 def head(repo, subset, x):
1088 1088 """``head()``
1089 1089 Changeset is a named branch head.
1090 1090 """
1091 1091 # i18n: "head" is a keyword
1092 1092 getargs(x, 0, 0, _("head takes no arguments"))
1093 1093 hs = set()
1094 1094 for b, ls in repo.branchmap().iteritems():
1095 1095 hs.update(repo[h].rev() for h in ls)
1096 1096 return baseset(hs).filter(subset.__contains__)
1097 1097
1098 1098 def heads(repo, subset, x):
1099 1099 """``heads(set)``
1100 1100 Members of set with no children in set.
1101 1101 """
1102 1102 s = getset(repo, subset, x)
1103 1103 ps = parents(repo, subset, x)
1104 1104 return s - ps
1105 1105
1106 1106 def hidden(repo, subset, x):
1107 1107 """``hidden()``
1108 1108 Hidden changesets.
1109 1109 """
1110 1110 # i18n: "hidden" is a keyword
1111 1111 getargs(x, 0, 0, _("hidden takes no arguments"))
1112 1112 hiddenrevs = repoview.filterrevs(repo, 'visible')
1113 1113 return subset & hiddenrevs
1114 1114
1115 1115 def keyword(repo, subset, x):
1116 1116 """``keyword(string)``
1117 1117 Search commit message, user name, and names of changed files for
1118 1118 string. The match is case-insensitive.
1119 1119 """
1120 1120 # i18n: "keyword" is a keyword
1121 1121 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1122 1122
1123 1123 def matches(r):
1124 1124 c = repo[r]
1125 1125 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1126 1126 c.description()])
1127 1127
1128 1128 return subset.filter(matches)
1129 1129
1130 1130 def limit(repo, subset, x):
1131 1131 """``limit(set, [n])``
1132 1132 First n members of set, defaulting to 1.
1133 1133 """
1134 1134 # i18n: "limit" is a keyword
1135 1135 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1136 1136 try:
1137 1137 lim = 1
1138 1138 if len(l) == 2:
1139 1139 # i18n: "limit" is a keyword
1140 1140 lim = int(getstring(l[1], _("limit requires a number")))
1141 1141 except (TypeError, ValueError):
1142 1142 # i18n: "limit" is a keyword
1143 1143 raise error.ParseError(_("limit expects a number"))
1144 1144 ss = subset
1145 1145 os = getset(repo, fullreposet(repo), l[0])
1146 1146 result = []
1147 1147 it = iter(os)
1148 1148 for x in xrange(lim):
1149 1149 try:
1150 1150 y = it.next()
1151 1151 if y in ss:
1152 1152 result.append(y)
1153 1153 except (StopIteration):
1154 1154 break
1155 1155 return baseset(result)
1156 1156
1157 1157 def last(repo, subset, x):
1158 1158 """``last(set, [n])``
1159 1159 Last n members of set, defaulting to 1.
1160 1160 """
1161 1161 # i18n: "last" is a keyword
1162 1162 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1163 1163 try:
1164 1164 lim = 1
1165 1165 if len(l) == 2:
1166 1166 # i18n: "last" is a keyword
1167 1167 lim = int(getstring(l[1], _("last requires a number")))
1168 1168 except (TypeError, ValueError):
1169 1169 # i18n: "last" is a keyword
1170 1170 raise error.ParseError(_("last expects a number"))
1171 1171 ss = subset
1172 1172 os = getset(repo, fullreposet(repo), l[0])
1173 1173 os.reverse()
1174 1174 result = []
1175 1175 it = iter(os)
1176 1176 for x in xrange(lim):
1177 1177 try:
1178 1178 y = it.next()
1179 1179 if y in ss:
1180 1180 result.append(y)
1181 1181 except (StopIteration):
1182 1182 break
1183 1183 return baseset(result)
1184 1184
1185 1185 def maxrev(repo, subset, x):
1186 1186 """``max(set)``
1187 1187 Changeset with highest revision number in set.
1188 1188 """
1189 1189 os = getset(repo, fullreposet(repo), x)
1190 1190 if os:
1191 1191 m = os.max()
1192 1192 if m in subset:
1193 1193 return baseset([m])
1194 1194 return baseset()
1195 1195
1196 1196 def merge(repo, subset, x):
1197 1197 """``merge()``
1198 1198 Changeset is a merge changeset.
1199 1199 """
1200 1200 # i18n: "merge" is a keyword
1201 1201 getargs(x, 0, 0, _("merge takes no arguments"))
1202 1202 cl = repo.changelog
1203 1203 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1204 1204
1205 1205 def branchpoint(repo, subset, x):
1206 1206 """``branchpoint()``
1207 1207 Changesets with more than one child.
1208 1208 """
1209 1209 # i18n: "branchpoint" is a keyword
1210 1210 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1211 1211 cl = repo.changelog
1212 1212 if not subset:
1213 1213 return baseset()
1214 1214 baserev = min(subset)
1215 1215 parentscount = [0]*(len(repo) - baserev)
1216 1216 for r in cl.revs(start=baserev + 1):
1217 1217 for p in cl.parentrevs(r):
1218 1218 if p >= baserev:
1219 1219 parentscount[p - baserev] += 1
1220 1220 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1221 1221
1222 1222 def minrev(repo, subset, x):
1223 1223 """``min(set)``
1224 1224 Changeset with lowest revision number in set.
1225 1225 """
1226 1226 os = getset(repo, fullreposet(repo), x)
1227 1227 if os:
1228 1228 m = os.min()
1229 1229 if m in subset:
1230 1230 return baseset([m])
1231 1231 return baseset()
1232 1232
1233 1233 def modifies(repo, subset, x):
1234 1234 """``modifies(pattern)``
1235 1235 Changesets modifying files matched by pattern.
1236 1236
1237 1237 The pattern without explicit kind like ``glob:`` is expected to be
1238 1238 relative to the current directory and match against a file or a
1239 1239 directory.
1240 1240 """
1241 1241 # i18n: "modifies" is a keyword
1242 1242 pat = getstring(x, _("modifies requires a pattern"))
1243 1243 return checkstatus(repo, subset, pat, 0)
1244 1244
1245 1245 def named(repo, subset, x):
1246 1246 """``named(namespace)``
1247 1247 The changesets in a given namespace.
1248 1248
1249 1249 If `namespace` starts with `re:`, the remainder of the string is treated as
1250 1250 a regular expression. To match a namespace that actually starts with `re:`,
1251 1251 use the prefix `literal:`.
1252 1252 """
1253 1253 # i18n: "named" is a keyword
1254 1254 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1255 1255
1256 1256 ns = getstring(args[0],
1257 1257 # i18n: "named" is a keyword
1258 1258 _('the argument to named must be a string'))
1259 1259 kind, pattern, matcher = _stringmatcher(ns)
1260 1260 namespaces = set()
1261 1261 if kind == 'literal':
1262 1262 if pattern not in repo.names:
1263 1263 raise error.RepoLookupError(_("namespace '%s' does not exist")
1264 1264 % ns)
1265 1265 namespaces.add(repo.names[pattern])
1266 1266 else:
1267 1267 for name, ns in repo.names.iteritems():
1268 1268 if matcher(name):
1269 1269 namespaces.add(ns)
1270 1270 if not namespaces:
1271 1271 raise error.RepoLookupError(_("no namespace exists"
1272 1272 " that match '%s'") % pattern)
1273 1273
1274 1274 names = set()
1275 1275 for ns in namespaces:
1276 1276 for name in ns.listnames(repo):
1277 1277 if name not in ns.deprecated:
1278 1278 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1279 1279
1280 1280 names -= set([node.nullrev])
1281 1281 return subset & names
1282 1282
1283 1283 def node_(repo, subset, x):
1284 1284 """``id(string)``
1285 1285 Revision non-ambiguously specified by the given hex string prefix.
1286 1286 """
1287 1287 # i18n: "id" is a keyword
1288 1288 l = getargs(x, 1, 1, _("id requires one argument"))
1289 1289 # i18n: "id" is a keyword
1290 1290 n = getstring(l[0], _("id requires a string"))
1291 1291 if len(n) == 40:
1292 1292 try:
1293 1293 rn = repo.changelog.rev(node.bin(n))
1294 1294 except (LookupError, TypeError):
1295 1295 rn = None
1296 1296 else:
1297 1297 rn = None
1298 1298 pm = repo.changelog._partialmatch(n)
1299 1299 if pm is not None:
1300 1300 rn = repo.changelog.rev(pm)
1301 1301
1302 1302 if rn is None:
1303 1303 return baseset()
1304 1304 result = baseset([rn])
1305 1305 return result & subset
1306 1306
1307 1307 def obsolete(repo, subset, x):
1308 1308 """``obsolete()``
1309 1309 Mutable changeset with a newer version."""
1310 1310 # i18n: "obsolete" is a keyword
1311 1311 getargs(x, 0, 0, _("obsolete takes no arguments"))
1312 1312 obsoletes = obsmod.getrevs(repo, 'obsolete')
1313 1313 return subset & obsoletes
1314 1314
1315 1315 def only(repo, subset, x):
1316 1316 """``only(set, [set])``
1317 1317 Changesets that are ancestors of the first set that are not ancestors
1318 1318 of any other head in the repo. If a second set is specified, the result
1319 1319 is ancestors of the first set that are not ancestors of the second set
1320 1320 (i.e. ::<set1> - ::<set2>).
1321 1321 """
1322 1322 cl = repo.changelog
1323 1323 # i18n: "only" is a keyword
1324 1324 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1325 1325 include = getset(repo, fullreposet(repo), args[0])
1326 1326 if len(args) == 1:
1327 1327 if not include:
1328 1328 return baseset()
1329 1329
1330 1330 descendants = set(_revdescendants(repo, include, False))
1331 1331 exclude = [rev for rev in cl.headrevs()
1332 1332 if not rev in descendants and not rev in include]
1333 1333 else:
1334 1334 exclude = getset(repo, fullreposet(repo), args[1])
1335 1335
1336 1336 results = set(cl.findmissingrevs(common=exclude, heads=include))
1337 1337 return subset & results
1338 1338
1339 1339 def origin(repo, subset, x):
1340 1340 """``origin([set])``
1341 1341 Changesets that were specified as a source for the grafts, transplants or
1342 1342 rebases that created the given revisions. Omitting the optional set is the
1343 1343 same as passing all(). If a changeset created by these operations is itself
1344 1344 specified as a source for one of these operations, only the source changeset
1345 1345 for the first operation is selected.
1346 1346 """
1347 1347 if x is not None:
1348 1348 dests = getset(repo, fullreposet(repo), x)
1349 1349 else:
1350 1350 dests = fullreposet(repo)
1351 1351
1352 1352 def _firstsrc(rev):
1353 1353 src = _getrevsource(repo, rev)
1354 1354 if src is None:
1355 1355 return None
1356 1356
1357 1357 while True:
1358 1358 prev = _getrevsource(repo, src)
1359 1359
1360 1360 if prev is None:
1361 1361 return src
1362 1362 src = prev
1363 1363
1364 1364 o = set([_firstsrc(r) for r in dests])
1365 1365 o -= set([None])
1366 1366 return subset & o
1367 1367
1368 1368 def outgoing(repo, subset, x):
1369 1369 """``outgoing([path])``
1370 1370 Changesets not found in the specified destination repository, or the
1371 1371 default push location.
1372 1372 """
1373 1373 # Avoid cycles.
1374 1374 import discovery
1375 1375 import hg
1376 1376 # i18n: "outgoing" is a keyword
1377 1377 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1378 1378 # i18n: "outgoing" is a keyword
1379 1379 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1380 1380 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1381 1381 dest, branches = hg.parseurl(dest)
1382 1382 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1383 1383 if revs:
1384 1384 revs = [repo.lookup(rev) for rev in revs]
1385 1385 other = hg.peer(repo, {}, dest)
1386 1386 repo.ui.pushbuffer()
1387 1387 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1388 1388 repo.ui.popbuffer()
1389 1389 cl = repo.changelog
1390 1390 o = set([cl.rev(r) for r in outgoing.missing])
1391 1391 return subset & o
1392 1392
1393 1393 def p1(repo, subset, x):
1394 1394 """``p1([set])``
1395 1395 First parent of changesets in set, or the working directory.
1396 1396 """
1397 1397 if x is None:
1398 1398 p = repo[x].p1().rev()
1399 1399 if p >= 0:
1400 1400 return subset & baseset([p])
1401 1401 return baseset()
1402 1402
1403 1403 ps = set()
1404 1404 cl = repo.changelog
1405 1405 for r in getset(repo, fullreposet(repo), x):
1406 1406 ps.add(cl.parentrevs(r)[0])
1407 1407 ps -= set([node.nullrev])
1408 1408 return subset & ps
1409 1409
1410 1410 def p2(repo, subset, x):
1411 1411 """``p2([set])``
1412 1412 Second parent of changesets in set, or the working directory.
1413 1413 """
1414 1414 if x is None:
1415 1415 ps = repo[x].parents()
1416 1416 try:
1417 1417 p = ps[1].rev()
1418 1418 if p >= 0:
1419 1419 return subset & baseset([p])
1420 1420 return baseset()
1421 1421 except IndexError:
1422 1422 return baseset()
1423 1423
1424 1424 ps = set()
1425 1425 cl = repo.changelog
1426 1426 for r in getset(repo, fullreposet(repo), x):
1427 1427 ps.add(cl.parentrevs(r)[1])
1428 1428 ps -= set([node.nullrev])
1429 1429 return subset & ps
1430 1430
1431 1431 def parents(repo, subset, x):
1432 1432 """``parents([set])``
1433 1433 The set of all parents for all changesets in set, or the working directory.
1434 1434 """
1435 1435 if x is None:
1436 1436 ps = set(p.rev() for p in repo[x].parents())
1437 1437 else:
1438 1438 ps = set()
1439 1439 cl = repo.changelog
1440 1440 for r in getset(repo, fullreposet(repo), x):
1441 1441 ps.update(cl.parentrevs(r))
1442 1442 ps -= set([node.nullrev])
1443 1443 return subset & ps
1444 1444
1445 1445 def parentspec(repo, subset, x, n):
1446 1446 """``set^0``
1447 1447 The set.
1448 1448 ``set^1`` (or ``set^``), ``set^2``
1449 1449 First or second parent, respectively, of all changesets in set.
1450 1450 """
1451 1451 try:
1452 1452 n = int(n[1])
1453 1453 if n not in (0, 1, 2):
1454 1454 raise ValueError
1455 1455 except (TypeError, ValueError):
1456 1456 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1457 1457 ps = set()
1458 1458 cl = repo.changelog
1459 1459 for r in getset(repo, fullreposet(repo), x):
1460 1460 if n == 0:
1461 1461 ps.add(r)
1462 1462 elif n == 1:
1463 1463 ps.add(cl.parentrevs(r)[0])
1464 1464 elif n == 2:
1465 1465 parents = cl.parentrevs(r)
1466 1466 if len(parents) > 1:
1467 1467 ps.add(parents[1])
1468 1468 return subset & ps
1469 1469
1470 1470 def present(repo, subset, x):
1471 1471 """``present(set)``
1472 1472 An empty set, if any revision in set isn't found; otherwise,
1473 1473 all revisions in set.
1474 1474
1475 1475 If any of specified revisions is not present in the local repository,
1476 1476 the query is normally aborted. But this predicate allows the query
1477 1477 to continue even in such cases.
1478 1478 """
1479 1479 try:
1480 1480 return getset(repo, subset, x)
1481 1481 except error.RepoLookupError:
1482 1482 return baseset()
1483 1483
1484 1484 def public(repo, subset, x):
1485 1485 """``public()``
1486 1486 Changeset in public phase."""
1487 1487 # i18n: "public" is a keyword
1488 1488 getargs(x, 0, 0, _("public takes no arguments"))
1489 1489 phase = repo._phasecache.phase
1490 1490 target = phases.public
1491 1491 condition = lambda r: phase(repo, r) == target
1492 1492 return subset.filter(condition, cache=False)
1493 1493
1494 1494 def remote(repo, subset, x):
1495 1495 """``remote([id [,path]])``
1496 1496 Local revision that corresponds to the given identifier in a
1497 1497 remote repository, if present. Here, the '.' identifier is a
1498 1498 synonym for the current local branch.
1499 1499 """
1500 1500
1501 1501 import hg # avoid start-up nasties
1502 1502 # i18n: "remote" is a keyword
1503 1503 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1504 1504
1505 1505 q = '.'
1506 1506 if len(l) > 0:
1507 1507 # i18n: "remote" is a keyword
1508 1508 q = getstring(l[0], _("remote requires a string id"))
1509 1509 if q == '.':
1510 1510 q = repo['.'].branch()
1511 1511
1512 1512 dest = ''
1513 1513 if len(l) > 1:
1514 1514 # i18n: "remote" is a keyword
1515 1515 dest = getstring(l[1], _("remote requires a repository path"))
1516 1516 dest = repo.ui.expandpath(dest or 'default')
1517 1517 dest, branches = hg.parseurl(dest)
1518 1518 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1519 1519 if revs:
1520 1520 revs = [repo.lookup(rev) for rev in revs]
1521 1521 other = hg.peer(repo, {}, dest)
1522 1522 n = other.lookup(q)
1523 1523 if n in repo:
1524 1524 r = repo[n].rev()
1525 1525 if r in subset:
1526 1526 return baseset([r])
1527 1527 return baseset()
1528 1528
1529 1529 def removes(repo, subset, x):
1530 1530 """``removes(pattern)``
1531 1531 Changesets which remove files matching pattern.
1532 1532
1533 1533 The pattern without explicit kind like ``glob:`` is expected to be
1534 1534 relative to the current directory and match against a file or a
1535 1535 directory.
1536 1536 """
1537 1537 # i18n: "removes" is a keyword
1538 1538 pat = getstring(x, _("removes requires a pattern"))
1539 1539 return checkstatus(repo, subset, pat, 2)
1540 1540
1541 1541 def rev(repo, subset, x):
1542 1542 """``rev(number)``
1543 1543 Revision with the given numeric identifier.
1544 1544 """
1545 1545 # i18n: "rev" is a keyword
1546 1546 l = getargs(x, 1, 1, _("rev requires one argument"))
1547 1547 try:
1548 1548 # i18n: "rev" is a keyword
1549 1549 l = int(getstring(l[0], _("rev requires a number")))
1550 1550 except (TypeError, ValueError):
1551 1551 # i18n: "rev" is a keyword
1552 1552 raise error.ParseError(_("rev expects a number"))
1553 1553 if l not in repo.changelog and l != node.nullrev:
1554 1554 return baseset()
1555 1555 return subset & baseset([l])
1556 1556
1557 1557 def matching(repo, subset, x):
1558 1558 """``matching(revision [, field])``
1559 1559 Changesets in which a given set of fields match the set of fields in the
1560 1560 selected revision or set.
1561 1561
1562 1562 To match more than one field pass the list of fields to match separated
1563 1563 by spaces (e.g. ``author description``).
1564 1564
1565 1565 Valid fields are most regular revision fields and some special fields.
1566 1566
1567 1567 Regular revision fields are ``description``, ``author``, ``branch``,
1568 1568 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1569 1569 and ``diff``.
1570 1570 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1571 1571 contents of the revision. Two revisions matching their ``diff`` will
1572 1572 also match their ``files``.
1573 1573
1574 1574 Special fields are ``summary`` and ``metadata``:
1575 1575 ``summary`` matches the first line of the description.
1576 1576 ``metadata`` is equivalent to matching ``description user date``
1577 1577 (i.e. it matches the main metadata fields).
1578 1578
1579 1579 ``metadata`` is the default field which is used when no fields are
1580 1580 specified. You can match more than one field at a time.
1581 1581 """
1582 1582 # i18n: "matching" is a keyword
1583 1583 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1584 1584
1585 1585 revs = getset(repo, fullreposet(repo), l[0])
1586 1586
1587 1587 fieldlist = ['metadata']
1588 1588 if len(l) > 1:
1589 1589 fieldlist = getstring(l[1],
1590 1590 # i18n: "matching" is a keyword
1591 1591 _("matching requires a string "
1592 1592 "as its second argument")).split()
1593 1593
1594 1594 # Make sure that there are no repeated fields,
1595 1595 # expand the 'special' 'metadata' field type
1596 1596 # and check the 'files' whenever we check the 'diff'
1597 1597 fields = []
1598 1598 for field in fieldlist:
1599 1599 if field == 'metadata':
1600 1600 fields += ['user', 'description', 'date']
1601 1601 elif field == 'diff':
1602 1602 # a revision matching the diff must also match the files
1603 1603 # since matching the diff is very costly, make sure to
1604 1604 # also match the files first
1605 1605 fields += ['files', 'diff']
1606 1606 else:
1607 1607 if field == 'author':
1608 1608 field = 'user'
1609 1609 fields.append(field)
1610 1610 fields = set(fields)
1611 1611 if 'summary' in fields and 'description' in fields:
1612 1612 # If a revision matches its description it also matches its summary
1613 1613 fields.discard('summary')
1614 1614
1615 1615 # We may want to match more than one field
1616 1616 # Not all fields take the same amount of time to be matched
1617 1617 # Sort the selected fields in order of increasing matching cost
1618 1618 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1619 1619 'files', 'description', 'substate', 'diff']
1620 1620 def fieldkeyfunc(f):
1621 1621 try:
1622 1622 return fieldorder.index(f)
1623 1623 except ValueError:
1624 1624 # assume an unknown field is very costly
1625 1625 return len(fieldorder)
1626 1626 fields = list(fields)
1627 1627 fields.sort(key=fieldkeyfunc)
1628 1628
1629 1629 # Each field will be matched with its own "getfield" function
1630 1630 # which will be added to the getfieldfuncs array of functions
1631 1631 getfieldfuncs = []
1632 1632 _funcs = {
1633 1633 'user': lambda r: repo[r].user(),
1634 1634 'branch': lambda r: repo[r].branch(),
1635 1635 'date': lambda r: repo[r].date(),
1636 1636 'description': lambda r: repo[r].description(),
1637 1637 'files': lambda r: repo[r].files(),
1638 1638 'parents': lambda r: repo[r].parents(),
1639 1639 'phase': lambda r: repo[r].phase(),
1640 1640 'substate': lambda r: repo[r].substate,
1641 1641 'summary': lambda r: repo[r].description().splitlines()[0],
1642 1642 'diff': lambda r: list(repo[r].diff(git=True),)
1643 1643 }
1644 1644 for info in fields:
1645 1645 getfield = _funcs.get(info, None)
1646 1646 if getfield is None:
1647 1647 raise error.ParseError(
1648 1648 # i18n: "matching" is a keyword
1649 1649 _("unexpected field name passed to matching: %s") % info)
1650 1650 getfieldfuncs.append(getfield)
1651 1651 # convert the getfield array of functions into a "getinfo" function
1652 1652 # which returns an array of field values for a revision
1654 1654 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1655 1655
1656 1656 def matches(x):
1657 1657 for rev in revs:
1658 1658 target = getinfo(rev)
1659 1659 match = True
1660 1660 for n, f in enumerate(getfieldfuncs):
1661 1661 if target[n] != f(x):
1662 1662 match = False
1663 1663 if match:
1664 1664 return True
1665 1665 return False
1666 1666
1667 1667 return subset.filter(matches)
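# For instance, ``matching(tip, "author date")`` should select changesets
# whose author and date both equal those of tip, while a bare
# ``matching(tip)`` falls back to the ``metadata`` fields (user,
# description and date) described in the docstring above.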
1668 1668
1669 1669 def reverse(repo, subset, x):
1670 1670 """``reverse(set)``
1671 1671 Reverse order of set.
1672 1672 """
1673 1673 l = getset(repo, subset, x)
1674 1674 l.reverse()
1675 1675 return l
1676 1676
1677 1677 def roots(repo, subset, x):
1678 1678 """``roots(set)``
1679 1679 Changesets in set with no parent changeset in set.
1680 1680 """
1681 1681 s = getset(repo, fullreposet(repo), x)
1682 1682 subset = subset & s  # baseset([r for r in s if r in subset])
1683 1683 cs = _children(repo, subset, s)
1684 1684 return subset - cs
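# e.g. ``roots(branch(default))`` keeps only those default-branch
# changesets whose parents all lie outside that set.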
1685 1685
1686 1686 def secret(repo, subset, x):
1687 1687 """``secret()``
1688 1688 Changeset in secret phase."""
1689 1689 # i18n: "secret" is a keyword
1690 1690 getargs(x, 0, 0, _("secret takes no arguments"))
1691 1691 phase = repo._phasecache.phase
1692 1692 target = phases.secret
1693 1693 condition = lambda r: phase(repo, r) == target
1694 1694 return subset.filter(condition, cache=False)
1695 1695
1696 1696 def sort(repo, subset, x):
1697 1697 """``sort(set[, [-]key...])``
1698 1698 Sort set by keys. The default sort order is ascending, specify a key
1699 1699 as ``-key`` to sort in descending order.
1700 1700
1701 1701 The keys can be:
1702 1702
1703 1703 - ``rev`` for the revision number,
1704 1704 - ``branch`` for the branch name,
1705 1705 - ``desc`` for the commit message (description),
1706 1706 - ``user`` for user name (``author`` can be used as an alias),
1707 1707 - ``date`` for the commit date
1708 1708 """
1709 1709 # i18n: "sort" is a keyword
1710 1710 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1711 1711 keys = "rev"
1712 1712 if len(l) == 2:
1713 1713 # i18n: "sort" is a keyword
1714 1714 keys = getstring(l[1], _("sort spec must be a string"))
1715 1715
1716 1716 s = l[0]
1717 1717 keys = keys.split()
1718 1718 l = []
1719 1719 def invert(s):
1720 1720 return "".join(chr(255 - ord(c)) for c in s)
1721 1721 revs = getset(repo, subset, s)
1722 1722 if keys == ["rev"]:
1723 1723 revs.sort()
1724 1724 return revs
1725 1725 elif keys == ["-rev"]:
1726 1726 revs.sort(reverse=True)
1727 1727 return revs
1728 1728 for r in revs:
1729 1729 c = repo[r]
1730 1730 e = []
1731 1731 for k in keys:
1732 1732 if k == 'rev':
1733 1733 e.append(r)
1734 1734 elif k == '-rev':
1735 1735 e.append(-r)
1736 1736 elif k == 'branch':
1737 1737 e.append(c.branch())
1738 1738 elif k == '-branch':
1739 1739 e.append(invert(c.branch()))
1740 1740 elif k == 'desc':
1741 1741 e.append(c.description())
1742 1742 elif k == '-desc':
1743 1743 e.append(invert(c.description()))
1744 1744 elif k in 'user author':
1745 1745 e.append(c.user())
1746 1746 elif k in '-user -author':
1747 1747 e.append(invert(c.user()))
1748 1748 elif k == 'date':
1749 1749 e.append(c.date()[0])
1750 1750 elif k == '-date':
1751 1751 e.append(-c.date()[0])
1752 1752 else:
1753 1753 raise error.ParseError(_("unknown sort key %r") % k)
1754 1754 e.append(r)
1755 1755 l.append(e)
1756 1756 l.sort()
1757 1757 return baseset([e[-1] for e in l])
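# Rough sketch of the decorate-sort-undecorate step above: for
# ``sort(all(), "-date branch")`` each revision r is decorated into a key
# list like [-c.date()[0], c.branch(), r]; descending string keys such as
# ``-branch`` go through invert() so a single ascending list sort produces
# the requested order, and the trailing r is what gets returned.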
1758 1758
1759 1759 def subrepo(repo, subset, x):
1760 1760 """``subrepo([pattern])``
1761 1761 Changesets that add, modify or remove the given subrepo. If no subrepo
1762 1762 pattern is named, any subrepo changes are returned.
1763 1763 """
1764 1764 # i18n: "subrepo" is a keyword
1765 1765 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1766 1766 if len(args) != 0:
1767 1767 pat = getstring(args[0], _("subrepo requires a pattern"))
1768 1768
1769 1769 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1770 1770
1771 1771 def submatches(names):
1772 1772 k, p, m = _stringmatcher(pat)
1773 1773 for name in names:
1774 1774 if m(name):
1775 1775 yield name
1776 1776
1777 1777 def matches(x):
1778 1778 c = repo[x]
1779 1779 s = repo.status(c.p1().node(), c.node(), match=m)
1780 1780
1781 1781 if len(args) == 0:
1782 1782 return s.added or s.modified or s.removed
1783 1783
1784 1784 if s.added:
1785 1785 return util.any(submatches(c.substate.keys()))
1786 1786
1787 1787 if s.modified:
1788 1788 subs = set(c.p1().substate.keys())
1789 1789 subs.update(c.substate.keys())
1790 1790
1791 1791 for path in submatches(subs):
1792 1792 if c.p1().substate.get(path) != c.substate.get(path):
1793 1793 return True
1794 1794
1795 1795 if s.removed:
1796 1796 return util.any(submatches(c.p1().substate.keys()))
1797 1797
1798 1798 return False
1799 1799
1800 1800 return subset.filter(matches)
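# e.g. ``subrepo()`` selects any changeset that touches .hgsubstate, while
# ``subrepo('nested')`` (for a hypothetical subrepo named "nested") narrows
# that to changesets where this subrepo was added, removed, or pinned to a
# different revision.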
1801 1801
1802 1802 def _stringmatcher(pattern):
1803 1803 """
1804 1804 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1805 1805 returns the matcher name, pattern, and matcher function.
1806 1806 missing or unknown prefixes are treated as literal matches.
1807 1807
1808 1808 helper for tests:
1809 1809 >>> def test(pattern, *tests):
1810 1810 ... kind, pattern, matcher = _stringmatcher(pattern)
1811 1811 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1812 1812
1813 1813 exact matching (no prefix):
1814 1814 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1815 1815 ('literal', 'abcdefg', [False, False, True])
1816 1816
1817 1817 regex matching ('re:' prefix)
1818 1818 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1819 1819 ('re', 'a.+b', [False, False, True])
1820 1820
1821 1821 force exact matches ('literal:' prefix)
1822 1822 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1823 1823 ('literal', 're:foobar', [False, True])
1824 1824
1825 1825 unknown prefixes are ignored and treated as literals
1826 1826 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1827 1827 ('literal', 'foo:bar', [False, False, True])
1828 1828 """
1829 1829 if pattern.startswith('re:'):
1830 1830 pattern = pattern[3:]
1831 1831 try:
1832 1832 regex = re.compile(pattern)
1833 1833 except re.error, e:
1834 1834 raise error.ParseError(_('invalid regular expression: %s')
1835 1835 % e)
1836 1836 return 're', pattern, regex.search
1837 1837 elif pattern.startswith('literal:'):
1838 1838 pattern = pattern[8:]
1839 1839 return 'literal', pattern, pattern.__eq__
1840 1840
1841 1841 def _substringmatcher(pattern):
1842 1842 kind, pattern, matcher = _stringmatcher(pattern)
1843 1843 if kind == 'literal':
1844 1844 matcher = lambda s: pattern in s
1845 1845 return kind, pattern, matcher
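# Illustrative behaviour, mirroring the _stringmatcher doctests above:
# _substringmatcher('foo') returns a matcher for which matcher('xfooy') is
# True, while _substringmatcher('re:^foo') keeps plain regex searching.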
1846 1846
1847 1847 def tag(repo, subset, x):
1848 1848 """``tag([name])``
1849 1849 The specified tag by name, or all tagged revisions if no name is given.
1850 1850
1851 1851 If `name` starts with `re:`, the remainder of the name is treated as
1852 1852 a regular expression. To match a tag that actually starts with `re:`,
1853 1853 use the prefix `literal:`.
1854 1854 """
1855 1855 # i18n: "tag" is a keyword
1856 1856 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1857 1857 cl = repo.changelog
1858 1858 if args:
1859 1859 pattern = getstring(args[0],
1860 1860 # i18n: "tag" is a keyword
1861 1861 _('the argument to tag must be a string'))
1862 1862 kind, pattern, matcher = _stringmatcher(pattern)
1863 1863 if kind == 'literal':
1864 1864 # avoid resolving all tags
1865 1865 tn = repo._tagscache.tags.get(pattern, None)
1866 1866 if tn is None:
1867 1867 raise error.RepoLookupError(_("tag '%s' does not exist")
1868 1868 % pattern)
1869 1869 s = set([repo[tn].rev()])
1870 1870 else:
1871 1871 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1872 1872 else:
1873 1873 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1874 1874 return subset & s
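# e.g. ``tag()`` selects every revision carrying a tag other than 'tip',
# ``tag('1.0')`` resolves that single tag from the tags cache, and
# ``tag('re:^1\.')`` falls back to scanning repo.tagslist() with the
# compiled pattern.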
1875 1875
1876 1876 def tagged(repo, subset, x):
1877 1877 return tag(repo, subset, x)
1878 1878
1879 1879 def unstable(repo, subset, x):
1880 1880 """``unstable()``
1881 1881 Non-obsolete changesets with obsolete ancestors.
1882 1882 """
1883 1883 # i18n: "unstable" is a keyword
1884 1884 getargs(x, 0, 0, _("unstable takes no arguments"))
1885 1885 unstables = obsmod.getrevs(repo, 'unstable')
1886 1886 return subset & unstables
1887 1887
1888 1888
1889 1889 def user(repo, subset, x):
1890 1890 """``user(string)``
1891 1891 User name contains string. The match is case-insensitive.
1892 1892
1893 1893 If `string` starts with `re:`, the remainder of the string is treated as
1894 1894 a regular expression. To match a user that actually contains `re:`, use
1895 1895 the prefix `literal:`.
1896 1896 """
1897 1897 return author(repo, subset, x)
1898 1898
1899 1899 # experimental
1900 1900 def wdir(repo, subset, x):
1901 1901 # i18n: "wdir" is a keyword
1902 1902 getargs(x, 0, 0, _("wdir takes no arguments"))
1903 1903 if None in subset:
1904 1904 return baseset([None])
1905 1905 return baseset()
1906 1906
1907 1907 # for internal use
1908 1908 def _list(repo, subset, x):
1909 1909 s = getstring(x, "internal error")
1910 1910 if not s:
1911 1911 return baseset()
1912 1912 ls = [repo[r].rev() for r in s.split('\0')]
1913 1913 s = subset
1914 1914 return baseset([r for r in ls if r in s])
1915 1915
1916 1916 # for internal use
1917 1917 def _intlist(repo, subset, x):
1918 1918 s = getstring(x, "internal error")
1919 1919 if not s:
1920 1920 return baseset()
1921 1921 ls = [int(r) for r in s.split('\0')]
1922 1922 s = subset
1923 1923 return baseset([r for r in ls if r in s])
1924 1924
1925 1925 # for internal use
1926 1926 def _hexlist(repo, subset, x):
1927 1927 s = getstring(x, "internal error")
1928 1928 if not s:
1929 1929 return baseset()
1930 1930 cl = repo.changelog
1931 1931 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1932 1932 s = subset
1933 1933 return baseset([r for r in ls if r in s])
1934 1934
1935 1935 symbols = {
1936 1936 "adds": adds,
1937 1937 "all": getall,
1938 1938 "ancestor": ancestor,
1939 1939 "ancestors": ancestors,
1940 1940 "_firstancestors": _firstancestors,
1941 1941 "author": author,
1942 1942 "bisect": bisect,
1943 1943 "bisected": bisected,
1944 1944 "bookmark": bookmark,
1945 1945 "branch": branch,
1946 1946 "branchpoint": branchpoint,
1947 1947 "bumped": bumped,
1948 1948 "bundle": bundle,
1949 1949 "children": children,
1950 1950 "closed": closed,
1951 1951 "contains": contains,
1952 1952 "converted": converted,
1953 1953 "date": date,
1954 1954 "desc": desc,
1955 1955 "descendants": descendants,
1956 1956 "_firstdescendants": _firstdescendants,
1957 1957 "destination": destination,
1958 1958 "divergent": divergent,
1959 1959 "draft": draft,
1960 1960 "extinct": extinct,
1961 1961 "extra": extra,
1962 1962 "file": hasfile,
1963 1963 "filelog": filelog,
1964 1964 "first": first,
1965 1965 "follow": follow,
1966 1966 "_followfirst": _followfirst,
1967 1967 "grep": grep,
1968 1968 "head": head,
1969 1969 "heads": heads,
1970 1970 "hidden": hidden,
1971 1971 "id": node_,
1972 1972 "keyword": keyword,
1973 1973 "last": last,
1974 1974 "limit": limit,
1975 1975 "_matchfiles": _matchfiles,
1976 1976 "max": maxrev,
1977 1977 "merge": merge,
1978 1978 "min": minrev,
1979 1979 "modifies": modifies,
1980 1980 "named": named,
1981 1981 "obsolete": obsolete,
1982 1982 "only": only,
1983 1983 "origin": origin,
1984 1984 "outgoing": outgoing,
1985 1985 "p1": p1,
1986 1986 "p2": p2,
1987 1987 "parents": parents,
1988 1988 "present": present,
1989 1989 "public": public,
1990 1990 "remote": remote,
1991 1991 "removes": removes,
1992 1992 "rev": rev,
1993 1993 "reverse": reverse,
1994 1994 "roots": roots,
1995 1995 "sort": sort,
1996 1996 "secret": secret,
1997 1997 "subrepo": subrepo,
1998 1998 "matching": matching,
1999 1999 "tag": tag,
2000 2000 "tagged": tagged,
2001 2001 "user": user,
2002 2002 "unstable": unstable,
2003 2003 "wdir": wdir,
2004 2004 "_list": _list,
2005 2005 "_intlist": _intlist,
2006 2006 "_hexlist": _hexlist,
2007 2007 }
2008 2008
2009 2009 # symbols which can't be used for a DoS attack for any given input
2010 2010 # (e.g. those which accept regexes as plain strings shouldn't be included)
2011 2011 # functions that just return a lot of changesets (like all) don't count here
2012 2012 safesymbols = set([
2013 2013 "adds",
2014 2014 "all",
2015 2015 "ancestor",
2016 2016 "ancestors",
2017 2017 "_firstancestors",
2018 2018 "author",
2019 2019 "bisect",
2020 2020 "bisected",
2021 2021 "bookmark",
2022 2022 "branch",
2023 2023 "branchpoint",
2024 2024 "bumped",
2025 2025 "bundle",
2026 2026 "children",
2027 2027 "closed",
2028 2028 "converted",
2029 2029 "date",
2030 2030 "desc",
2031 2031 "descendants",
2032 2032 "_firstdescendants",
2033 2033 "destination",
2034 2034 "divergent",
2035 2035 "draft",
2036 2036 "extinct",
2037 2037 "extra",
2038 2038 "file",
2039 2039 "filelog",
2040 2040 "first",
2041 2041 "follow",
2042 2042 "_followfirst",
2043 2043 "head",
2044 2044 "heads",
2045 2045 "hidden",
2046 2046 "id",
2047 2047 "keyword",
2048 2048 "last",
2049 2049 "limit",
2050 2050 "_matchfiles",
2051 2051 "max",
2052 2052 "merge",
2053 2053 "min",
2054 2054 "modifies",
2055 2055 "obsolete",
2056 2056 "only",
2057 2057 "origin",
2058 2058 "outgoing",
2059 2059 "p1",
2060 2060 "p2",
2061 2061 "parents",
2062 2062 "present",
2063 2063 "public",
2064 2064 "remote",
2065 2065 "removes",
2066 2066 "rev",
2067 2067 "reverse",
2068 2068 "roots",
2069 2069 "sort",
2070 2070 "secret",
2071 2071 "matching",
2072 2072 "tag",
2073 2073 "tagged",
2074 2074 "user",
2075 2075 "unstable",
2076 2076 "wdir",
2077 2077 "_list",
2078 2078 "_intlist",
2079 2079 "_hexlist",
2080 2080 ])
2081 2081
2082 2082 methods = {
2083 2083 "range": rangeset,
2084 2084 "dagrange": dagrange,
2085 2085 "string": stringset,
2086 2086 "symbol": stringset,
2087 2087 "and": andset,
2088 2088 "or": orset,
2089 2089 "not": notset,
2090 2090 "list": listset,
2091 2091 "func": func,
2092 2092 "ancestor": ancestorspec,
2093 2093 "parent": parentspec,
2094 2094 "parentpost": p1,
2095 2095 "only": only,
2096 2096 "onlypost": only,
2097 2097 }
2098 2098
2099 2099 def optimize(x, small):
2100 2100 if x is None:
2101 2101 return 0, x
2102 2102
2103 2103 smallbonus = 1
2104 2104 if small:
2105 2105 smallbonus = .5
2106 2106
2107 2107 op = x[0]
2108 2108 if op == 'minus':
2109 2109 return optimize(('and', x[1], ('not', x[2])), small)
2110 2110 elif op == 'only':
2111 2111 return optimize(('func', ('symbol', 'only'),
2112 2112 ('list', x[1], x[2])), small)
2113 2113 elif op == 'dagrangepre':
2114 2114 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2115 2115 elif op == 'dagrangepost':
2116 2116 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2117 2117 elif op == 'rangepre':
2118 2118 return optimize(('range', ('string', '0'), x[1]), small)
2119 2119 elif op == 'rangepost':
2120 2120 return optimize(('range', x[1], ('string', 'tip')), small)
2121 2121 elif op == 'negate':
2122 2122 return optimize(('string',
2123 2123 '-' + getstring(x[1], _("can't negate that"))), small)
2124 2124 elif op in 'string symbol negate':
2125 2125 return smallbonus, x # single revisions are small
2126 2126 elif op == 'and':
2127 2127 wa, ta = optimize(x[1], True)
2128 2128 wb, tb = optimize(x[2], True)
2129 2129
2130 2130 # (::x and not ::y)/(not ::y and ::x) have a fast path
2131 2131 def isonly(revs, bases):
2132 2132 return (
2133 2133 revs[0] == 'func'
2134 2134 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2135 2135 and bases[0] == 'not'
2136 2136 and bases[1][0] == 'func'
2137 2137 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2138 2138
2139 2139 w = min(wa, wb)
2140 2140 if isonly(ta, tb):
2141 2141 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2142 2142 if isonly(tb, ta):
2143 2143 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2144 2144
2145 2145 if wa > wb:
2146 2146 return w, (op, tb, ta)
2147 2147 return w, (op, ta, tb)
2148 2148 elif op == 'or':
2149 2149 wa, ta = optimize(x[1], False)
2150 2150 wb, tb = optimize(x[2], False)
2151 2151 if wb < wa:
2152 2152 wb, wa = wa, wb
2153 2153 return max(wa, wb), (op, ta, tb)
2154 2154 elif op == 'not':
2155 2155 o = optimize(x[1], not small)
2156 2156 return o[0], (op, o[1])
2157 2157 elif op == 'parentpost':
2158 2158 o = optimize(x[1], small)
2159 2159 return o[0], (op, o[1])
2160 2160 elif op == 'group':
2161 2161 return optimize(x[1], small)
2162 2162 elif op in 'dagrange range list parent ancestorspec':
2163 2163 if op == 'parent':
2164 2164 # x^:y means (x^) : y, not x ^ (:y)
2165 2165 post = ('parentpost', x[1])
2166 2166 if x[2][0] == 'dagrangepre':
2167 2167 return optimize(('dagrange', post, x[2][1]), small)
2168 2168 elif x[2][0] == 'rangepre':
2169 2169 return optimize(('range', post, x[2][1]), small)
2170 2170
2171 2171 wa, ta = optimize(x[1], small)
2172 2172 wb, tb = optimize(x[2], small)
2173 2173 return wa + wb, (op, ta, tb)
2174 2174 elif op == 'func':
2175 2175 f = getstring(x[1], _("not a symbol"))
2176 2176 wa, ta = optimize(x[2], small)
2177 2177 if f in ("author branch closed date desc file grep keyword "
2178 2178 "outgoing user"):
2179 2179 w = 10 # slow
2180 2180 elif f in "modifies adds removes":
2181 2181 w = 30 # slower
2182 2182 elif f == "contains":
2183 2183 w = 100 # very slow
2184 2184 elif f == "ancestor":
2185 2185 w = 1 * smallbonus
2186 2186 elif f in "reverse limit first _intlist":
2187 2187 w = 0
2188 2188 elif f in "sort":
2189 2189 w = 10 # assume most sorts look at changelog
2190 2190 else:
2191 2191 w = 1
2192 2192 return w + wa, (op, x[1], ta)
2193 2193 return 1, x
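# For instance, the isonly() fast path above rewrites a query such as
# ``::feature and not ::default`` (i.e. ancestors(feature) and not
# ancestors(default)) into the equivalent ``only(feature, default)`` call;
# "feature" and "default" here are just placeholder revision names.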
2194 2194
2195 2195 _aliasarg = ('func', ('symbol', '_aliasarg'))
2196 2196 def _getaliasarg(tree):
2197 2197 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2198 2198 return X; otherwise return None.
2199 2199 """
2200 2200 if (len(tree) == 3 and tree[:2] == _aliasarg
2201 2201 and tree[2][0] == 'string'):
2202 2202 return tree[2][1]
2203 2203 return None
2204 2204
2205 2205 def _checkaliasarg(tree, known=None):
2206 2206 """Check tree contains no _aliasarg construct or only ones which
2207 2207 """Check that tree contains no _aliasarg construct, or only ones whose
2208 2208 value is in known. Used to avoid alias placeholder injection.
2209 2209 if isinstance(tree, tuple):
2210 2210 arg = _getaliasarg(tree)
2211 2211 if arg is not None and (not known or arg not in known):
2212 2212 raise error.UnknownIdentifier('_aliasarg', [])
2213 2213 for t in tree:
2214 2214 _checkaliasarg(t, known)
2215 2215
2216 2216 # the set of valid characters for the initial letter of symbols in
2217 2217 # alias declarations and definitions
2218 2218 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2219 2219 if c.isalnum() or c in '._@$' or ord(c) > 127)
2220 2220
2221 2221 def _tokenizealias(program, lookup=None):
2222 2222 """Parse alias declaration/definition into a stream of tokens
2223 2223
2224 2224 This also allows symbol names to use ``$`` as an initial letter
2225 2225 (for backward compatibility), and callers of this function should
2226 2226 check whether ``$`` is also used for unexpected symbols.
2227 2227 """
2228 2228 return tokenize(program, lookup=lookup,
2229 2229 syminitletters=_aliassyminitletters)
2230 2230
2231 2231 def _parsealiasdecl(decl):
2232 2232 """Parse alias declaration ``decl``
2233 2233
2234 2234 This returns ``(name, tree, args, errorstr)`` tuple:
2235 2235
2236 2236 - ``name``: of declared alias (may be ``decl`` itself at error)
2237 2237 - ``tree``: parse result (or ``None`` at error)
2238 2238 - ``args``: list of alias argument names (or None for symbol declaration)
2239 2239 - ``errorstr``: detail about detected error (or None)
2240 2240
2241 2241 >>> _parsealiasdecl('foo')
2242 2242 ('foo', ('symbol', 'foo'), None, None)
2243 2243 >>> _parsealiasdecl('$foo')
2244 2244 ('$foo', None, None, "'$' not for alias arguments")
2245 2245 >>> _parsealiasdecl('foo::bar')
2246 2246 ('foo::bar', None, None, 'invalid format')
2247 2247 >>> _parsealiasdecl('foo bar')
2248 2248 ('foo bar', None, None, 'at 4: invalid token')
2249 2249 >>> _parsealiasdecl('foo()')
2250 2250 ('foo', ('func', ('symbol', 'foo')), [], None)
2251 2251 >>> _parsealiasdecl('$foo()')
2252 2252 ('$foo()', None, None, "'$' not for alias arguments")
2253 2253 >>> _parsealiasdecl('foo($1, $2)')
2254 2254 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2255 2255 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2256 2256 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2257 2257 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2258 2258 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2259 2259 >>> _parsealiasdecl('foo(bar($1, $2))')
2260 2260 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2261 2261 >>> _parsealiasdecl('foo("string")')
2262 2262 ('foo("string")', None, None, 'invalid argument list')
2263 2263 >>> _parsealiasdecl('foo($1, $2')
2264 2264 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2265 2265 >>> _parsealiasdecl('foo("string')
2266 2266 ('foo("string', None, None, 'at 5: unterminated string')
2267 2267 >>> _parsealiasdecl('foo($1, $2, $1)')
2268 2268 ('foo', None, None, 'argument names collide with each other')
2269 2269 """
2270 2270 p = parser.parser(_tokenizealias, elements)
2271 2271 try:
2272 2272 tree, pos = p.parse(decl)
2273 2273 if (pos != len(decl)):
2274 2274 raise error.ParseError(_('invalid token'), pos)
2275 2275
2276 2276 if isvalidsymbol(tree):
2277 2277 # "name = ...." style
2278 2278 name = getsymbol(tree)
2279 2279 if name.startswith('$'):
2280 2280 return (decl, None, None, _("'$' not for alias arguments"))
2281 2281 return (name, ('symbol', name), None, None)
2282 2282
2283 2283 if isvalidfunc(tree):
2284 2284 # "name(arg, ....) = ...." style
2285 2285 name = getfuncname(tree)
2286 2286 if name.startswith('$'):
2287 2287 return (decl, None, None, _("'$' not for alias arguments"))
2288 2288 args = []
2289 2289 for arg in getfuncargs(tree):
2290 2290 if not isvalidsymbol(arg):
2291 2291 return (decl, None, None, _("invalid argument list"))
2292 2292 args.append(getsymbol(arg))
2293 2293 if len(args) != len(set(args)):
2294 2294 return (name, None, None,
2295 2295 _("argument names collide with each other"))
2296 2296 return (name, ('func', ('symbol', name)), args, None)
2297 2297
2298 2298 return (decl, None, None, _("invalid format"))
2299 2299 except error.ParseError, inst:
2300 2300 return (decl, None, None, parseerrordetail(inst))
2301 2301
2302 2302 def _parsealiasdefn(defn, args):
2303 2303 """Parse alias definition ``defn``
2304 2304
2305 2305 This function also replaces alias argument references in the
2306 2306 specified definition by ``_aliasarg(ARGNAME)``.
2307 2307
2308 2308 ``args`` is a list of alias argument names, or None if the alias
2309 2309 is declared as a symbol.
2310 2310
2311 2311 This returns "tree" as parsing result.
2312 2312
2313 2313 >>> args = ['$1', '$2', 'foo']
2314 2314 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2315 2315 (or
2316 2316 (func
2317 2317 ('symbol', '_aliasarg')
2318 2318 ('string', '$1'))
2319 2319 (func
2320 2320 ('symbol', '_aliasarg')
2321 2321 ('string', 'foo')))
2322 2322 >>> try:
2323 2323 ... _parsealiasdefn('$1 or $bar', args)
2324 2324 ... except error.ParseError, inst:
2325 2325 ... print parseerrordetail(inst)
2326 2326 at 6: '$' not for alias arguments
2327 2327 >>> args = ['$1', '$10', 'foo']
2328 2328 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2329 2329 (or
2330 2330 (func
2331 2331 ('symbol', '_aliasarg')
2332 2332 ('string', '$10'))
2333 2333 ('symbol', 'foobar'))
2334 2334 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2335 2335 (or
2336 2336 ('string', '$1')
2337 2337 ('string', 'foo'))
2338 2338 """
2339 2339 def tokenizedefn(program, lookup=None):
2340 2340 if args:
2341 2341 argset = set(args)
2342 2342 else:
2343 2343 argset = set()
2344 2344
2345 2345 for t, value, pos in _tokenizealias(program, lookup=lookup):
2346 2346 if t == 'symbol':
2347 2347 if value in argset:
2348 2348 # emulate tokenization of "_aliasarg('ARGNAME')":
2349 2349 # "_aliasarg()" is an unknown symbol only used to separate
2350 2350 # alias argument placeholders from regular strings.
2351 2351 yield ('symbol', '_aliasarg', pos)
2352 2352 yield ('(', None, pos)
2353 2353 yield ('string', value, pos)
2354 2354 yield (')', None, pos)
2355 2355 continue
2356 2356 elif value.startswith('$'):
2357 2357 raise error.ParseError(_("'$' not for alias arguments"),
2358 2358 pos)
2359 2359 yield (t, value, pos)
2360 2360
2361 2361 p = parser.parser(tokenizedefn, elements)
2362 2362 tree, pos = p.parse(defn)
2363 2363 if pos != len(defn):
2364 2364 raise error.ParseError(_('invalid token'), pos)
2365 2365 return tree
2366 2366
2367 2367 class revsetalias(object):
2368 2368 # whether the alias's own `error` information has already been shown or not.
2369 2369 # this avoids showing the same warning multiple times at each `findaliases` call.
2370 2370 warned = False
2371 2371
2372 2372 def __init__(self, name, value):
2373 2373 '''Aliases like:
2374 2374
2375 2375 h = heads(default)
2376 2376 b($1) = ancestors($1) - ancestors(default)
2377 2377 '''
2378 2378 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2379 2379 if self.error:
2380 2380 self.error = _('failed to parse the declaration of revset alias'
2381 2381 ' "%s": %s') % (self.name, self.error)
2382 2382 return
2383 2383
2384 2384 try:
2385 2385 self.replacement = _parsealiasdefn(value, self.args)
2386 2386 # Check for placeholder injection
2387 2387 _checkaliasarg(self.replacement, self.args)
2388 2388 except error.ParseError, inst:
2389 2389 self.error = _('failed to parse the definition of revset alias'
2390 2390 ' "%s": %s') % (self.name, parseerrordetail(inst))
2391 2391
2392 2392 def _getalias(aliases, tree):
2393 2393 """If tree looks like an unexpanded alias, return it. Return None
2394 2394 otherwise.
2395 2395 """
2396 2396 if isinstance(tree, tuple) and tree:
2397 2397 if tree[0] == 'symbol' and len(tree) == 2:
2398 2398 name = tree[1]
2399 2399 alias = aliases.get(name)
2400 2400 if alias and alias.args is None and alias.tree == tree:
2401 2401 return alias
2402 2402 if tree[0] == 'func' and len(tree) > 1:
2403 2403 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2404 2404 name = tree[1][1]
2405 2405 alias = aliases.get(name)
2406 2406 if alias and alias.args is not None and alias.tree == tree[:2]:
2407 2407 return alias
2408 2408 return None
2409 2409
2410 2410 def _expandargs(tree, args):
2411 2411 """Replace _aliasarg instances with the substitution value of the
2412 2412 same name in args, recursively.
2413 2413 """
2414 2414 if not tree or not isinstance(tree, tuple):
2415 2415 return tree
2416 2416 arg = _getaliasarg(tree)
2417 2417 if arg is not None:
2418 2418 return args[arg]
2419 2419 return tuple(_expandargs(t, args) for t in tree)
2420 2420
2421 2421 def _expandaliases(aliases, tree, expanding, cache):
2422 2422 """Expand aliases in tree, recursively.
2423 2423
2424 2424 'aliases' is a dictionary mapping user defined aliases to
2425 2425 revsetalias objects.
2426 2426 """
2427 2427 if not isinstance(tree, tuple):
2428 2428 # Do not expand raw strings
2429 2429 return tree
2430 2430 alias = _getalias(aliases, tree)
2431 2431 if alias is not None:
2432 2432 if alias.error:
2433 2433 raise util.Abort(alias.error)
2434 2434 if alias in expanding:
2435 2435 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2436 2436 'detected') % alias.name)
2437 2437 expanding.append(alias)
2438 2438 if alias.name not in cache:
2439 2439 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2440 2440 expanding, cache)
2441 2441 result = cache[alias.name]
2442 2442 expanding.pop()
2443 2443 if alias.args is not None:
2444 2444 l = getlist(tree[2])
2445 2445 if len(l) != len(alias.args):
2446 2446 raise error.ParseError(
2447 2447 _('invalid number of arguments: %s') % len(l))
2448 2448 l = [_expandaliases(aliases, a, [], cache) for a in l]
2449 2449 result = _expandargs(result, dict(zip(alias.args, l)))
2450 2450 else:
2451 2451 result = tuple(_expandaliases(aliases, t, expanding, cache)
2452 2452 for t in tree)
2453 2453 return result
2454 2454
2455 2455 def findaliases(ui, tree, showwarning=None):
2456 2456 _checkaliasarg(tree)
2457 2457 aliases = {}
2458 2458 for k, v in ui.configitems('revsetalias'):
2459 2459 alias = revsetalias(k, v)
2460 2460 aliases[alias.name] = alias
2461 2461 tree = _expandaliases(aliases, tree, [], {})
2462 2462 if showwarning:
2463 2463 # warn about problematic (but unreferenced) aliases
2464 2464 for name, alias in sorted(aliases.iteritems()):
2465 2465 if alias.error and not alias.warned:
2466 2466 showwarning(_('warning: %s\n') % (alias.error))
2467 2467 alias.warned = True
2468 2468 return tree
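# Sketch of the intended flow, assuming a hypothetical configuration entry
# ``[revsetalias] d($1) = sort($1, date)``: findaliases() expands the
# parsed tree for ``d(heads())`` into the tree for ``sort(heads(), date)``,
# caching the expanded body per alias name so each alias is only expanded
# once even when referenced repeatedly.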
2469 2469
2470 2470 def foldconcat(tree):
2471 2471 """Fold elements to be concatenated by `##`
2472 2472 """
2473 2473 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2474 2474 return tree
2475 2475 if tree[0] == '_concat':
2476 2476 pending = [tree]
2477 2477 l = []
2478 2478 while pending:
2479 2479 e = pending.pop()
2480 2480 if e[0] == '_concat':
2481 2481 pending.extend(reversed(e[1:]))
2482 2482 elif e[0] in ('string', 'symbol'):
2483 2483 l.append(e[1])
2484 2484 else:
2485 2485 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2486 2486 raise error.ParseError(msg)
2487 2487 return ('string', ''.join(l))
2488 2488 else:
2489 2489 return tuple(foldconcat(t) for t in tree)
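# For example, in an alias body like ``grep(r'bug ' ## $1)``, once $1 has
# been substituted with the string '1234' the ``_concat`` node is folded
# here into the single string 'bug 1234' before evaluation.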
2490 2490
2491 2491 def parse(spec, lookup=None):
2492 2492 p = parser.parser(tokenize, elements)
2493 2493 return p.parse(spec, lookup=lookup)
2494 2494
2495 2495 def posttreebuilthook(tree, repo):
2496 2496 # hook for extensions to execute code on the optimized tree
2497 2497 pass
2498 2498
2499 2499 def match(ui, spec, repo=None):
2500 2500 if not spec:
2501 2501 raise error.ParseError(_("empty query"))
2502 2502 lookup = None
2503 2503 if repo:
2504 2504 lookup = repo.__contains__
2505 2505 tree, pos = parse(spec, lookup)
2506 2506 if (pos != len(spec)):
2507 2507 raise error.ParseError(_("invalid token"), pos)
2508 2508 if ui:
2509 2509 tree = findaliases(ui, tree, showwarning=ui.warn)
2510 2510 tree = foldconcat(tree)
2511 2511 weight, tree = optimize(tree, True)
2512 2512 posttreebuilthook(tree, repo)
2513 2513 def mfunc(repo, subset=None):
2514 2514 if subset is None:
2515 2515 subset = fullreposet(repo)
2516 2516 if util.safehasattr(subset, 'isascending'):
2517 2517 result = getset(repo, subset, tree)
2518 2518 else:
2519 2519 result = getset(repo, baseset(subset), tree)
2520 2520 return result
2521 2521 return mfunc
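# A minimal usage sketch of this entry point (variable names are
# illustrative only):
#
#   m = match(ui, 'heads() and not closed()', repo)
#   revs = m(repo)                      # smartset over the whole repo
#   narrowed = m(repo, subset=revs)     # evaluate against an existing set
#
# mfunc() accepts any iterable as `subset`; objects without the smartset
# interface are wrapped in a baseset before getset() walks the tree.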
2522 2522
2523 2523 def formatspec(expr, *args):
2524 2524 '''
2525 2525 This is a convenience function for using revsets internally, and
2526 2526 escapes arguments appropriately. Aliases are intentionally ignored
2527 2527 so that intended expression behavior isn't accidentally subverted.
2528 2528
2529 2529 Supported arguments:
2530 2530
2531 2531 %r = revset expression, parenthesized
2532 2532 %d = int(arg), no quoting
2533 2533 %s = string(arg), escaped and single-quoted
2534 2534 %b = arg.branch(), escaped and single-quoted
2535 2535 %n = hex(arg), single-quoted
2536 2536 %% = a literal '%'
2537 2537
2538 2538 Prefixing the type with 'l' specifies a parenthesized list of that type.
2539 2539
2540 2540 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2541 2541 '(10 or 11):: and ((this()) or (that()))'
2542 2542 >>> formatspec('%d:: and not %d::', 10, 20)
2543 2543 '10:: and not 20::'
2544 2544 >>> formatspec('%ld or %ld', [], [1])
2545 2545 "_list('') or 1"
2546 2546 >>> formatspec('keyword(%s)', 'foo\\xe9')
2547 2547 "keyword('foo\\\\xe9')"
2548 2548 >>> b = lambda: 'default'
2549 2549 >>> b.branch = b
2550 2550 >>> formatspec('branch(%b)', b)
2551 2551 "branch('default')"
2552 2552 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2553 2553 "root(_list('a\\x00b\\x00c\\x00d'))"
2554 2554 '''
2555 2555
2556 2556 def quote(s):
2557 2557 return repr(str(s))
2558 2558
2559 2559 def argtype(c, arg):
2560 2560 if c == 'd':
2561 2561 return str(int(arg))
2562 2562 elif c == 's':
2563 2563 return quote(arg)
2564 2564 elif c == 'r':
2565 2565 parse(arg) # make sure syntax errors are confined
2566 2566 return '(%s)' % arg
2567 2567 elif c == 'n':
2568 2568 return quote(node.hex(arg))
2569 2569 elif c == 'b':
2570 2570 return quote(arg.branch())
2571 2571
2572 2572 def listexp(s, t):
2573 2573 l = len(s)
2574 2574 if l == 0:
2575 2575 return "_list('')"
2576 2576 elif l == 1:
2577 2577 return argtype(t, s[0])
2578 2578 elif t == 'd':
2579 2579 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2580 2580 elif t == 's':
2581 2581 return "_list('%s')" % "\0".join(s)
2582 2582 elif t == 'n':
2583 2583 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2584 2584 elif t == 'b':
2585 2585 return "_list('%s')" % "\0".join(a.branch() for a in s)
2586 2586
2587 2587 m = l // 2
2588 2588 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2589 2589
2590 2590 ret = ''
2591 2591 pos = 0
2592 2592 arg = 0
2593 2593 while pos < len(expr):
2594 2594 c = expr[pos]
2595 2595 if c == '%':
2596 2596 pos += 1
2597 2597 d = expr[pos]
2598 2598 if d == '%':
2599 2599 ret += d
2600 2600 elif d in 'dsnbr':
2601 2601 ret += argtype(d, args[arg])
2602 2602 arg += 1
2603 2603 elif d == 'l':
2604 2604 # a list of some type
2605 2605 pos += 1
2606 2606 d = expr[pos]
2607 2607 ret += listexp(list(args[arg]), d)
2608 2608 arg += 1
2609 2609 else:
2610 2610 raise util.Abort('unexpected revspec format character %s' % d)
2611 2611 else:
2612 2612 ret += c
2613 2613 pos += 1
2614 2614
2615 2615 return ret
2616 2616
2617 2617 def prettyformat(tree):
2618 2618 def _prettyformat(tree, level, lines):
2619 2619 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2620 2620 lines.append((level, str(tree)))
2621 2621 else:
2622 2622 lines.append((level, '(%s' % tree[0]))
2623 2623 for s in tree[1:]:
2624 2624 _prettyformat(s, level + 1, lines)
2625 2625 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2626 2626
2627 2627 lines = []
2628 2628 _prettyformat(tree, 0, lines)
2629 2629 output = '\n'.join((' '*l + s) for l, s in lines)
2630 2630 return output
2631 2631
2632 2632 def depth(tree):
2633 2633 if isinstance(tree, tuple):
2634 2634 return max(map(depth, tree)) + 1
2635 2635 else:
2636 2636 return 0
2637 2637
2638 2638 def funcsused(tree):
2639 2639 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2640 2640 return set()
2641 2641 else:
2642 2642 funcs = set()
2643 2643 for s in tree[1:]:
2644 2644 funcs |= funcsused(s)
2645 2645 if tree[0] == 'func':
2646 2646 funcs.add(tree[1][1])
2647 2647 return funcs
2648 2648
2649 2649 class abstractsmartset(object):
2650 2650
2651 2651 def __nonzero__(self):
2652 2652 """True if the smartset is not empty"""
2653 2653 raise NotImplementedError()
2654 2654
2655 2655 def __contains__(self, rev):
2656 2656 """provide fast membership testing"""
2657 2657 raise NotImplementedError()
2658 2658
2659 2659 def __iter__(self):
2660 2660 """iterate the set in the order it is supposed to be iterated"""
2661 2661 raise NotImplementedError()
2662 2662
2663 2663 # Attributes containing a function to perform a fast iteration in a given
2664 2664 # direction. A smartset can have none, one, or both defined.
2665 2665 #
2666 2666 # Default value is None instead of a function returning None to avoid
2667 2667 # initializing an iterator just for testing if a fast method exists.
2668 2668 fastasc = None
2669 2669 fastdesc = None
2670 2670
2671 2671 def isascending(self):
2672 2672 """True if the set will iterate in ascending order"""
2673 2673 raise NotImplementedError()
2674 2674
2675 2675 def isdescending(self):
2676 2676 """True if the set will iterate in descending order"""
2677 2677 raise NotImplementedError()
2678 2678
2679 2679 def min(self):
2680 2680 """return the minimum element in the set"""
2681 2681 if self.fastasc is not None:
2682 2682 for r in self.fastasc():
2683 2683 return r
2684 2684 raise ValueError('arg is an empty sequence')
2685 2685 return min(self)
2686 2686
2687 2687 def max(self):
2688 2688 """return the maximum element in the set"""
2689 2689 if self.fastdesc is not None:
2690 2690 for r in self.fastdesc():
2691 2691 return r
2692 2692 raise ValueError('arg is an empty sequence')
2693 2693 return max(self)
2694 2694
2695 2695 def first(self):
2696 2696 """return the first element in the set (user iteration perspective)
2697 2697
2698 2698 Return None if the set is empty"""
2699 2699 raise NotImplementedError()
2700 2700
2701 2701 def last(self):
2702 2702 """return the last element in the set (user iteration perspective)
2703 2703
2704 2704 Return None if the set is empty"""
2705 2705 raise NotImplementedError()
2706 2706
2707 2707 def __len__(self):
2708 2708 """return the length of the smartset
2709 2709
2710 2710 This can be expensive on a smartset that could otherwise be lazy."""
2711 2711 raise NotImplementedError()
2712 2712
2713 2713 def reverse(self):
2714 2714 """reverse the expected iteration order"""
2715 2715 raise NotImplementedError()
2716 2716
2717 2717 def sort(self, reverse=True):
2718 2718 """get the set to iterate in an ascending or descending order"""
2719 2719 raise NotImplementedError()
2720 2720
2721 2721 def __and__(self, other):
2722 2722 """Returns a new object with the intersection of the two collections.
2723 2723
2724 2724 This is part of the mandatory API for smartset."""
2725 2725 if isinstance(other, fullreposet):
2726 2726 return self
2727 2727 return self.filter(other.__contains__, cache=False)
2728 2728
2729 2729 def __add__(self, other):
2730 2730 """Returns a new object with the union of the two collections.
2731 2731
2732 2732 This is part of the mandatory API for smartset."""
2733 2733 return addset(self, other)
2734 2734
2735 2735 def __sub__(self, other):
2736 2736 """Returns a new object with the subtraction of the two collections.
2737 2737
2738 2738 This is part of the mandatory API for smartset."""
2739 2739 c = other.__contains__
2740 2740 return self.filter(lambda r: not c(r), cache=False)
2741 2741
2742 2742 def filter(self, condition, cache=True):
2743 2743 """Returns this smartset filtered by condition as a new smartset.
2744 2744
2745 2745 `condition` is a callable which takes a revision number and returns a
2746 2746 boolean.
2747 2747
2748 2748 This is part of the mandatory API for smartset."""
2749 2749 # builtins cannot be cached, but they don't need to be
2750 2750 if cache and util.safehasattr(condition, 'func_code'):
2751 2751 condition = util.cachefunc(condition)
2752 2752 return filteredset(self, condition)
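# Small sketch of the resulting set algebra, using baseset (defined below)
# as the concrete smartset:
#
#   xs = baseset([0, 2, 4, 6])
#   ys = baseset([4, 6, 8])
#   list(xs & ys)   # -> [4, 6]           filteredset keeping xs' order
#   list(xs - ys)   # -> [0, 2]
#   list(xs + ys)   # -> [0, 2, 4, 6, 8]  addset, duplicates skipped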
2753 2753
2754 2754 class baseset(abstractsmartset):
2755 2755 """Basic data structure that represents a revset and contains the basic
2756 2756 operations that it should be able to perform.
2757 2757
2758 2758 Every method in this class should be implemented by any smartset class.
2759 2759 """
2760 2760 def __init__(self, data=()):
2761 2761 if not isinstance(data, list):
2762 2762 data = list(data)
2763 2763 self._list = data
2764 2764 self._ascending = None
2765 2765
2766 2766 @util.propertycache
2767 2767 def _set(self):
2768 2768 return set(self._list)
2769 2769
2770 2770 @util.propertycache
2771 2771 def _asclist(self):
2772 2772 asclist = self._list[:]
2773 2773 asclist.sort()
2774 2774 return asclist
2775 2775
2776 2776 def __iter__(self):
2777 2777 if self._ascending is None:
2778 2778 return iter(self._list)
2779 2779 elif self._ascending:
2780 2780 return iter(self._asclist)
2781 2781 else:
2782 2782 return reversed(self._asclist)
2783 2783
2784 2784 def fastasc(self):
2785 2785 return iter(self._asclist)
2786 2786
2787 2787 def fastdesc(self):
2788 2788 return reversed(self._asclist)
2789 2789
2790 2790 @util.propertycache
2791 2791 def __contains__(self):
2792 2792 return self._set.__contains__
2793 2793
2794 2794 def __nonzero__(self):
2795 2795 return bool(self._list)
2796 2796
2797 2797 def sort(self, reverse=False):
2798 2798 self._ascending = not bool(reverse)
2799 2799
2800 2800 def reverse(self):
2801 2801 if self._ascending is None:
2802 2802 self._list.reverse()
2803 2803 else:
2804 2804 self._ascending = not self._ascending
2805 2805
2806 2806 def __len__(self):
2807 2807 return len(self._list)
2808 2808
2809 2809 def isascending(self):
2810 2810 """Returns True if the collection is in ascending order, False if not.
2811 2811
2812 2812 This is part of the mandatory API for smartset."""
2813 2813 if len(self) <= 1:
2814 2814 return True
2815 2815 return self._ascending is not None and self._ascending
2816 2816
2817 2817 def isdescending(self):
2818 2818 """Returns True if the collection is in descending order, False if not.
2819 2819
2820 2820 This is part of the mandatory API for smartset."""
2821 2821 if len(self) <= 1:
2822 2822 return True
2823 2823 return self._ascending is not None and not self._ascending
2824 2824
2825 2825 def first(self):
2826 2826 if self:
2827 2827 if self._ascending is None:
2828 2828 return self._list[0]
2829 2829 elif self._ascending:
2830 2830 return self._asclist[0]
2831 2831 else:
2832 2832 return self._asclist[-1]
2833 2833 return None
2834 2834
2835 2835 def last(self):
2836 2836 if self:
2837 2837 if self._ascending is None:
2838 2838 return self._list[-1]
2839 2839 elif self._ascending:
2840 2840 return self._asclist[-1]
2841 2841 else:
2842 2842 return self._asclist[0]
2843 2843 return None
2844 2844
2845 2845 def __repr__(self):
2846 2846 d = {None: '', False: '-', True: '+'}[self._ascending]
2847 2847 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2848 2848
2849 2849 class filteredset(abstractsmartset):
2850 2850 """Duck type for baseset class which iterates lazily over the revisions in
2851 2851 the subset and contains a function which tests for membership in the
2852 2852 revset
2853 2853 """
2854 2854 def __init__(self, subset, condition=lambda x: True):
2855 2855 """
2856 2856 condition: a function that decides whether a revision in the subset
2857 2857 belongs to the revset or not.
2858 2858 """
2859 2859 self._subset = subset
2860 2860 self._condition = condition
2861 2861 self._cache = {}
2862 2862
2863 2863 def __contains__(self, x):
2864 2864 c = self._cache
2865 2865 if x not in c:
2866 2866 v = c[x] = x in self._subset and self._condition(x)
2867 2867 return v
2868 2868 return c[x]
2869 2869
2870 2870 def __iter__(self):
2871 2871 return self._iterfilter(self._subset)
2872 2872
2873 2873 def _iterfilter(self, it):
2874 2874 cond = self._condition
2875 2875 for x in it:
2876 2876 if cond(x):
2877 2877 yield x
2878 2878
2879 2879 @property
2880 2880 def fastasc(self):
2881 2881 it = self._subset.fastasc
2882 2882 if it is None:
2883 2883 return None
2884 2884 return lambda: self._iterfilter(it())
2885 2885
2886 2886 @property
2887 2887 def fastdesc(self):
2888 2888 it = self._subset.fastdesc
2889 2889 if it is None:
2890 2890 return None
2891 2891 return lambda: self._iterfilter(it())
2892 2892
2893 2893 def __nonzero__(self):
2894 2894 for r in self:
2895 2895 return True
2896 2896 return False
2897 2897
2898 2898 def __len__(self):
2899 2899 # Basic implementation to be changed in future patches.
2900 2900 l = baseset([r for r in self])
2901 2901 return len(l)
2902 2902
2903 2903 def sort(self, reverse=False):
2904 2904 self._subset.sort(reverse=reverse)
2905 2905
2906 2906 def reverse(self):
2907 2907 self._subset.reverse()
2908 2908
2909 2909 def isascending(self):
2910 2910 return self._subset.isascending()
2911 2911
2912 2912 def isdescending(self):
2913 2913 return self._subset.isdescending()
2914 2914
2915 2915 def first(self):
2916 2916 for x in self:
2917 2917 return x
2918 2918 return None
2919 2919
2920 2920 def last(self):
2921 2921 it = None
2922 2922 if self._subset.isascending():
2923 2923 it = self.fastdesc
2924 2924 elif self._subset.isdescending():
2925 2925 it = self.fastasc
2926 2926 if it is None:
2927 2927 # slowly consume everything. This needs improvement
2928 2928 it = lambda: reversed(list(self))
2929 2929 for x in it():
2930 2930 return x
2931 2931 return None
2932 2932
2933 2933 def __repr__(self):
2934 2934 return '<%s %r>' % (type(self).__name__, self._subset)
2935 2935
2936 2936 class addset(abstractsmartset):
2937 2937 """Represent the addition of two sets
2938 2938
2939 2939 Wrapper structure for lazily adding two structures without losing much
2940 2940 performance on the __contains__ method
2941 2941
2942 2942 If the ascending attribute is set, that means the two structures are
2943 2943 ordered in either an ascending or descending way. Therefore, we can add
2944 2944 them maintaining the order by iterating over both at the same time
2945 2945 """
2946 2946 def __init__(self, revs1, revs2, ascending=None):
2947 2947 self._r1 = revs1
2948 2948 self._r2 = revs2
2949 2949 self._iter = None
2950 2950 self._ascending = ascending
2951 2951 self._genlist = None
2952 2952 self._asclist = None
2953 2953
2954 2954 def __len__(self):
2955 2955 return len(self._list)
2956 2956
2957 2957 def __nonzero__(self):
2958 2958 return bool(self._r1) or bool(self._r2)
2959 2959
2960 2960 @util.propertycache
2961 2961 def _list(self):
2962 2962 if not self._genlist:
2963 2963 self._genlist = baseset(self._iterator())
2964 2964 return self._genlist
2965 2965
2966 2966 def _iterator(self):
2967 2967 """Iterate over both collections without repeating elements
2968 2968
2969 2969 If the ascending attribute is not set, iterate over the first one and
2970 2970 then over the second one checking for membership on the first one so we
2971 2971 don't yield any duplicates.
2972 2972
2973 2973 If the ascending attribute is set, iterate over both collections at the
2974 2974 same time, yielding only one value at a time in the given order.
2975 2975 """
2976 2976 if self._ascending is None:
2977 2977 def gen():
2978 2978 for r in self._r1:
2979 2979 yield r
2980 2980 inr1 = self._r1.__contains__
2981 2981 for r in self._r2:
2982 2982 if not inr1(r):
2983 2983 yield r
2984 2984 gen = gen()
2985 2985 else:
2986 2986 iter1 = iter(self._r1)
2987 2987 iter2 = iter(self._r2)
2988 2988 gen = self._iterordered(self._ascending, iter1, iter2)
2989 2989 return gen
2990 2990
2991 2991 def __iter__(self):
2992 2992 if self._ascending is None:
2993 2993 if self._genlist:
2994 2994 return iter(self._genlist)
2995 2995 return iter(self._iterator())
2996 2996 self._trysetasclist()
2997 2997 if self._ascending:
2998 2998 it = self.fastasc
2999 2999 else:
3000 3000 it = self.fastdesc
3001 3001 if it is None:
3002 3002 # consume the gen and try again
3003 3003 self._list
3004 3004 return iter(self)
3005 3005 return it()
3006 3006
3007 3007 def _trysetasclist(self):
3008 3008 """populate the _asclist attribute if possible and necessary"""
3009 3009 if self._genlist is not None and self._asclist is None:
3010 3010 self._asclist = sorted(self._genlist)
3011 3011
3012 3012 @property
3013 3013 def fastasc(self):
3014 3014 self._trysetasclist()
3015 3015 if self._asclist is not None:
3016 3016 return self._asclist.__iter__
3017 3017 iter1 = self._r1.fastasc
3018 3018 iter2 = self._r2.fastasc
3019 3019 if None in (iter1, iter2):
3020 3020 return None
3021 3021 return lambda: self._iterordered(True, iter1(), iter2())
3022 3022
3023 3023 @property
3024 3024 def fastdesc(self):
3025 3025 self._trysetasclist()
3026 3026 if self._asclist is not None:
3027 3027 return self._asclist.__reversed__
3028 3028 iter1 = self._r1.fastdesc
3029 3029 iter2 = self._r2.fastdesc
3030 3030 if None in (iter1, iter2):
3031 3031 return None
3032 3032 return lambda: self._iterordered(False, iter1(), iter2())
3033 3033
3034 3034 def _iterordered(self, ascending, iter1, iter2):
3035 3035 """produce an ordered iteration from two iterators with the same order
3036 3036
3037 3037 The ``ascending`` argument is used to indicate the iteration direction.
3038 3038 """
3039 3039 choice = max
3040 3040 if ascending:
3041 3041 choice = min
3042 3042
3043 3043 val1 = None
3044 3044 val2 = None
3045 3045
3049 3049 try:
3050 3050 # Consume both iterators in an ordered way until one is
3051 3051 # empty
3052 3052 while True:
3053 3053 if val1 is None:
3054 3054 val1 = iter1.next()
3055 3055 if val2 is None:
3056 3056 val2 = iter2.next()
3057 3057 next = choice(val1, val2)
3058 3058 yield next
3059 3059 if val1 == next:
3060 3060 val1 = None
3061 3061 if val2 == next:
3062 3062 val2 = None
3063 3063 except StopIteration:
3064 3064 # Flush any remaining values and consume the other one
3065 3065 it = iter2
3066 3066 if val1 is not None:
3067 3067 yield val1
3068 3068 it = iter1
3069 3069 elif val2 is not None:
3070 3070 # might have been equality and both are empty
3071 3071 yield val2
3072 3072 for val in it:
3073 3073 yield val
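# Sketch of the merge behaviour: with ascending=True and inputs producing
# 1, 3, 5 and 2, 3, 6, the generator above yields 1, 2, 3, 5, 6; the
# shared value 3 clears both val1 and val2 at once, so values common to
# both sides are emitted only once.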
3074 3074
3075 3075 def __contains__(self, x):
3076 3076 return x in self._r1 or x in self._r2
3077 3077
3078 3078 def sort(self, reverse=False):
3079 3079 """Sort the added set
3080 3080
3081 3081 For this we use the cached list with all the generated values and if we
3082 3082 know they are ascending or descending we can sort them in a smart way.
3083 3083 """
3084 3084 self._ascending = not reverse
3085 3085
3086 3086 def isascending(self):
3087 3087 return self._ascending is not None and self._ascending
3088 3088
3089 3089 def isdescending(self):
3090 3090 return self._ascending is not None and not self._ascending
3091 3091
3092 3092 def reverse(self):
3093 3093 if self._ascending is None:
3094 3094 self._list.reverse()
3095 3095 else:
3096 3096 self._ascending = not self._ascending
3097 3097
3098 3098 def first(self):
3099 3099 for x in self:
3100 3100 return x
3101 3101 return None
3102 3102
3103 3103 def last(self):
3104 3104 self.reverse()
3105 3105 val = self.first()
3106 3106 self.reverse()
3107 3107 return val
3108 3108
3109 3109 def __repr__(self):
3110 3110 d = {None: '', False: '-', True: '+'}[self._ascending]
3111 3111 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3112 3112
3113 3113 class generatorset(abstractsmartset):
3114 3114 """Wrap a generator for lazy iteration
3115 3115
3116 3116 Wrapper structure for generators that provides lazy membership and can
3117 3117 be iterated more than once.
3118 3118 When asked for membership it generates values until either it finds the
3119 3119 requested one or has gone through all the elements in the generator
3120 3120 """
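# Rough intuition for a generatorset ``gs`` wrapping a generator that
# yields 7, 5, 9, ...: ``5 in gs`` consumes values only until 5 appears,
# caching 7 and 5 along the way, so a later ``7 in gs`` is answered from
# the cache and a second iteration replays the cached prefix instead of
# re-running the generator.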
3121 3121 def __init__(self, gen, iterasc=None):
3122 3122 """
3123 3123 gen: a generator producing the values for the generatorset.
3124 3124 """
3125 3125 self._gen = gen
3126 3126 self._asclist = None
3127 3127 self._cache = {}
3128 3128 self._genlist = []
3129 3129 self._finished = False
3130 3130 self._ascending = True
3131 3131 if iterasc is not None:
3132 3132 if iterasc:
3133 3133 self.fastasc = self._iterator
3134 3134 self.__contains__ = self._asccontains
3135 3135 else:
3136 3136 self.fastdesc = self._iterator
3137 3137 self.__contains__ = self._desccontains
3138 3138
3139 3139 def __nonzero__(self):
3140 3140 # Do not use 'for r in self' because it will enforce the iteration
3141 3141 # order (default ascending), possibly unrolling a whole descending
3142 3142 # iterator.
3143 3143 if self._genlist:
3144 3144 return True
3145 3145 for r in self._consumegen():
3146 3146 return True
3147 3147 return False
3148 3148
3149 3149 def __contains__(self, x):
3150 3150 if x in self._cache:
3151 3151 return self._cache[x]
3152 3152
3153 3153 # Use new values only, as existing values would be cached.
3154 3154 for l in self._consumegen():
3155 3155 if l == x:
3156 3156 return True
3157 3157
3158 3158 self._cache[x] = False
3159 3159 return False
3160 3160
3161 3161 def _asccontains(self, x):
3162 3162 """version of contains optimised for ascending generator"""
3163 3163 if x in self._cache:
3164 3164 return self._cache[x]
3165 3165
3166 3166 # Use new values only, as existing values would be cached.
3167 3167 for l in self._consumegen():
3168 3168 if l == x:
3169 3169 return True
3170 3170 if l > x:
3171 3171 break
3172 3172
3173 3173 self._cache[x] = False
3174 3174 return False
3175 3175
3176 3176 def _desccontains(self, x):
3177 3177 """version of contains optimised for descending generator"""
3178 3178 if x in self._cache:
3179 3179 return self._cache[x]
3180 3180
3181 3181 # Use new values only, as existing values would be cached.
3182 3182 for l in self._consumegen():
3183 3183 if l == x:
3184 3184 return True
3185 3185 if l < x:
3186 3186 break
3187 3187
3188 3188 self._cache[x] = False
3189 3189 return False
3190 3190
3191 3191 def __iter__(self):
3192 3192 if self._ascending:
3193 3193 it = self.fastasc
3194 3194 else:
3195 3195 it = self.fastdesc
3196 3196 if it is not None:
3197 3197 return it()
3198 3198 # we need to consume the iterator
3199 3199 for x in self._consumegen():
3200 3200 pass
3201 3201 # recall the same code
3202 3202 return iter(self)
3203 3203
3204 3204 def _iterator(self):
3205 3205 if self._finished:
3206 3206 return iter(self._genlist)
3207 3207
3208 3208 # We have to use this complex iteration strategy to allow multiple
3209 3209 # iterations at the same time. We need to be able to catch revisions
3210 3210 # removed from _consumegen and added to genlist in another instance.
3211 3211 #
3212 3212 # Getting rid of it would provide about a 15% speed up on this
3213 3213 # iteration.
3214 3214 genlist = self._genlist
3215 3215 nextrev = self._consumegen().next
3216 3216 _len = len # cache global lookup
3217 3217 def gen():
3218 3218 i = 0
3219 3219 while True:
3220 3220 if i < _len(genlist):
3221 3221 yield genlist[i]
3222 3222 else:
3223 3223 yield nextrev()
3224 3224 i += 1
3225 3225 return gen()
3226 3226
3227 3227 def _consumegen(self):
3228 3228 cache = self._cache
3229 3229 genlist = self._genlist.append
3230 3230 for item in self._gen:
3231 3231 cache[item] = True
3232 3232 genlist(item)
3233 3233 yield item
3234 3234 if not self._finished:
3235 3235 self._finished = True
3236 3236 asc = self._genlist[:]
3237 3237 asc.sort()
3238 3238 self._asclist = asc
3239 3239 self.fastasc = asc.__iter__
3240 3240 self.fastdesc = asc.__reversed__
3241 3241
3242 3242 def __len__(self):
3243 3243 for x in self._consumegen():
3244 3244 pass
3245 3245 return len(self._genlist)
3246 3246
3247 3247 def sort(self, reverse=False):
3248 3248 self._ascending = not reverse
3249 3249
3250 3250 def reverse(self):
3251 3251 self._ascending = not self._ascending
3252 3252
3253 3253 def isascending(self):
3254 3254 return self._ascending
3255 3255
3256 3256 def isdescending(self):
3257 3257 return not self._ascending
3258 3258
3259 3259 def first(self):
3260 3260 if self._ascending:
3261 3261 it = self.fastasc
3262 3262 else:
3263 3263 it = self.fastdesc
3264 3264 if it is None:
3265 3265 # we need to consume all and try again
3266 3266 for x in self._consumegen():
3267 3267 pass
3268 3268 return self.first()
3269 3269 if self:
3270 3270 return it().next()
3271 3271 return None
3272 3272
3273 3273 def last(self):
3274 3274 if self._ascending:
3275 3275 it = self.fastdesc
3276 3276 else:
3277 3277 it = self.fastasc
3278 3278 if it is None:
3279 3279 # we need to consume all and try again
3280 3280 for x in self._consumegen():
3281 3281 pass
3282 3282 return self.last()
3283 3283 if self:
3284 3284 return it().next()
3285 3285 return None
3286 3286
3287 3287 def __repr__(self):
3288 3288 d = {False: '-', True: '+'}[self._ascending]
3289 3289 return '<%s%s>' % (type(self).__name__, d)
3290 3290
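# Editorial sketch, not part of revset.py: a minimal demonstration of how a
# generatorset answers membership and ordering queries lazily. It assumes only
# the generatorset class defined above; the literal generator below is a
# hypothetical stand-in for a real revision stream.
def _demo_generatorset():
    revs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
    assert 2 in revs                   # values are generated and cached until 2 appears
    assert 3 not in revs               # misses are remembered in _cache as False
    assert list(revs) == [0, 2, 5, 9]  # everything generated so far lives in _genlist
    assert revs.first() == 0
    assert revs.last() == 9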
3291 3291 class spanset(abstractsmartset):
3292 3292 """Duck type for baseset class which represents a range of revisions and
3293 3293 can work lazily and without having all the range in memory
3294 3294
3295 3295 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3296 3296 notable points:
3297 3297 - when x < y it will be automatically descending,
3298 3298 - revision filtered with this repoview will be skipped.
3299 3299
3300 3300 """
3301 3301 def __init__(self, repo, start=0, end=None):
3302 3302 """
3303 3303 start: first revision included in the set
3304 3304 (defaults to 0)
3305 3305 end: first revision excluded (last + 1)
3306 3306 (defaults to len(repo))
3307 3307
3308 3308 Spanset will be descending if `end` < `start`.
3309 3309 """
3310 3310 if end is None:
3311 3311 end = len(repo)
3312 3312 self._ascending = start <= end
3313 3313 if not self._ascending:
3314 3314 start, end = end + 1, start + 1
3315 3315 self._start = start
3316 3316 self._end = end
3317 3317 self._hiddenrevs = repo.changelog.filteredrevs
3318 3318
3319 3319 def sort(self, reverse=False):
3320 3320 self._ascending = not reverse
3321 3321
3322 3322 def reverse(self):
3323 3323 self._ascending = not self._ascending
3324 3324
3325 3325 def _iterfilter(self, iterrange):
3326 3326 s = self._hiddenrevs
3327 3327 for r in iterrange:
3328 3328 if r not in s:
3329 3329 yield r
3330 3330
3331 3331 def __iter__(self):
3332 3332 if self._ascending:
3333 3333 return self.fastasc()
3334 3334 else:
3335 3335 return self.fastdesc()
3336 3336
3337 3337 def fastasc(self):
3338 3338 iterrange = xrange(self._start, self._end)
3339 3339 if self._hiddenrevs:
3340 3340 return self._iterfilter(iterrange)
3341 3341 return iter(iterrange)
3342 3342
3343 3343 def fastdesc(self):
3344 3344 iterrange = xrange(self._end - 1, self._start - 1, -1)
3345 3345 if self._hiddenrevs:
3346 3346 return self._iterfilter(iterrange)
3347 3347 return iter(iterrange)
3348 3348
3349 3349 def __contains__(self, rev):
3350 3350 hidden = self._hiddenrevs
3351 3351 return ((self._start <= rev < self._end)
3352 3352 and not (hidden and rev in hidden))
3353 3353
3354 3354 def __nonzero__(self):
3355 3355 for r in self:
3356 3356 return True
3357 3357 return False
3358 3358
3359 3359 def __len__(self):
3360 3360 if not self._hiddenrevs:
3361 3361 return abs(self._end - self._start)
3362 3362 else:
3363 3363 count = 0
3364 3364 start = self._start
3365 3365 end = self._end
3366 3366 for rev in self._hiddenrevs:
3367 3367 if (end < rev <= start) or (start <= rev < end):
3368 3368 count += 1
3369 3369 return abs(self._end - self._start) - count
3370 3370
3371 3371 def isascending(self):
3372 3372 return self._ascending
3373 3373
3374 3374 def isdescending(self):
3375 3375 return not self._ascending
3376 3376
3377 3377 def first(self):
3378 3378 if self._ascending:
3379 3379 it = self.fastasc
3380 3380 else:
3381 3381 it = self.fastdesc
3382 3382 for x in it():
3383 3383 return x
3384 3384 return None
3385 3385
3386 3386 def last(self):
3387 3387 if self._ascending:
3388 3388 it = self.fastdesc
3389 3389 else:
3390 3390 it = self.fastasc
3391 3391 for x in it():
3392 3392 return x
3393 3393 return None
3394 3394
3395 3395 def __repr__(self):
3396 3396 d = {False: '-', True: '+'}[self._ascending]
3397 3397 return '<%s%s %d:%d>' % (type(self).__name__, d,
3398 3398 self._start, self._end - 1)
3399 3399
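# Editorial sketch, not part of revset.py: spanset ordering and filtering
# semantics, using a tiny hypothetical stand-in for a repository (only
# len(repo) and repo.changelog.filteredrevs are consulted by spanset).
def _demo_spanset():
    class _fakechangelog(object):
        filteredrevs = frozenset([3])
    class _fakerepo(object):
        changelog = _fakechangelog()
        def __len__(self):
            return 6
    repo = _fakerepo()
    assert list(spanset(repo, 0, 5)) == [0, 1, 2, 4]  # ascending, rev 3 filtered
    assert list(spanset(repo, 5, 0)) == [5, 4, 2, 1]  # start > end: descending
    assert list(spanset(repo)) == [0, 1, 2, 4, 5]     # end defaults to len(repo)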
3400 3400 class fullreposet(spanset):
3401 3401 """a set containing all revisions in the repo
3402 3402
3403 3403 This class exists to host special optimizations and magic needed to handle
3404 3404 virtual revisions such as "null".
3405 3405 """
3406 3406
3407 3407 def __init__(self, repo):
3408 3408 super(fullreposet, self).__init__(repo)
3409 3409
3410 3410 def __contains__(self, rev):
3411 3411 # assumes the given rev is valid
3412 3412 hidden = self._hiddenrevs
3413 3413 return not (hidden and rev in hidden)
3414 3414
3415 3415 def __and__(self, other):
3416 3416 """As self contains the whole repo, all of the other set should also be
3417 3417 in self. Therefore `self & other = other`.
3418 3418
3419 3419 This boldly assumes the other contains valid revs only.
3420 3420 """
3421 3421 # other is not a smartset; make it so
3422 3422 if not util.safehasattr(other, 'isascending'):
3423 3423 # filter out hidden revisions
3424 3424 # (this boldly assumes all smartsets are pure)
3425 3425 #
3426 3426 # `other` was used with "&", let's assume this is a set like
3427 3427 # object.
3428 3428 other = baseset(other - self._hiddenrevs)
3429 3429
3430 3430 other.sort(reverse=self.isdescending())
3431 3431 return other
3432 3432
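# Editorial sketch, not part of revset.py: the fullreposet & other shortcut.
# It uses the same kind of hypothetical repo stand-in as above and relies on
# baseset, defined earlier in this file.
def _demo_fullreposet_and():
    class _fakechangelog(object):
        filteredrevs = frozenset([3])
    class _fakerepo(object):
        changelog = _fakechangelog()
        def __len__(self):
            return 6
    allrevs = fullreposet(_fakerepo())
    # a plain builtin set is wrapped in a baseset, hidden revisions are
    # dropped, and the result takes self's iteration direction (ascending)
    assert list(allrevs & set([1, 3, 5])) == [1, 5]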
3433 3433 def prettyformatset(revs):
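    """return repr(revs) split at every nested '<' and indented by nesting depth"""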
3434 3434 lines = []
3435 3435 rs = repr(revs)
3436 3436 p = 0
3437 3437 while p < len(rs):
3438 3438 q = rs.find('<', p + 1)
3439 3439 if q < 0:
3440 3440 q = len(rs)
3441 3441 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3442 3442 assert l >= 0
3443 3443 lines.append((l, rs[p:q].rstrip()))
3444 3444 p = q
3445 3445 return '\n'.join(' ' * l + s for l, s in lines)
3446 3446
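# Editorial sketch, not part of revset.py: prettyformatset output. A flat
# smartset repr is returned unchanged; composite sets whose repr embeds the
# reprs of their children (e.g. the filteredset or addset classes defined
# earlier in this file) are split at every '<' and indented by nesting depth,
# one node per line. The repo stand-in is hypothetical, as above.
def _demo_prettyformatset():
    class _fakechangelog(object):
        filteredrevs = frozenset()
    class _fakerepo(object):
        changelog = _fakechangelog()
        def __len__(self):
            return 10
    assert prettyformatset(spanset(_fakerepo(), 0, 5)) == '<spanset+ 0:4>'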
3447 3447 # tell hggettext to extract docstrings from these functions:
3448 3448 i18nfunctions = symbols.values()