# Excerpt of mercurial/revset.py as of changeset r25307:4d1e56b2 (default
# branch) -- "revset: comment that we can't swap 'or' operands by weight",
# author Yuya Nishihara.  Diff context: 3507 -> 3509 lines.
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazily evaluated, descending generatorset of the ancestors
    of ``revs`` (the input revs themselves are yielded too).
    """
    # cut == 1 restricts parentrevs() slices to the first parent only
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # max-heap emulated by pushing negated revision numbers
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # once the highest pending input rev is reached, feed the next
            # one in, so input revs merge with discovered parents in order
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
51 51
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset.  The input revs themselves are not
    yielded (scan starts at min(revs) + 1), except in the nullrev case
    below where every rev of the repo is yielded.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # single ascending pass: i is a descendant iff one of its
            # (possibly first-only) parents is already in seen
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
78 78
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}  # rev -> its parentrevs tuple, for every rev visited
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # revs below minroot can never lead back to a root
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # second pass, ascending: a visited rev is on a root->head path iff
    # one of its parents already is, so reachability propagates upward
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
108 108
# Parser operator table: token -> (binding strength, prefix spec, infix
# spec[, suffix spec]); consumed by tokenize()/the parser module.
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words that tokenize() must emit as operators, never as symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
144 144
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: take the payload verbatim, no unescaping
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                # Python 2 codec undoing backslash escapes
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the pieces as
                    # symbol/minus token pairs, tracking offsets in s
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
247 247
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    # position information is available in args[1]
    return _('at %s: %s') % (args[1], args[0])
255 255
256 256 # helpers
257 257
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' node, else raise ``err``."""
    if not x or x[0] not in ('string', 'symbol'):
        raise error.ParseError(err)
    return x[1]
262 262
def getlist(x):
    """Flatten a parse tree of nested 'list' nodes into a Python list."""
    items = []
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
269 269
def getargs(x, min, max, err):
    """Return the argument list of ``x``, raising ``err`` unless its length
    is within [min, max] (a negative max means unbounded)."""
    args = getlist(x)
    if not (min <= len(args) and (max < 0 or len(args) <= max)):
        raise error.ParseError(err)
    return args
275 275
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    return len(tree) > 1 and tree[0] == 'symbol'
280 280
def getsymbol(tree):
    """Return the name stored in a ``symbol`` node.

    ``tree`` must already have been checked with ``isvalidsymbol``.
    """
    name = tree[1]
    return name
287 287
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    # the function name slot must itself be a valid symbol node
    name = tree[1]
    return len(name) > 1 and name[0] == 'symbol'
292 292
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # tree[1] is the ('symbol', name, ...) node of the function name
    return tree[1][1]
299 299
def getfuncargs(tree):
    """Get the argument list from a valid ``func`` node.

    ``tree`` must already have been checked with ``isvalidfunc``.  A call
    with no arguments has no third element and yields an empty list.
    """
    has_args = len(tree) > 2
    return getlist(tree[2]) if has_args else []
309 309
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` against ``subset`` via the methods table.

    The result is wrapped in a baseset unless it already exposes the
    smartset API (detected by the presence of ``isascending``).
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    return baseset(s)
317 317
def _getrevsource(repo, r):
    # Return the rev recorded in r's extra data as its graft/transplant/
    # rebase source, or None when no source is recorded or it cannot be
    # looked up in this repository.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                pass
    return None
327 327
328 328 # operator methods
329 329
def stringset(repo, subset, x):
    # Resolve a literal revision identifier; keep it only when present in
    # subset (nullrev passes when subset covers the whole repo).
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
336 336
def rangeset(repo, subset, x, y):
    # 'x:y' operator: span from the first rev of x to the last rev of y,
    # intersected with subset.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range; presumably spanset iterates downward when
        # start > end, with the end bound exclusive as above -- TODO confirm
        r = spanset(repo, m, n - 1)
    return r & subset
350 350
def dagrange(repo, subset, x, y):
    # 'x::y' operator: all revs on DAG paths between x and y, both ends
    # included, restricted to subset.
    r = fullreposet(repo)
    xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    return xs & subset
355 355
def andset(repo, subset, x, y):
    # intersection by chaining: y is evaluated against the result of x
    return getset(repo, getset(repo, subset, x), y)
358 358
def orset(repo, subset, x, y):
    # Union by concatenation: the left operand's revs come first, so the
    # order of x and y is significant for the result's iteration order --
    # do not reorder the operands.
    xl = getset(repo, subset, x)
    yl = getset(repo, subset, y)
    return xl + yl
363 363
def notset(repo, subset, x):
    # complement of x within subset
    return subset - getset(repo, subset, x)
366 366
def listset(repo, subset, a, b):
    # a bare 'a,b' list reaching evaluation is always a syntax error
    raise error.ParseError(_("can't use a list in this context"))
369 369
def func(repo, subset, a, b):
    # dispatch 'name(args)' to the predicate registered in symbols;
    # a is the name node, b the (possibly None) argument tree
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.UnknownIdentifier(a[1], symbols.keys())
374 374
375 375 # functions
376 376
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 selects the 'added' column of repo.status()
    return checkstatus(repo, subset, pat, 1)
388 388
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
413 413
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation of ancestors()/_firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
420 420
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
426 426
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
431 431
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # follow the first-parent link n times
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
448 448
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # matching is case-insensitive: both pattern and usernames are lowered
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
457 457
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get resolves the status name to matching revs -- see hbisect
    state = set(hbisect.get(repo, status))
    return subset & state
474 474
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated spelling of bisect()
    return bisect(repo, subset, x)
479 479
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name lookup; unknown names are a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern lookup: collect every bookmark whose name matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
517 517
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex/other kinds always filter by branch name here
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: keep revs in x plus every rev on any branch that a
    # member of x belongs to
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
550 550
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
561 561
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundle repos expose this attribute
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
573 573
def checkstatus(repo, subset, pat, field):
    """Helper for status-based predicates (adds() passes field=1): revs
    where a file matching ``pat`` appears in column ``field`` of
    repo.status(parent, rev)."""
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list so the closure can rebuild the matcher per-ctx
    # when the pattern is a fileset ('set:' kind)
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        # fast path: a single literal file name
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches)
605 605
def _children(repo, narrow, parentset):
    # Return the members of ``narrow`` that have a parent in ``parentset``.
    cs = set()
    if not parentset:
        return baseset(cs)
    pr = repo.changelog.parentrevs
    minrev = min(parentset)
    for r in narrow:
        # children always have a higher rev than any parent
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
619 619
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
627 627
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
635 635
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # pattern: scan the whole manifest of the revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
662 662
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match allows passing an abbreviated source identifier
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
684 684
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the timestamp component of the (timestamp, tz) pair
    return subset.filter(lambda x: dm(repo[x].date()[0]))
693 693
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        # substring match on the lowered description
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
706 706
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation of descendants()/_firstdescendants(); the
    # result includes the roots themselves (where present in subset)
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to adopt subset's ordering
        result = subset & result
    return result
725 725
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
731 731
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
736 736
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage: revs visited while walking sources back from r; built
        # lazily so revs with no recorded source allocate nothing
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
780 780
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
789 789
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.draft
    condition = lambda r: phase(repo, r) == target
    # cache=False: phase data can change, so do not memoize the filter
    return subset.filter(condition, cache=False)
799 799
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
808 808
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value argument, mere presence of the label matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
835 835
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single filelog to visit
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: every tracked file in the working copy that matches
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
950 950
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # identical signature and semantics; limit() does the real work
    return limit(repo, subset, x)
956 956
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst(); 'name' is
    # only used to build user-visible error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if not l:
        # no filename: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)
        return subset & s
    x = getstring(l[0], _("%s expected a filename") % name)
    if x not in c:
        # file absent from the working directory parent: empty result
        return baseset()
    cx = c[x]
    s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    s.add(cx.introrev())
    return subset & s
973 973
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegate to the shared helper; 'follow' is used in error messages
    return _follow(repo, subset, x, 'follow')
981 981
def _followfirst(repo, subset, x):
    # ``_followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
987 987
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
995 995
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error, e:
        # surface a malformed pattern as a parse error, not a traceback
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search changed file names, the user name and the full description
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)
1016 1016
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # split "p:pattern" into its two-character prefix and payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # rev is None when unspecified; repo[None] is the working directory
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches when any of its changed files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1073 1073
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # implemented on top of the generic _matchfiles machinery
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1086 1086
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    # collect the head revisions of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(repo[n].rev())
    return baseset(headrevs).filter(subset.__contains__)
1097 1097
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # any member that is a parent of another member has a child in the set
    parentset = parents(repo, subset, x)
    return members - parentset
1105 1105
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1114 1114
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring search over files, user and description
        c = repo[r]
        for text in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1129 1129
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
        except (TypeError, ValueError):
            # i18n: "limit" is a keyword
            raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    picked = []
    it = iter(os)
    taken = 0
    # consume at most lim elements of the argument set, keeping those
    # that are also in the incoming subset
    while taken < lim:
        y = next(it, None)
        if y is None:
            break
        if y in ss:
            picked.append(y)
        taken += 1
    return baseset(picked)
1155 1155
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    # walk the argument set from the end so "last" becomes "first"
    os.reverse()
    picked = []
    it = iter(os)
    taken = 0
    while taken < lim:
        y = next(it, None)
        if y is None:
            break
        if y in ss:
            picked.append(y)
        taken += 1
    return baseset(picked)
1182 1182
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        return baseset()
    top = candidates.max()
    # only report the maximum when it survives the subset restriction
    if top not in subset:
        return baseset()
    return baseset([top])
1193 1193
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1202 1202
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # Count children for every revision >= min(subset) with a flat list
    # indexed by (rev - baserev); cheaper than a dict for dense ranges.
    baserev = min(subset)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1219 1219
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if not candidates:
        return baseset()
    low = candidates.min()
    # only report the minimum when it survives the subset restriction
    if low not in subset:
        return baseset()
    return baseset([low])
1230 1230
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == modified files
    return checkstatus(repo, subset, pat, 0)
1242 1242
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern match: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather revisions of every non-deprecated name in the namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1280 1280
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-digit hex node: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: ask the changelog for an unambiguous match
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1304 1304
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsstore keeps a precomputed set of obsolete revisions
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1312 1312
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 2:
        # explicit second set: exclude its ancestors
        exclude = getset(repo, fullreposet(repo), args[1])
    else:
        if not include:
            return baseset()
        # implicit form: exclude every repo head that is neither in the
        # first set nor one of its descendants
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1336 1336
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its
        # very first link; None when rev has no recorded source
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    origins = set()
    for r in dests:
        s = _firstsrc(r)
        if s is not None:
            origins.add(s)
    return subset & origins
1365 1365
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    import discovery
    import hg
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    if l:
        # i18n: "outgoing" is a keyword
        dest = getstring(l[0], _("outgoing requires a repository path"))
    else:
        dest = ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery chatter while computing the outgoing set
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set(cl.rev(r) for r in outgoing.missing)
    return subset & missing
1390 1390
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set()
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    ps.discard(node.nullrev)
    return subset & ps
1407 1407
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        wparents = repo[x].parents()
        try:
            p = wparents[1].rev()
        except IndexError:
            # no second parent
            return baseset()
        if p < 0:
            return baseset()
        return subset & baseset([p])

    cl = repo.changelog
    ps = set()
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps.discard(node.nullrev)
    return subset & ps
1428 1428
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        cl = repo.changelog
        ps = set()
        for r in getset(repo, fullreposet(repo), x):
            ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1442 1442
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    # n arrives as a parse-tree token; its payload must be 0, 1 or 2
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1467 1467
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow unknown-revision errors: that is the whole point
        return baseset()
1481 1481
# for internal use
def _notpublic(repo, subset, x):
    # ``_notpublic()``: changesets not in the public phase.
    # No _() on the message: this predicate is internal, never user-typed.
    getargs(x, 0, 0, "_notpublic takes no arguments")
    if repo._phasecache._phasesets:
        # fast path: the phase cache keeps one rev set per non-public phase
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1495 1495
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    getphase = repo._phasecache.phase

    def ispublic(r):
        return getphase(repo, r) == phases.public

    # phases may change during the life of the repo: don't cache the filter
    return subset.filter(ispublic, cache=False)
1505 1505
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    # default identifier '.' means the current local branch
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contact the remote and resolve the identifier to a node
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    # only yield a result when that node also exists locally
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1540 1540
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 == removed files
    return checkstatus(repo, subset, pat, 2)
1552 1552
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never "in" the changelog
    if r != node.nullrev and r not in repo.changelog:
        return baseset()
    return subset & baseset([r])
1568 1568
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches when every selected field equals the corresponding
        # field of at least one reference revision in revs
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1680 1680
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated set
    result = getset(repo, subset, x)
    result.reverse()
    return result
1688 1688
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    # restrict the incoming subset to members of s
    subset = subset & s
    # drop everything that has a parent inside s
    cs = _children(repo, subset, s)
    return subset - cs
1697 1697
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    getphase = repo._phasecache.phase

    def issecret(r):
        return getphase(repo, r) == phases.secret

    # phases may change during the life of the repo: don't cache the filter
    return subset.filter(issecret, cache=False)
1707 1707
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each character to its complement so that an ascending sort
        # of the inverted string yields a descending sort of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        # fast paths for the common single-key rev sorts
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        # build a sort key tuple, one entry per requested key
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing rev both breaks ties deterministically and lets us
        # recover the revision from the sorted key tuples below
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1770 1770
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    # NOTE: pat is only bound when an argument was given; submatches()
    # below is only ever called on that same code path, so this is safe.
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always go through the .hgsubstate file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # did this changeset touch .hgsubstate at all?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the subrepo state against the first parent to see
            # whether a matching subrepo actually changed
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1813 1813
1814 1814 def _stringmatcher(pattern):
1815 1815 """
1816 1816 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1817 1817 returns the matcher name, pattern, and matcher function.
1818 1818 missing or unknown prefixes are treated as literal matches.
1819 1819
1820 1820 helper for tests:
1821 1821 >>> def test(pattern, *tests):
1822 1822 ... kind, pattern, matcher = _stringmatcher(pattern)
1823 1823 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1824 1824
1825 1825 exact matching (no prefix):
1826 1826 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1827 1827 ('literal', 'abcdefg', [False, False, True])
1828 1828
1829 1829 regex matching ('re:' prefix)
1830 1830 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1831 1831 ('re', 'a.+b', [False, False, True])
1832 1832
1833 1833 force exact matches ('literal:' prefix)
1834 1834 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1835 1835 ('literal', 're:foobar', [False, True])
1836 1836
1837 1837 unknown prefixes are ignored and treated as literals
1838 1838 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1839 1839 ('literal', 'foo:bar', [False, False, True])
1840 1840 """
1841 1841 if pattern.startswith('re:'):
1842 1842 pattern = pattern[3:]
1843 1843 try:
1844 1844 regex = re.compile(pattern)
1845 1845 except re.error, e:
1846 1846 raise error.ParseError(_('invalid regular expression: %s')
1847 1847 % e)
1848 1848 return 're', pattern, regex.search
1849 1849 elif pattern.startswith('literal:'):
1850 1850 pattern = pattern[8:]
1851 1851 return 'literal', pattern, pattern.__eq__
1852 1852
def _substringmatcher(pattern):
    # like _stringmatcher, except that a 'literal' pattern matches
    # anywhere inside the candidate string instead of the whole string
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
1858 1858
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no name given: every tagged revision except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1887 1887
def tagged(repo, subset, x):
    # alias of tag(); left without a docstring so it does not show up as a
    # separate entry in the generated revset help -- TODO confirm intent
    return tag(repo, subset, x)
1890 1890
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # the obsolete module maintains the set of 'unstable' revisions
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
1899 1899
1900 1900
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # plain alias: the matching rules are implemented by author()
    return author(repo, subset, x)
1910 1910
# experimental
def wdir(repo, subset, x):
    # ``wdir()``: the working directory revision, represented here by None
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if None in subset or isinstance(subset, fullreposet):
        return baseset([None])
    return baseset()
1918 1918
# for internal use
def _list(repo, subset, x):
    # ``_list('r1\0r2\0...')``: expand a \0-separated list of revision
    # identifiers, keeping only revisions also present in subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [repo[r].rev() for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])
1927 1927
# for internal use
def _intlist(repo, subset, x):
    # ``_intlist('n1\0n2\0...')``: like _list() but the items are integer
    # revision numbers, so no repo lookup is needed
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])
1936 1936
# for internal use
def _hexlist(repo, subset, x):
    # ``_hexlist('h1\0h2\0...')``: like _list() but the items are full
    # hexadecimal node hashes, resolved through the changelog
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    s = subset
    return baseset([r for r in ls if r in s])
1946 1946
# table mapping revset predicate names to their implementation functions
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2021 2021
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): this is a subset of the symbols table above -- keep the
# two tables in sync when adding predicates
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2095 2095
# table mapping parse tree node types to their evaluation functions
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2110 2110
def optimize(x, small):
    """Transform parse tree ``x`` into an equivalent but cheaper one.

    Returns a ``(weight, tree)`` pair.  The weight is a rough cost
    estimate; it is used to put the cheaper operand of an 'and' first.
    ``small`` hints that the expression is expected to yield few
    revisions (a bonus applied to single-revision nodes).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # rewrite syntactic sugar into its canonical functional form first
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first ('and' result order is
        # insensitive to operand order)
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # if wb < wa:
        #     tb, ta = ta, tb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # hand-tuned cost estimates for the known predicates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2214 2216
2215 2217 _aliasarg = ('func', ('symbol', '_aliasarg'))
2216 2218 def _getaliasarg(tree):
2217 2219 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2218 2220 return X, None otherwise.
2219 2221 """
2220 2222 if (len(tree) == 3 and tree[:2] == _aliasarg
2221 2223 and tree[2][0] == 'string'):
2222 2224 return tree[2][1]
2223 2225 return None
2224 2226
def _checkaliasarg(tree, known=None):
    """Recursively verify that tree carries no _aliasarg construct, or
    only ones whose value is listed in known.  Used to avoid alias
    placeholder injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2235 2237
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions (bytes above 127 are accepted so
# non-ASCII names keep working)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2240 2242
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as regular revsets, with a wider initial-letter set
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2250 2252
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(_tokenizealias, elements)
    try:
        tree, pos = p.parse(decl)
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols; anything else is rejected
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError, inst:
        # report parse failures through errorstr instead of raising
        return (decl, None, None, parseerrordetail(inst))
2321 2323
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wraps _tokenizealias, turning argument references into
        # _aliasarg('NAME') token sequences on the fly
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(tokenizedefn, elements)
    tree, pos = p.parse(defn)
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return tree
2386 2388
class revsetalias(object):
    """A user-defined revset alias built from a name/value config pair."""
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            # parse failures are recorded, not raised; findaliases warns once
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2411 2413
2412 2414 def _getalias(aliases, tree):
2413 2415 """If tree looks like an unexpanded alias, return it. Return None
2414 2416 otherwise.
2415 2417 """
2416 2418 if isinstance(tree, tuple) and tree:
2417 2419 if tree[0] == 'symbol' and len(tree) == 2:
2418 2420 name = tree[1]
2419 2421 alias = aliases.get(name)
2420 2422 if alias and alias.args is None and alias.tree == tree:
2421 2423 return alias
2422 2424 if tree[0] == 'func' and len(tree) > 1:
2423 2425 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2424 2426 name = tree[1][1]
2425 2427 alias = aliases.get(name)
2426 2428 if alias and alias.args is not None and alias.tree == tree[:2]:
2427 2429 return alias
2428 2430 return None
2429 2431
def _expandargs(tree, args):
    """Recursively substitute each _aliasarg placeholder in tree with
    the value registered under the same name in args.
    """
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is not None:
        return args[name]
    return tuple(_expandargs(subtree, args) for subtree in tree)
2440 2442
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    to detect cycles); 'cache' memoizes finished expansions by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            # the alias (transitively) refers to itself
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # expand the actual arguments, then splice them into the body
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2474 2476
def findaliases(ui, tree, showwarning=None):
    """Expand all [revsetalias] config entries found in tree.

    If showwarning is given, emit one warning per broken alias.
    """
    # refuse trees that already contain _aliasarg placeholders
    _checkaliasarg(tree)
    aliases = {}
    for k, v in ui.configitems('revsetalias'):
        alias = revsetalias(k, v)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2489 2491
def foldconcat(tree):
    """Fold elements to be concatenated by `##` into single string nodes."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation itself: fold the children
        return tuple(foldconcat(subtree) for subtree in tree)
    # flatten nested _concat nodes left-to-right and join their text
    parts = []
    stack = [tree]
    while stack:
        item = stack.pop()
        kind = item[0]
        if kind == '_concat':
            stack.extend(reversed(item[1:]))
        elif kind in ('string', 'symbol'):
            parts.append(item[1])
        else:
            raise error.ParseError(
                _("\"##\" can't concatenate \"%s\" element") % kind)
    return ('string', ''.join(parts))
2510 2512
def parse(spec, lookup=None):
    """Parse spec and return its parse tree.

    Raises ParseError when the whole string cannot be consumed.
    """
    p = parser.parser(tokenize, elements)
    tree, pos = p.parse(spec, lookup=lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return tree
2517 2519
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree;
    # deliberately a no-op here
    pass
2521 2523
def match(ui, spec, repo=None):
    """Compile spec into a callable mfunc(repo, subset=None) that
    evaluates the revset against the given subset.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    tree = parse(spec, lookup)
    if ui:
        # expand user-configured [revsetalias] entries
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            # plain iterables are wrapped into a smartset first
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2543 2545
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # escape and single-quote the argument
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument, using the internal _list()/_intlist()/
        # _hexlist() predicates where a compact encoding exists
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # no compact form for this type: split in half and 'or' the parts
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    out = []
    pos = 0
    argidx = 0
    end = len(expr)
    while pos < end:
        c = expr[pos]
        if c != '%':
            out.append(c)
            pos += 1
            continue
        pos += 1
        d = expr[pos]
        if d == '%':
            out.append(d)
        elif d in 'dsnbr':
            out.append(argtype(d, args[argidx]))
            argidx += 1
        elif d == 'l':
            # a list of some type: the next character picks the type
            pos += 1
            d = expr[pos]
            out.append(listexp(list(args[argidx]), d))
            argidx += 1
        else:
            raise util.Abort('unexpected revspec format character %s' % d)
        pos += 1

    return ''.join(out)
2637 2639
def prettyformat(tree):
    """Return an indented, human-readable rendering of a parse tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2640 2642
def depth(tree):
    """Return the nesting depth of a parse tree; a non-tuple leaf is 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2646 2648
def funcsused(tree):
    """Return the set of function names referenced anywhere in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    if tree[0] == 'func':
        # a 'func' node carries its name as ('symbol', name) in slot 1
        names.add(tree[1][1])
    for subtree in tree[1:]:
        names |= funcsused(subtree)
    return names
2657 2659
class abstractsmartset(object):
    """Base class defining the smartset API used by revset evaluation."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None
2679 2681
    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # fastasc iterates in ascending order, so the first value
            # produced is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # fastdesc iterates in descending order, so the first value
            # produced is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)
2703 2705
2704 2706 def first(self):
2705 2707 """return the first element in the set (user iteration perspective)
2706 2708
2707 2709 Return None if the set is empty"""
2708 2710 raise NotImplementedError()
2709 2711
2710 2712 def last(self):
2711 2713 """return the last element in the set (user iteration perspective)
2712 2714
2713 2715 Return None if the set is empty"""
2714 2716 raise NotImplementedError()
2715 2717
2716 2718 def __len__(self):
2717 2719 """return the length of the smartsets
2718 2720
2719 2721 This can be expensive on smartset that could be lazy otherwise."""
2720 2722 raise NotImplementedError()
2721 2723
2722 2724 def reverse(self):
2723 2725 """reverse the expected iteration order"""
2724 2726 raise NotImplementedError()
2725 2727
2726 2728 def sort(self, reverse=True):
2727 2729 """get the set to iterate in an ascending or descending order"""
2728 2730 raise NotImplementedError()
2729 2731
2730 2732 def __and__(self, other):
2731 2733 """Returns a new object with the intersection of the two collections.
2732 2734
2733 2735 This is part of the mandatory API for smartset."""
2734 2736 if isinstance(other, fullreposet):
2735 2737 return self
2736 2738 return self.filter(other.__contains__, cache=False)
2737 2739
2738 2740 def __add__(self, other):
2739 2741 """Returns a new object with the union of the two collections.
2740 2742
2741 2743 This is part of the mandatory API for smartset."""
2742 2744 return addset(self, other)
2743 2745
2744 2746 def __sub__(self, other):
2745 2747 """Returns a new object with the substraction of the two collections.
2746 2748
2747 2749 This is part of the mandatory API for smartset."""
2748 2750 c = other.__contains__
2749 2751 return self.filter(lambda r: not c(r), cache=False)
2750 2752
2751 2753 def filter(self, condition, cache=True):
2752 2754 """Returns this smartset filtered by condition as a new smartset.
2753 2755
2754 2756 `condition` is a callable which takes a revision number and returns a
2755 2757 boolean.
2756 2758
2757 2759 This is part of the mandatory API for smartset."""
2758 2760 # builtin cannot be cached. but do not needs to
2759 2761 if cache and util.safehasattr(condition, 'func_code'):
2760 2762 condition = util.cachefunc(condition)
2761 2763 return filteredset(self, condition)
2762 2764
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # store the revisions as a plain list; ordering state lives in
        # self._ascending (None: insertion order, True/False: sorted order)
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily built set for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind membership testing directly to the cached set
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order yet: physically reverse the storage
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
2857 2859
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        self._cache = {}  # rev -> bool, memoized membership verdicts

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # cache the result so the (possibly expensive) condition runs at
            # most once per revision
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of `it` accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast ascending iteration exists only if the subset has one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast descending iteration exists only if the subset has one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self._subset.isascending():
            # ascending iteration: the last item is the largest element,
            # which the descending fast iterator (if any) yields first
            it = self.fastdesc
        elif self._subset.isdescending():
            # descending iteration: the last item is the smallest element,
            # which the ascending fast iterator (if any) yields first
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
2944 2946
2945 2947 def _iterordered(ascending, iter1, iter2):
2946 2948 """produce an ordered iteration from two iterators with the same order
2947 2949
2948 2950 The ascending is used to indicated the iteration direction.
2949 2951 """
2950 2952 choice = max
2951 2953 if ascending:
2952 2954 choice = min
2953 2955
2954 2956 val1 = None
2955 2957 val2 = None
2956 2958 try:
2957 2959 # Consume both iterators in an ordered way until one is empty
2958 2960 while True:
2959 2961 if val1 is None:
2960 2962 val1 = iter1.next()
2961 2963 if val2 is None:
2962 2964 val2 = iter2.next()
2963 2965 next = choice(val1, val2)
2964 2966 yield next
2965 2967 if val1 == next:
2966 2968 val1 = None
2967 2969 if val2 == next:
2968 2970 val2 = None
2969 2971 except StopIteration:
2970 2972 # Flush any remaining values and consume the other one
2971 2973 it = iter2
2972 2974 if val1 is not None:
2973 2975 yield val1
2974 2976 it = iter1
2975 2977 elif val2 is not None:
2976 2978 # might have been equality and both are empty
2977 2979 yield val2
2978 2980 for val in it:
2979 2981 yield val
2980 2982
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two operand smartsets
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: unknown/arbitrary order, True/False: merged in that order
        self._ascending = ascending
        # caches: materialized union (baseset) and its sorted copy
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces full materialization of the union (see _list)
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialize the union once, reusing whatever __iter__ yields
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so
        we don't yield any duplicates.

        If the ascending attribute is set, iterate over both collections at
        the same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # r1 first, then anything of r2 not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the cached sorted list; otherwise merge the operands' fast
        # ascending iterators (requires both to exist)
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # prefer the cached sorted list; otherwise merge the operands' fast
        # descending iterators (requires both to exist)
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if
        we know they are ascending or descending we can sort them in a smart
        way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: physically reverse the materialized list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip direction, take the new first element, then restore direction
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3180 3182
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None if the generator's order is unknown, True if it yields
        ascending values, False if descending.  A known order enables a fast
        iterator and early-exit membership tests.
        """
        self._gen = gen
        self._asclist = None      # sorted snapshot, built once gen is drained
        self._cache = {}          # value -> membership verdict seen so far
        self._genlist = []        # values produced so far, in gen order
        self._finished = False    # True once self._gen is exhausted
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # pull values from the wrapped generator, recording each one, and
        # install the sorted fast paths once the generator is exhausted
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installs
            # both fast iterators, so recurse into last() (the original code
            # recursed into first(), returning the wrong end of the set)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3354 3356
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to a half-open ascending range [start, end); the
            # requested direction is preserved in self._ascending
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only revisions that are not hidden
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions that fall inside the span
            count = 0
            start = self._start
            end = self._end
            # NOTE(review): __init__ guarantees _start <= _end, so the first
            # disjunct below can never be true; it looks like dead code
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            # last of an ascending iteration is its largest element
            it = self.fastdesc
        else:
            # last of a descending iteration is its smallest element
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3463 3465
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            result = other
        else:
            # `other` is not a smartset; since it was used with "&", assume a
            # plain set-like object, strip hidden revisions and wrap it so it
            # grows the smartset API
            # (this boldly assumes all smartset are pure)
            result = baseset(other - self._hiddenrevs)

        result.sort(reverse=self.isdescending())
        return result
3491 3493
def prettyformatset(revs):
    """Return an indented multi-line rendering of a smartset's repr.

    Each '<' in the repr opens a nested smartset; every '<...' segment is
    placed on its own line, indented by its nesting level.
    """
    rs = repr(revs)
    out = []
    pos = 0
    size = len(rs)
    while pos < size:
        cut = rs.find('<', pos + 1)
        if cut < 0:
            cut = size
        # nesting level = unmatched '<' seen before this segment
        level = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert level >= 0
        out.append(' ' * level + rs[pos:cut].rstrip())
        pos = cut
    return '\n'.join(out)
3505 3507
# tell hggettext to extract docstrings from these functions:
# (`symbols` is presumably the predicate-name -> implementation table built
# earlier in this module; its values carry the translatable docstrings)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now