revset: parse alias declaration strictly by _parsealiasdecl...
FUJIWARA Katsunori
r23846:aac4a1a7 default
@@ -1,3288 +1,3282 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 cut = followfirst and 1 or None
22 22 cl = repo.changelog
23 23
24 24 def iterate():
25 25 revqueue, revsnode = None, None
26 26 h = []
27 27
28 28 revs.sort(reverse=True)
29 29 revqueue = util.deque(revs)
30 30 if revqueue:
31 31 revsnode = revqueue.popleft()
32 32 heapq.heappush(h, -revsnode)
33 33
34 34 seen = set([node.nullrev])
35 35 while h:
36 36 current = -heapq.heappop(h)
37 37 if current not in seen:
38 38 if revsnode and current == revsnode:
39 39 if revqueue:
40 40 revsnode = revqueue.popleft()
41 41 heapq.heappush(h, -revsnode)
42 42 seen.add(current)
43 43 yield current
44 44 for parent in cl.parentrevs(current)[:cut]:
45 45 if parent != node.nullrev:
46 46 heapq.heappush(h, -parent)
47 47
48 48 return generatorset(iterate(), iterasc=False)
49 49
50 50 def _revdescendants(repo, revs, followfirst):
51 51 """Like revlog.descendants() but supports followfirst."""
52 52 cut = followfirst and 1 or None
53 53
54 54 def iterate():
55 55 cl = repo.changelog
56 56 first = min(revs)
57 57 nullrev = node.nullrev
58 58 if first == nullrev:
59 59 # Are there nodes with a null first parent and a non-null
60 60 # second one? Maybe. Do we care? Probably not.
61 61 for i in cl:
62 62 yield i
63 63 else:
64 64 seen = set(revs)
65 65 for i in cl.revs(first + 1):
66 66 for x in cl.parentrevs(i)[:cut]:
67 67 if x != nullrev and x in seen:
68 68 seen.add(i)
69 69 yield i
70 70 break
71 71
72 72 return generatorset(iterate(), iterasc=True)
73 73
74 74 def _revsbetween(repo, roots, heads):
75 75 """Return all paths between roots and heads, inclusive of both endpoint
76 76 sets."""
77 77 if not roots:
78 78 return baseset()
79 79 parentrevs = repo.changelog.parentrevs
80 80 visit = list(heads)
81 81 reachable = set()
82 82 seen = {}
83 83 minroot = min(roots)
84 84 roots = set(roots)
85 85 # open-code the post-order traversal due to the tiny size of
86 86 # sys.getrecursionlimit()
87 87 while visit:
88 88 rev = visit.pop()
89 89 if rev in roots:
90 90 reachable.add(rev)
91 91 parents = parentrevs(rev)
92 92 seen[rev] = parents
93 93 for parent in parents:
94 94 if parent >= minroot and parent not in seen:
95 95 visit.append(parent)
96 96 if not reachable:
97 97 return baseset()
98 98 for rev in sorted(seen):
99 99 for parent in seen[rev]:
100 100 if parent in reachable:
101 101 reachable.add(rev)
102 102 return baseset(sorted(reachable))
103 103
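# A worked illustration of the traversal above (hypothetical linear
# history 0 -> 1 -> 2 -> 3): _revsbetween(repo, [1], [3]) first walks
# from 3 down towards minroot recording parents in `seen`, then sweeps
# `seen` in ascending order and keeps every rev with a reachable parent,
# so
#
#   >>> list(_revsbetween(repo, [1], [3]))
#   [1, 2, 3]
#
# which is the same set the dagrange "1::3" would select.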
104 104 elements = {
105 105 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
106 106 "##": (20, None, ("_concat", 20)),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "%": (5, None, ("only", 5), ("onlypost", 5)),
120 120 "or": (4, None, ("or", 4)),
121 121 "|": (4, None, ("or", 4)),
122 122 "+": (4, None, ("or", 4)),
123 123 ",": (2, None, ("list", 2)),
124 124 ")": (0, None, None),
125 125 "symbol": (0, ("symbol",), None),
126 126 "string": (0, ("string",), None),
127 127 "end": (0, None, None),
128 128 }
129 129
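# Each entry above is (binding strength, prefix form, infix form[, suffix
# form]); higher strengths bind tighter.  As a rough illustration, in
# "1:2 and 3" the ":" operator (15) groups before "and" (5), so the parse
# tree comes out approximately as
#   ('and', ('range', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
# (exact tree construction is delegated to parser.py).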
130 130 keywords = set(['and', 'or', 'not'])
131 131
132 132 # default set of valid characters for the initial letter of symbols
133 133 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
134 134 if c.isalnum() or c in '._@' or ord(c) > 127)
135 135
136 136 # default set of valid characters for non-initial letters of symbols
137 137 _symletters = set(c for c in [chr(i) for i in xrange(256)]
138 138 if c.isalnum() or c in '-._/@' or ord(c) > 127)
139 139
140 140 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
141 141 '''
142 142 Parse a revset statement into a stream of tokens
143 143
144 144 ``syminitletters`` is the set of valid characters for the initial
145 145 letter of symbols.
146 146
147 147 By default, character ``c`` is recognized as valid for initial
148 148 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
149 149
150 150 ``symletters`` is the set of valid characters for non-initial
151 151 letters of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for non-initial
154 154 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
155 155
156 156 Check that @ is a valid unquoted token character (issue3686):
157 157 >>> list(tokenize("@::"))
158 158 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
159 159
160 160 '''
161 161 if syminitletters is None:
162 162 syminitletters = _syminitletters
163 163 if symletters is None:
164 164 symletters = _symletters
165 165
166 166 pos, l = 0, len(program)
167 167 while pos < l:
168 168 c = program[pos]
169 169 if c.isspace(): # skip inter-token whitespace
170 170 pass
171 171 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
172 172 yield ('::', None, pos)
173 173 pos += 1 # skip ahead
174 174 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
175 175 yield ('..', None, pos)
176 176 pos += 1 # skip ahead
177 177 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
178 178 yield ('##', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c in "():,-|&+!~^%": # handle simple operators
181 181 yield (c, None, pos)
182 182 elif (c in '"\'' or c == 'r' and
183 183 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
184 184 if c == 'r':
185 185 pos += 1
186 186 c = program[pos]
187 187 decode = lambda x: x
188 188 else:
189 189 decode = lambda x: x.decode('string-escape')
190 190 pos += 1
191 191 s = pos
192 192 while pos < l: # find closing quote
193 193 d = program[pos]
194 194 if d == '\\': # skip over escaped characters
195 195 pos += 2
196 196 continue
197 197 if d == c:
198 198 yield ('string', decode(program[s:pos]), s)
199 199 break
200 200 pos += 1
201 201 else:
202 202 raise error.ParseError(_("unterminated string"), s)
203 203 # gather up a symbol/keyword
204 204 elif c in syminitletters:
205 205 s = pos
206 206 pos += 1
207 207 while pos < l: # find end of symbol
208 208 d = program[pos]
209 209 if d not in symletters:
210 210 break
211 211 if d == '.' and program[pos - 1] == '.': # special case for ..
212 212 pos -= 1
213 213 break
214 214 pos += 1
215 215 sym = program[s:pos]
216 216 if sym in keywords: # operator keywords
217 217 yield (sym, None, s)
218 218 elif '-' in sym:
219 219 # some jerk gave us foo-bar-baz, try to check if it's a symbol
220 220 if lookup and lookup(sym):
221 221 # looks like a real symbol
222 222 yield ('symbol', sym, s)
223 223 else:
224 224 # looks like an expression
225 225 parts = sym.split('-')
226 226 for p in parts[:-1]:
227 227 if p: # possible consecutive -
228 228 yield ('symbol', p, s)
229 229 s += len(p)
230 230 yield ('-', None, pos)
231 231 s += 1
232 232 if parts[-1]: # possible trailing -
233 233 yield ('symbol', parts[-1], s)
234 234 else:
235 235 yield ('symbol', sym, s)
236 236 pos -= 1
237 237 else:
238 238 raise error.ParseError(_("syntax error"), pos)
239 239 pos += 1
240 240 yield ('end', None, pos)
241 241
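# A hand-traced sketch of tokenize() on a call with a quoted argument,
# following the rules above:
#
#   >>> list(tokenize("keyword('foo')"))
#   [('symbol', 'keyword', 0), ('(', None, 7), ('string', 'foo', 9),
#    (')', None, 13), ('end', None, 14)]
#
# Each token is a (type, value, offset) triple; plain operators carry
# None as their value.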
242 242 def parseerrordetail(inst):
243 243 """Compose error message from specified ParseError object
244 244 """
245 245 if len(inst.args) > 1:
246 246 return _('at %s: %s') % (inst.args[1], inst.args[0])
247 247 else:
248 248 return inst.args[0]
249 249
250 250 # helpers
251 251
252 252 def getstring(x, err):
253 253 if x and (x[0] == 'string' or x[0] == 'symbol'):
254 254 return x[1]
255 255 raise error.ParseError(err)
256 256
257 257 def getlist(x):
258 258 if not x:
259 259 return []
260 260 if x[0] == 'list':
261 261 return getlist(x[1]) + [x[2]]
262 262 return [x]
263 263
264 264 def getargs(x, min, max, err):
265 265 l = getlist(x)
266 266 if len(l) < min or (max >= 0 and len(l) > max):
267 267 raise error.ParseError(err)
268 268 return l
269 269
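# Argument lists arrive from the parser as left-nested ('list', ...)
# nodes, which getlist() flattens.  For the arguments of "f(1, 2, 3)"
# the tree looks roughly like
#   ('list', ('list', ('symbol', '1'), ('symbol', '2')), ('symbol', '3'))
# and getlist() returns
#   [('symbol', '1'), ('symbol', '2'), ('symbol', '3')]
# getargs() then only checks that the flattened length fits [min, max].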
270 270 def isvalidsymbol(tree):
271 271 """Examine whether specified ``tree`` is valid ``symbol`` or not
272 272 """
273 273 return tree[0] == 'symbol' and len(tree) > 1
274 274
275 275 def getsymbol(tree):
276 276 """Get symbol name from valid ``symbol`` in ``tree``
277 277
278 278 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
279 279 """
280 280 return tree[1]
281 281
282 282 def isvalidfunc(tree):
283 283 """Examine whether specified ``tree`` is valid ``func`` or not
284 284 """
285 285 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
286 286
287 287 def getfuncname(tree):
288 288 """Get function name from valid ``func`` in ``tree``
289 289
290 290 This assumes that ``tree`` is already examined by ``isvalidfunc``.
291 291 """
292 292 return getsymbol(tree[1])
293 293
294 294 def getfuncargs(tree):
295 295 """Get list of function arguments from valid ``func`` in ``tree``
296 296
297 297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 298 """
299 299 if len(tree) > 2:
300 300 return getlist(tree[2])
301 301 else:
302 302 return []
303 303
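# As a sketch of how the helpers above fit together: for a parsed call
# such as "limit(tip, 5)" the tree is
#   ('func', ('symbol', 'limit'), ('list', ('symbol', 'tip'), ('symbol', '5')))
# so isvalidfunc(tree) is True, getfuncname(tree) returns 'limit', and
# getfuncargs(tree) returns [('symbol', 'tip'), ('symbol', '5')].  These
# are the checks that strict alias-declaration parsing relies on.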
304 304 def getset(repo, subset, x):
305 305 if not x:
306 306 raise error.ParseError(_("missing argument"))
307 307 s = methods[x[0]](repo, subset, *x[1:])
308 308 if util.safehasattr(s, 'isascending'):
309 309 return s
310 310 return baseset(s)
311 311
312 312 def _getrevsource(repo, r):
313 313 extra = repo[r].extra()
314 314 for label in ('source', 'transplant_source', 'rebase_source'):
315 315 if label in extra:
316 316 try:
317 317 return repo[extra[label]].rev()
318 318 except error.RepoLookupError:
319 319 pass
320 320 return None
321 321
322 322 # operator methods
323 323
324 324 def stringset(repo, subset, x):
325 325 x = repo[x].rev()
326 326 if x == -1 and len(subset) == len(repo):
327 327 return baseset([-1])
328 328 if x in subset:
329 329 return baseset([x])
330 330 return baseset()
331 331
332 332 def symbolset(repo, subset, x):
333 333 if x in symbols:
334 334 raise error.ParseError(_("can't use %s here") % x)
335 335 return stringset(repo, subset, x)
336 336
337 337 def rangeset(repo, subset, x, y):
338 338 m = getset(repo, fullreposet(repo), x)
339 339 n = getset(repo, fullreposet(repo), y)
340 340
341 341 if not m or not n:
342 342 return baseset()
343 343 m, n = m.first(), n.last()
344 344
345 345 if m < n:
346 346 r = spanset(repo, m, n + 1)
347 347 else:
348 348 r = spanset(repo, m, n - 1)
349 349 return r & subset
350 350
351 351 def dagrange(repo, subset, x, y):
352 352 r = spanset(repo)
353 353 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
354 354 return xs & subset
355 355
356 356 def andset(repo, subset, x, y):
357 357 return getset(repo, getset(repo, subset, x), y)
358 358
359 359 def orset(repo, subset, x, y):
360 360 xl = getset(repo, subset, x)
361 361 yl = getset(repo, subset - xl, y)
362 362 return xl + yl
363 363
364 364 def notset(repo, subset, x):
365 365 return subset - getset(repo, subset, x)
366 366
367 367 def listset(repo, subset, a, b):
368 368 raise error.ParseError(_("can't use a list in this context"))
369 369
370 370 def func(repo, subset, a, b):
371 371 if a[0] == 'symbol' and a[1] in symbols:
372 372 return symbols[a[1]](repo, subset, b)
373 373 raise error.ParseError(_("not a function: %s") % a[1])
374 374
375 375 # functions
376 376
377 377 def adds(repo, subset, x):
378 378 """``adds(pattern)``
379 379 Changesets that add a file matching pattern.
380 380
381 381 The pattern without explicit kind like ``glob:`` is expected to be
382 382 relative to the current directory and match against a file or a
383 383 directory.
384 384 """
385 385 # i18n: "adds" is a keyword
386 386 pat = getstring(x, _("adds requires a pattern"))
387 387 return checkstatus(repo, subset, pat, 1)
388 388
389 389 def ancestor(repo, subset, x):
390 390 """``ancestor(*changeset)``
391 391 A greatest common ancestor of the changesets.
392 392
393 393 Accepts 0 or more changesets.
394 394 Will return empty list when passed no args.
395 395 Greatest common ancestor of a single changeset is that changeset.
396 396 """
397 397 # i18n: "ancestor" is a keyword
398 398 l = getlist(x)
399 399 rl = spanset(repo)
400 400 anc = None
401 401
402 402 # (getset(repo, rl, i) for i in l) generates a list of lists
403 403 for revs in (getset(repo, rl, i) for i in l):
404 404 for r in revs:
405 405 if anc is None:
406 406 anc = repo[r]
407 407 else:
408 408 anc = anc.ancestor(repo[r])
409 409
410 410 if anc is not None and anc.rev() in subset:
411 411 return baseset([anc.rev()])
412 412 return baseset()
413 413
414 414 def _ancestors(repo, subset, x, followfirst=False):
415 415 heads = getset(repo, spanset(repo), x)
416 416 if not heads:
417 417 return baseset()
418 418 s = _revancestors(repo, heads, followfirst)
419 419 return subset & s
420 420
421 421 def ancestors(repo, subset, x):
422 422 """``ancestors(set)``
423 423 Changesets that are ancestors of a changeset in set.
424 424 """
425 425 return _ancestors(repo, subset, x)
426 426
427 427 def _firstancestors(repo, subset, x):
428 428 # ``_firstancestors(set)``
429 429 # Like ``ancestors(set)`` but follows only the first parents.
430 430 return _ancestors(repo, subset, x, followfirst=True)
431 431
432 432 def ancestorspec(repo, subset, x, n):
433 433 """``set~n``
434 434 Changesets that are the Nth ancestor (first parents only) of a changeset
435 435 in set.
436 436 """
437 437 try:
438 438 n = int(n[1])
439 439 except (TypeError, ValueError):
440 440 raise error.ParseError(_("~ expects a number"))
441 441 ps = set()
442 442 cl = repo.changelog
443 443 for r in getset(repo, fullreposet(repo), x):
444 444 for i in range(n):
445 445 r = cl.parentrevs(r)[0]
446 446 ps.add(r)
447 447 return subset & ps
448 448
449 449 def author(repo, subset, x):
450 450 """``author(string)``
451 451 Alias for ``user(string)``.
452 452 """
453 453 # i18n: "author" is a keyword
454 454 n = encoding.lower(getstring(x, _("author requires a string")))
455 455 kind, pattern, matcher = _substringmatcher(n)
456 456 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
457 457
458 458 def bisect(repo, subset, x):
459 459 """``bisect(string)``
460 460 Changesets marked in the specified bisect status:
461 461
462 462 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
463 463 - ``goods``, ``bads`` : csets topologically good/bad
464 464 - ``range`` : csets taking part in the bisection
465 465 - ``pruned`` : csets that are goods, bads or skipped
466 466 - ``untested`` : csets whose fate is yet unknown
467 467 - ``ignored`` : csets ignored due to DAG topology
468 468 - ``current`` : the cset currently being bisected
469 469 """
470 470 # i18n: "bisect" is a keyword
471 471 status = getstring(x, _("bisect requires a string")).lower()
472 472 state = set(hbisect.get(repo, status))
473 473 return subset & state
474 474
475 475 # Backward-compatibility
476 476 # - no help entry so that we do not advertise it any more
477 477 def bisected(repo, subset, x):
478 478 return bisect(repo, subset, x)
479 479
480 480 def bookmark(repo, subset, x):
481 481 """``bookmark([name])``
482 482 The named bookmark or all bookmarks.
483 483
484 484 If `name` starts with `re:`, the remainder of the name is treated as
485 485 a regular expression. To match a bookmark that actually starts with `re:`,
486 486 use the prefix `literal:`.
487 487 """
488 488 # i18n: "bookmark" is a keyword
489 489 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
490 490 if args:
491 491 bm = getstring(args[0],
492 492 # i18n: "bookmark" is a keyword
493 493 _('the argument to bookmark must be a string'))
494 494 kind, pattern, matcher = _stringmatcher(bm)
495 495 bms = set()
496 496 if kind == 'literal':
497 497 bmrev = repo._bookmarks.get(pattern, None)
498 498 if not bmrev:
499 499 raise util.Abort(_("bookmark '%s' does not exist") % bm)
500 500 bms.add(repo[bmrev].rev())
501 501 else:
502 502 matchrevs = set()
503 503 for name, bmrev in repo._bookmarks.iteritems():
504 504 if matcher(name):
505 505 matchrevs.add(bmrev)
506 506 if not matchrevs:
507 507 raise util.Abort(_("no bookmarks exist that match '%s'")
508 508 % pattern)
509 509 for bmrev in matchrevs:
510 510 bms.add(repo[bmrev].rev())
511 511 else:
512 512 bms = set([repo[r].rev()
513 513 for r in repo._bookmarks.values()])
514 514 bms -= set([node.nullrev])
515 515 return subset & bms
516 516
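# Example usage of the matching rules described in the docstring above
# (bookmark names are hypothetical):
#
#   hg log -r "bookmark()"                   # all bookmarked revisions
#   hg log -r "bookmark('re:release-.*')"    # bookmarks matching a regex
#   hg log -r "bookmark('literal:re:odd')"   # a bookmark literally named "re:odd"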
517 517 def branch(repo, subset, x):
518 518 """``branch(string or set)``
519 519 All changesets belonging to the given branch or the branches of the given
520 520 changesets.
521 521
522 522 If `string` starts with `re:`, the remainder of the name is treated as
523 523 a regular expression. To match a branch that actually starts with `re:`,
524 524 use the prefix `literal:`.
525 525 """
526 526 import branchmap
527 527 urepo = repo.unfiltered()
528 528 ucl = urepo.changelog
529 529 getbi = branchmap.revbranchcache(urepo).branchinfo
530 530
531 531 try:
532 532 b = getstring(x, '')
533 533 except error.ParseError:
534 534 # not a string, but another revspec, e.g. tip()
535 535 pass
536 536 else:
537 537 kind, pattern, matcher = _stringmatcher(b)
538 538 if kind == 'literal':
539 539 # note: falls through to the revspec case if no branch with
540 540 # this name exists
541 541 if pattern in repo.branchmap():
542 542 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
543 543 else:
544 544 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
545 545
546 546 s = getset(repo, spanset(repo), x)
547 547 b = set()
548 548 for r in s:
549 549 b.add(getbi(ucl, r)[0])
550 550 c = s.__contains__
551 551 return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
552 552
553 553 def bumped(repo, subset, x):
554 554 """``bumped()``
555 555 Mutable changesets marked as successors of public changesets.
556 556
557 557 Only non-public and non-obsolete changesets can be `bumped`.
558 558 """
559 559 # i18n: "bumped" is a keyword
560 560 getargs(x, 0, 0, _("bumped takes no arguments"))
561 561 bumped = obsmod.getrevs(repo, 'bumped')
562 562 return subset & bumped
563 563
564 564 def bundle(repo, subset, x):
565 565 """``bundle()``
566 566 Changesets in the bundle.
567 567
568 568 Bundle must be specified by the -R option."""
569 569
570 570 try:
571 571 bundlerevs = repo.changelog.bundlerevs
572 572 except AttributeError:
573 573 raise util.Abort(_("no bundle provided - specify with -R"))
574 574 return subset & bundlerevs
575 575
576 576 def checkstatus(repo, subset, pat, field):
577 577 hasset = matchmod.patkind(pat) == 'set'
578 578
579 579 mcache = [None]
580 580 def matches(x):
581 581 c = repo[x]
582 582 if not mcache[0] or hasset:
583 583 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
584 584 m = mcache[0]
585 585 fname = None
586 586 if not m.anypats() and len(m.files()) == 1:
587 587 fname = m.files()[0]
588 588 if fname is not None:
589 589 if fname not in c.files():
590 590 return False
591 591 else:
592 592 for f in c.files():
593 593 if m(f):
594 594 break
595 595 else:
596 596 return False
597 597 files = repo.status(c.p1().node(), c.node())[field]
598 598 if fname is not None:
599 599 if fname in files:
600 600 return True
601 601 else:
602 602 for f in files:
603 603 if m(f):
604 604 return True
605 605
606 606 return subset.filter(matches)
607 607
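# The `field` argument of checkstatus() indexes into the tuple returned
# by repo.status(p1, node), whose leading entries are (modified, added,
# removed, ...).  That is why, elsewhere in this file, modifies() calls
# checkstatus(..., 0), adds() uses 1 and removes() uses 2.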
608 608 def _children(repo, narrow, parentset):
609 609 cs = set()
610 610 if not parentset:
611 611 return baseset(cs)
612 612 pr = repo.changelog.parentrevs
613 613 minrev = min(parentset)
614 614 for r in narrow:
615 615 if r <= minrev:
616 616 continue
617 617 for p in pr(r):
618 618 if p in parentset:
619 619 cs.add(r)
620 620 return baseset(cs)
621 621
622 622 def children(repo, subset, x):
623 623 """``children(set)``
624 624 Child changesets of changesets in set.
625 625 """
626 626 s = getset(repo, fullreposet(repo), x)
627 627 cs = _children(repo, subset, s)
628 628 return subset & cs
629 629
630 630 def closed(repo, subset, x):
631 631 """``closed()``
632 632 Changeset is closed.
633 633 """
634 634 # i18n: "closed" is a keyword
635 635 getargs(x, 0, 0, _("closed takes no arguments"))
636 636 return subset.filter(lambda r: repo[r].closesbranch())
637 637
638 638 def contains(repo, subset, x):
639 639 """``contains(pattern)``
640 640 The revision's manifest contains a file matching pattern (but might not
641 641 modify it). See :hg:`help patterns` for information about file patterns.
642 642
643 643 The pattern without explicit kind like ``glob:`` is expected to be
644 644 relative to the current directory and match against a file exactly
645 645 for efficiency.
646 646 """
647 647 # i18n: "contains" is a keyword
648 648 pat = getstring(x, _("contains requires a pattern"))
649 649
650 650 def matches(x):
651 651 if not matchmod.patkind(pat):
652 652 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
653 653 if pats in repo[x]:
654 654 return True
655 655 else:
656 656 c = repo[x]
657 657 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
658 658 for f in c.manifest():
659 659 if m(f):
660 660 return True
661 661 return False
662 662
663 663 return subset.filter(matches)
664 664
665 665 def converted(repo, subset, x):
666 666 """``converted([id])``
667 667 Changesets converted from the given identifier in the old repository if
668 668 present, or all converted changesets if no identifier is specified.
669 669 """
670 670
671 671 # There is exactly no chance of resolving the revision, so do a simple
672 672 # string compare and hope for the best
673 673
674 674 rev = None
675 675 # i18n: "converted" is a keyword
676 676 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
677 677 if l:
678 678 # i18n: "converted" is a keyword
679 679 rev = getstring(l[0], _('converted requires a revision'))
680 680
681 681 def _matchvalue(r):
682 682 source = repo[r].extra().get('convert_revision', None)
683 683 return source is not None and (rev is None or source.startswith(rev))
684 684
685 685 return subset.filter(lambda r: _matchvalue(r))
686 686
687 687 def date(repo, subset, x):
688 688 """``date(interval)``
689 689 Changesets within the interval, see :hg:`help dates`.
690 690 """
691 691 # i18n: "date" is a keyword
692 692 ds = getstring(x, _("date requires a string"))
693 693 dm = util.matchdate(ds)
694 694 return subset.filter(lambda x: dm(repo[x].date()[0]))
695 695
696 696 def desc(repo, subset, x):
697 697 """``desc(string)``
698 698 Search commit message for string. The match is case-insensitive.
699 699 """
700 700 # i18n: "desc" is a keyword
701 701 ds = encoding.lower(getstring(x, _("desc requires a string")))
702 702
703 703 def matches(x):
704 704 c = repo[x]
705 705 return ds in encoding.lower(c.description())
706 706
707 707 return subset.filter(matches)
708 708
709 709 def _descendants(repo, subset, x, followfirst=False):
710 710 roots = getset(repo, spanset(repo), x)
711 711 if not roots:
712 712 return baseset()
713 713 s = _revdescendants(repo, roots, followfirst)
714 714
715 715 # Both sets need to be ascending in order to lazily return the union
716 716 # in the correct order.
717 717 base = subset & roots
718 718 desc = subset & s
719 719 result = base + desc
720 720 if subset.isascending():
721 721 result.sort()
722 722 elif subset.isdescending():
723 723 result.sort(reverse=True)
724 724 else:
725 725 result = subset & result
726 726 return result
727 727
728 728 def descendants(repo, subset, x):
729 729 """``descendants(set)``
730 730 Changesets which are descendants of changesets in set.
731 731 """
732 732 return _descendants(repo, subset, x)
733 733
734 734 def _firstdescendants(repo, subset, x):
735 735 # ``_firstdescendants(set)``
736 736 # Like ``descendants(set)`` but follows only the first parents.
737 737 return _descendants(repo, subset, x, followfirst=True)
738 738
739 739 def destination(repo, subset, x):
740 740 """``destination([set])``
741 741 Changesets that were created by a graft, transplant or rebase operation,
742 742 with the given revisions specified as the source. Omitting the optional set
743 743 is the same as passing all().
744 744 """
745 745 if x is not None:
746 746 sources = getset(repo, spanset(repo), x)
747 747 else:
748 748 sources = getall(repo, spanset(repo), x)
749 749
750 750 dests = set()
751 751
752 752 # subset contains all of the possible destinations that can be returned, so
753 753 # iterate over them and see if their source(s) were provided in the arg set.
754 754 # Even if the immediate src of r is not in the arg set, src's source (or
755 755 # further back) may be. Scanning back further than the immediate src allows
756 756 # transitive transplants and rebases to yield the same results as transitive
757 757 # grafts.
758 758 for r in subset:
759 759 src = _getrevsource(repo, r)
760 760 lineage = None
761 761
762 762 while src is not None:
763 763 if lineage is None:
764 764 lineage = list()
765 765
766 766 lineage.append(r)
767 767
768 768 # The visited lineage is a match if the current source is in the arg
769 769 # set. Since every candidate dest is visited by way of iterating
770 770 # subset, any dests further back in the lineage will be tested by a
771 771 # different iteration over subset. Likewise, if the src was already
772 772 # selected, the current lineage can be selected without going back
773 773 # further.
774 774 if src in sources or src in dests:
775 775 dests.update(lineage)
776 776 break
777 777
778 778 r = src
779 779 src = _getrevsource(repo, r)
780 780
781 781 return subset.filter(dests.__contains__)
782 782
783 783 def divergent(repo, subset, x):
784 784 """``divergent()``
785 785 Final successors of changesets with an alternative set of final successors.
786 786 """
787 787 # i18n: "divergent" is a keyword
788 788 getargs(x, 0, 0, _("divergent takes no arguments"))
789 789 divergent = obsmod.getrevs(repo, 'divergent')
790 790 return subset & divergent
791 791
792 792 def draft(repo, subset, x):
793 793 """``draft()``
794 794 Changeset in draft phase."""
795 795 # i18n: "draft" is a keyword
796 796 getargs(x, 0, 0, _("draft takes no arguments"))
797 797 phase = repo._phasecache.phase
798 798 target = phases.draft
799 799 condition = lambda r: phase(repo, r) == target
800 800 return subset.filter(condition, cache=False)
801 801
802 802 def extinct(repo, subset, x):
803 803 """``extinct()``
804 804 Obsolete changesets with obsolete descendants only.
805 805 """
806 806 # i18n: "extinct" is a keyword
807 807 getargs(x, 0, 0, _("extinct takes no arguments"))
808 808 extincts = obsmod.getrevs(repo, 'extinct')
809 809 return subset & extincts
810 810
811 811 def extra(repo, subset, x):
812 812 """``extra(label, [value])``
813 813 Changesets with the given label in the extra metadata, with the given
814 814 optional value.
815 815
816 816 If `value` starts with `re:`, the remainder of the value is treated as
817 817 a regular expression. To match a value that actually starts with `re:`,
818 818 use the prefix `literal:`.
819 819 """
820 820
821 821 # i18n: "extra" is a keyword
822 822 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
823 823 # i18n: "extra" is a keyword
824 824 label = getstring(l[0], _('first argument to extra must be a string'))
825 825 value = None
826 826
827 827 if len(l) > 1:
828 828 # i18n: "extra" is a keyword
829 829 value = getstring(l[1], _('second argument to extra must be a string'))
830 830 kind, value, matcher = _stringmatcher(value)
831 831
832 832 def _matchvalue(r):
833 833 extra = repo[r].extra()
834 834 return label in extra and (value is None or matcher(extra[label]))
835 835
836 836 return subset.filter(lambda r: _matchvalue(r))
837 837
838 838 def filelog(repo, subset, x):
839 839 """``filelog(pattern)``
840 840 Changesets connected to the specified filelog.
841 841
842 842 For performance reasons, visits only revisions mentioned in the file-level
843 843 filelog, rather than filtering through all changesets (much faster, but
844 844 doesn't include deletes or duplicate changes). For a slower, more accurate
845 845 result, use ``file()``.
846 846
847 847 The pattern without explicit kind like ``glob:`` is expected to be
848 848 relative to the current directory and match against a file exactly
849 849 for efficiency.
850 850
851 851 If some linkrev points to revisions filtered by the current repoview, we'll
852 852 work around it to return a non-filtered value.
853 853 """
854 854
855 855 # i18n: "filelog" is a keyword
856 856 pat = getstring(x, _("filelog requires a pattern"))
857 857 s = set()
858 858 cl = repo.changelog
859 859
860 860 if not matchmod.patkind(pat):
861 861 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
862 862 files = [f]
863 863 else:
864 864 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
865 865 files = (f for f in repo[None] if m(f))
866 866
867 867 for f in files:
868 868 backrevref = {} # final value for: filerev -> changerev
869 869 lowestchild = {} # lowest known filerev child of a filerev
870 870 delayed = [] # filerev with filtered linkrev, for post-processing
871 871 lowesthead = None # cache for manifest content of all head revisions
872 872 fl = repo.file(f)
873 873 for fr in list(fl):
874 874 rev = fl.linkrev(fr)
875 875 if rev not in cl:
876 876 # changerev pointed in linkrev is filtered
877 877 # record it for post processing.
878 878 delayed.append((fr, rev))
879 879 continue
880 880 for p in fl.parentrevs(fr):
881 881 if 0 <= p and p not in lowestchild:
882 882 lowestchild[p] = fr
883 883 backrevref[fr] = rev
884 884 s.add(rev)
885 885
886 886 # Post-processing of all filerevs we skipped because they were
887 887 # filtered. If such filerevs have known and unfiltered children, this
888 888 # means they have an unfiltered appearance out there. We'll use linkrev
889 889 # adjustment to find one of these appearances. The lowest known child
890 890 # will be used as a starting point because it is the best upper-bound we
891 891 # have.
892 892 #
893 893 # This approach will fail when an unfiltered but linkrev-shadowed
894 894 # appearance exists in a head changeset without unfiltered filerev
895 895 # children anywhere.
896 896 while delayed:
897 897 # must be a descending iteration, so that the lowest-child
898 898 # information filled in here can be used by the next item.
899 899 fr, rev = delayed.pop()
900 900 lkr = rev
901 901
902 902 child = lowestchild.get(fr)
903 903
904 904 if child is None:
905 905 # search for existence of this file revision in a head revision.
906 906 # There are three possibilities:
907 907 # - the revision exists in a head and we can find an
908 908 # introduction from there,
909 909 # - the revision does not exist in a head because it has been
910 910 # changed since its introduction: we would have found a child
911 911 # and be in the other 'else' clause,
912 912 # - all versions of the revision are hidden.
913 913 if lowesthead is None:
914 914 lowesthead = {}
915 915 for h in repo.heads():
916 916 fnode = repo[h].manifest().get(f)
917 917 if fnode is not None:
918 918 lowesthead[fl.rev(fnode)] = h
919 919 headrev = lowesthead.get(fr)
920 920 if headrev is None:
921 921 # content is nowhere unfiltered
922 922 continue
923 923 rev = repo[headrev][f].introrev()
924 924 else:
925 925 # the lowest known child is a good upper bound
926 926 childcrev = backrevref[child]
927 927 # XXX this does not guarantee returning the lowest
928 928 # introduction of this revision, but this gives a
929 929 # result which is a good start and will fit in most
930 930 # cases. We probably need to fix the multiple
931 931 # introductions case properly (report each
932 932 # introduction, even for identical file revisions)
933 933 # once and for all at some point anyway.
934 934 for p in repo[childcrev][f].parents():
935 935 if p.filerev() == fr:
936 936 rev = p.rev()
937 937 break
938 938 if rev == lkr: # no shadowed entry found
939 939 # XXX This should never happen unless some manifest points
940 940 # to biggish file revisions (like a revision that uses a
941 941 # parent that never appears in the manifest ancestors)
942 942 continue
943 943
944 944 # Fill the data for the next iteration.
945 945 for p in fl.parentrevs(fr):
946 946 if 0 <= p and p not in lowestchild:
947 947 lowestchild[p] = fr
948 948 backrevref[fr] = rev
949 949 s.add(rev)
950 950
951 951 return subset & s
952 952
953 953 def first(repo, subset, x):
954 954 """``first(set, [n])``
955 955 An alias for limit().
956 956 """
957 957 return limit(repo, subset, x)
958 958
959 959 def _follow(repo, subset, x, name, followfirst=False):
960 960 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
961 961 c = repo['.']
962 962 if l:
963 963 x = getstring(l[0], _("%s expected a filename") % name)
964 964 if x in c:
965 965 cx = c[x]
966 966 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
967 967 # include the revision responsible for the most recent version
968 968 s.add(cx.introrev())
969 969 else:
970 970 return baseset()
971 971 else:
972 972 s = _revancestors(repo, baseset([c.rev()]), followfirst)
973 973
974 974 return subset & s
975 975
976 976 def follow(repo, subset, x):
977 977 """``follow([file])``
978 978 An alias for ``::.`` (ancestors of the working copy's first parent).
979 979 If a filename is specified, the history of the given file is followed,
980 980 including copies.
981 981 """
982 982 return _follow(repo, subset, x, 'follow')
983 983
984 984 def _followfirst(repo, subset, x):
985 985 # ``followfirst([file])``
986 986 # Like ``follow([file])`` but follows only the first parent of
987 987 # every revision or file revision.
988 988 return _follow(repo, subset, x, '_followfirst', followfirst=True)
989 989
990 990 def getall(repo, subset, x):
991 991 """``all()``
992 992 All changesets, the same as ``0:tip``.
993 993 """
994 994 # i18n: "all" is a keyword
995 995 getargs(x, 0, 0, _("all takes no arguments"))
996 996 return subset
997 997
998 998 def grep(repo, subset, x):
999 999 """``grep(regex)``
1000 1000 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1001 1001 to ensure special escape characters are handled correctly. Unlike
1002 1002 ``keyword(string)``, the match is case-sensitive.
1003 1003 """
1004 1004 try:
1005 1005 # i18n: "grep" is a keyword
1006 1006 gr = re.compile(getstring(x, _("grep requires a string")))
1007 1007 except re.error, e:
1008 1008 raise error.ParseError(_('invalid match pattern: %s') % e)
1009 1009
1010 1010 def matches(x):
1011 1011 c = repo[x]
1012 1012 for e in c.files() + [c.user(), c.description()]:
1013 1013 if gr.search(e):
1014 1014 return True
1015 1015 return False
1016 1016
1017 1017 return subset.filter(matches)
1018 1018
1019 1019 def _matchfiles(repo, subset, x):
1020 1020 # _matchfiles takes a revset list of prefixed arguments:
1021 1021 #
1022 1022 # [p:foo, i:bar, x:baz]
1023 1023 #
1024 1024 # builds a match object from them and filters subset. Allowed
1025 1025 # prefixes are 'p:' for regular patterns, 'i:' for include
1026 1026 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1027 1027 # a revision identifier, or the empty string to reference the
1028 1028 # working directory, from which the match object is
1029 1029 # initialized. Use 'd:' to set the default matching mode, default
1030 1030 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1031 1031
1032 1032 # i18n: "_matchfiles" is a keyword
1033 1033 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1034 1034 pats, inc, exc = [], [], []
1035 1035 rev, default = None, None
1036 1036 for arg in l:
1037 1037 # i18n: "_matchfiles" is a keyword
1038 1038 s = getstring(arg, _("_matchfiles requires string arguments"))
1039 1039 prefix, value = s[:2], s[2:]
1040 1040 if prefix == 'p:':
1041 1041 pats.append(value)
1042 1042 elif prefix == 'i:':
1043 1043 inc.append(value)
1044 1044 elif prefix == 'x:':
1045 1045 exc.append(value)
1046 1046 elif prefix == 'r:':
1047 1047 if rev is not None:
1048 1048 # i18n: "_matchfiles" is a keyword
1049 1049 raise error.ParseError(_('_matchfiles expected at most one '
1050 1050 'revision'))
1051 1051 rev = value
1052 1052 elif prefix == 'd:':
1053 1053 if default is not None:
1054 1054 # i18n: "_matchfiles" is a keyword
1055 1055 raise error.ParseError(_('_matchfiles expected at most one '
1056 1056 'default mode'))
1057 1057 default = value
1058 1058 else:
1059 1059 # i18n: "_matchfiles" is a keyword
1060 1060 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1061 1061 if not default:
1062 1062 default = 'glob'
1063 1063
1064 1064 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1065 1065 exclude=exc, ctx=repo[rev], default=default)
1066 1066
1067 1067 def matches(x):
1068 1068 for f in repo[x].files():
1069 1069 if m(f):
1070 1070 return True
1071 1071 return False
1072 1072
1073 1073 return subset.filter(matches)
1074 1074
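# A sketch of the prefixed-argument convention documented above
# (paths are hypothetical):
#
#   hg log -r "_matchfiles('p:src/**.py', 'x:src/vendor/**', 'd:glob')"
#
# would select changesets touching Python files under src/ while
# excluding src/vendor/, with 'glob' as the default pattern kind.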
1075 1075 def hasfile(repo, subset, x):
1076 1076 """``file(pattern)``
1077 1077 Changesets affecting files matched by pattern.
1078 1078
1079 1079 For a faster but less accurate result, consider using ``filelog()``
1080 1080 instead.
1081 1081
1082 1082 This predicate uses ``glob:`` as the default kind of pattern.
1083 1083 """
1084 1084 # i18n: "file" is a keyword
1085 1085 pat = getstring(x, _("file requires a pattern"))
1086 1086 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1087 1087
1088 1088 def head(repo, subset, x):
1089 1089 """``head()``
1090 1090 Changeset is a named branch head.
1091 1091 """
1092 1092 # i18n: "head" is a keyword
1093 1093 getargs(x, 0, 0, _("head takes no arguments"))
1094 1094 hs = set()
1095 1095 for b, ls in repo.branchmap().iteritems():
1096 1096 hs.update(repo[h].rev() for h in ls)
1097 1097 return baseset(hs).filter(subset.__contains__)
1098 1098
1099 1099 def heads(repo, subset, x):
1100 1100 """``heads(set)``
1101 1101 Members of set with no children in set.
1102 1102 """
1103 1103 s = getset(repo, subset, x)
1104 1104 ps = parents(repo, subset, x)
1105 1105 return s - ps
1106 1106
1107 1107 def hidden(repo, subset, x):
1108 1108 """``hidden()``
1109 1109 Hidden changesets.
1110 1110 """
1111 1111 # i18n: "hidden" is a keyword
1112 1112 getargs(x, 0, 0, _("hidden takes no arguments"))
1113 1113 hiddenrevs = repoview.filterrevs(repo, 'visible')
1114 1114 return subset & hiddenrevs
1115 1115
1116 1116 def keyword(repo, subset, x):
1117 1117 """``keyword(string)``
1118 1118 Search commit message, user name, and names of changed files for
1119 1119 string. The match is case-insensitive.
1120 1120 """
1121 1121 # i18n: "keyword" is a keyword
1122 1122 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1123 1123
1124 1124 def matches(r):
1125 1125 c = repo[r]
1126 1126 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1127 1127 c.description()])
1128 1128
1129 1129 return subset.filter(matches)
1130 1130
1131 1131 def limit(repo, subset, x):
1132 1132 """``limit(set, [n])``
1133 1133 First n members of set, defaulting to 1.
1134 1134 """
1135 1135 # i18n: "limit" is a keyword
1136 1136 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1137 1137 try:
1138 1138 lim = 1
1139 1139 if len(l) == 2:
1140 1140 # i18n: "limit" is a keyword
1141 1141 lim = int(getstring(l[1], _("limit requires a number")))
1142 1142 except (TypeError, ValueError):
1143 1143 # i18n: "limit" is a keyword
1144 1144 raise error.ParseError(_("limit expects a number"))
1145 1145 ss = subset
1146 1146 os = getset(repo, spanset(repo), l[0])
1147 1147 result = []
1148 1148 it = iter(os)
1149 1149 for x in xrange(lim):
1150 1150 try:
1151 1151 y = it.next()
1152 1152 if y in ss:
1153 1153 result.append(y)
1154 1154 except (StopIteration):
1155 1155 break
1156 1156 return baseset(result)
1157 1157
1158 1158 def last(repo, subset, x):
1159 1159 """``last(set, [n])``
1160 1160 Last n members of set, defaulting to 1.
1161 1161 """
1162 1162 # i18n: "last" is a keyword
1163 1163 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1164 1164 try:
1165 1165 lim = 1
1166 1166 if len(l) == 2:
1167 1167 # i18n: "last" is a keyword
1168 1168 lim = int(getstring(l[1], _("last requires a number")))
1169 1169 except (TypeError, ValueError):
1170 1170 # i18n: "last" is a keyword
1171 1171 raise error.ParseError(_("last expects a number"))
1172 1172 ss = subset
1173 1173 os = getset(repo, spanset(repo), l[0])
1174 1174 os.reverse()
1175 1175 result = []
1176 1176 it = iter(os)
1177 1177 for x in xrange(lim):
1178 1178 try:
1179 1179 y = it.next()
1180 1180 if y in ss:
1181 1181 result.append(y)
1182 1182 except (StopIteration):
1183 1183 break
1184 1184 return baseset(result)
1185 1185
1186 1186 def maxrev(repo, subset, x):
1187 1187 """``max(set)``
1188 1188 Changeset with highest revision number in set.
1189 1189 """
1190 1190 os = getset(repo, spanset(repo), x)
1191 1191 if os:
1192 1192 m = os.max()
1193 1193 if m in subset:
1194 1194 return baseset([m])
1195 1195 return baseset()
1196 1196
1197 1197 def merge(repo, subset, x):
1198 1198 """``merge()``
1199 1199 Changeset is a merge changeset.
1200 1200 """
1201 1201 # i18n: "merge" is a keyword
1202 1202 getargs(x, 0, 0, _("merge takes no arguments"))
1203 1203 cl = repo.changelog
1204 1204 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1205 1205
1206 1206 def branchpoint(repo, subset, x):
1207 1207 """``branchpoint()``
1208 1208 Changesets with more than one child.
1209 1209 """
1210 1210 # i18n: "branchpoint" is a keyword
1211 1211 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1212 1212 cl = repo.changelog
1213 1213 if not subset:
1214 1214 return baseset()
1215 1215 baserev = min(subset)
1216 1216 parentscount = [0]*(len(repo) - baserev)
1217 1217 for r in cl.revs(start=baserev + 1):
1218 1218 for p in cl.parentrevs(r):
1219 1219 if p >= baserev:
1220 1220 parentscount[p - baserev] += 1
1221 1221 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1222 1222
1223 1223 def minrev(repo, subset, x):
1224 1224 """``min(set)``
1225 1225 Changeset with lowest revision number in set.
1226 1226 """
1227 1227 os = getset(repo, spanset(repo), x)
1228 1228 if os:
1229 1229 m = os.min()
1230 1230 if m in subset:
1231 1231 return baseset([m])
1232 1232 return baseset()
1233 1233
1234 1234 def modifies(repo, subset, x):
1235 1235 """``modifies(pattern)``
1236 1236 Changesets modifying files matched by pattern.
1237 1237
1238 1238 The pattern without explicit kind like ``glob:`` is expected to be
1239 1239 relative to the current directory and match against a file or a
1240 1240 directory.
1241 1241 """
1242 1242 # i18n: "modifies" is a keyword
1243 1243 pat = getstring(x, _("modifies requires a pattern"))
1244 1244 return checkstatus(repo, subset, pat, 0)
1245 1245
1246 1246 def named(repo, subset, x):
1247 1247 """``named(namespace)``
1248 1248 The changesets in a given namespace.
1249 1249
1250 1250 If `namespace` starts with `re:`, the remainder of the string is treated as
1251 1251 a regular expression. To match a namespace that actually starts with `re:`,
1252 1252 use the prefix `literal:`.
1253 1253 """
1254 1254 # i18n: "named" is a keyword
1255 1255 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1256 1256
1257 1257 ns = getstring(args[0],
1258 1258 # i18n: "named" is a keyword
1259 1259 _('the argument to named must be a string'))
1260 1260 kind, pattern, matcher = _stringmatcher(ns)
1261 1261 namespaces = set()
1262 1262 if kind == 'literal':
1263 1263 if pattern not in repo.names:
1264 1264 raise util.Abort(_("namespace '%s' does not exist") % ns)
1265 1265 namespaces.add(repo.names[pattern])
1266 1266 else:
1267 1267 for name, ns in repo.names.iteritems():
1268 1268 if matcher(name):
1269 1269 namespaces.add(ns)
1270 1270 if not namespaces:
1271 1271 raise util.Abort(_("no namespace exists that matches '%s'")
1272 1272 % pattern)
1273 1273
1274 1274 names = set()
1275 1275 for ns in namespaces:
1276 1276 for name in ns.listnames(repo):
1277 1277 names.update(ns.nodes(repo, name))
1278 1278
1279 1279 names -= set([node.nullrev])
1280 1280 return subset & names
1281 1281
1282 1282 def node_(repo, subset, x):
1283 1283 """``id(string)``
1284 1284 Revision non-ambiguously specified by the given hex string prefix.
1285 1285 """
1286 1286 # i18n: "id" is a keyword
1287 1287 l = getargs(x, 1, 1, _("id requires one argument"))
1288 1288 # i18n: "id" is a keyword
1289 1289 n = getstring(l[0], _("id requires a string"))
1290 1290 if len(n) == 40:
1291 1291 rn = repo[n].rev()
1292 1292 else:
1293 1293 rn = None
1294 1294 pm = repo.changelog._partialmatch(n)
1295 1295 if pm is not None:
1296 1296 rn = repo.changelog.rev(pm)
1297 1297
1298 1298 if rn is None:
1299 1299 return baseset()
1300 1300 result = baseset([rn])
1301 1301 return result & subset
1302 1302
1303 1303 def obsolete(repo, subset, x):
1304 1304 """``obsolete()``
1305 1305 Mutable changeset with a newer version."""
1306 1306 # i18n: "obsolete" is a keyword
1307 1307 getargs(x, 0, 0, _("obsolete takes no arguments"))
1308 1308 obsoletes = obsmod.getrevs(repo, 'obsolete')
1309 1309 return subset & obsoletes
1310 1310
1311 1311 def only(repo, subset, x):
1312 1312 """``only(set, [set])``
1313 1313 Changesets that are ancestors of the first set that are not ancestors
1314 1314 of any other head in the repo. If a second set is specified, the result
1315 1315 is ancestors of the first set that are not ancestors of the second set
1316 1316 (i.e. ::<set1> - ::<set2>).
1317 1317 """
1318 1318 cl = repo.changelog
1319 1319 # i18n: "only" is a keyword
1320 1320 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1321 1321 include = getset(repo, spanset(repo), args[0])
1322 1322 if len(args) == 1:
1323 1323 if not include:
1324 1324 return baseset()
1325 1325
1326 1326 descendants = set(_revdescendants(repo, include, False))
1327 1327 exclude = [rev for rev in cl.headrevs()
1328 1328 if not rev in descendants and not rev in include]
1329 1329 else:
1330 1330 exclude = getset(repo, spanset(repo), args[1])
1331 1331
1332 1332 results = set(cl.findmissingrevs(common=exclude, heads=include))
1333 1333 return subset & results
1334 1334
1335 1335 def origin(repo, subset, x):
1336 1336 """``origin([set])``
1337 1337 Changesets that were specified as a source for the grafts, transplants or
1338 1338 rebases that created the given revisions. Omitting the optional set is the
1339 1339 same as passing all(). If a changeset created by these operations is itself
1340 1340 specified as a source for one of these operations, only the source changeset
1341 1341 for the first operation is selected.
1342 1342 """
1343 1343 if x is not None:
1344 1344 dests = getset(repo, spanset(repo), x)
1345 1345 else:
1346 1346 dests = getall(repo, spanset(repo), x)
1347 1347
1348 1348 def _firstsrc(rev):
1349 1349 src = _getrevsource(repo, rev)
1350 1350 if src is None:
1351 1351 return None
1352 1352
1353 1353 while True:
1354 1354 prev = _getrevsource(repo, src)
1355 1355
1356 1356 if prev is None:
1357 1357 return src
1358 1358 src = prev
1359 1359
1360 1360 o = set([_firstsrc(r) for r in dests])
1361 1361 o -= set([None])
1362 1362 return subset & o
1363 1363
1364 1364 def outgoing(repo, subset, x):
1365 1365 """``outgoing([path])``
1366 1366 Changesets not found in the specified destination repository, or the
1367 1367 default push location.
1368 1368 """
1369 1369 import hg # avoid start-up nasties
1370 1370 # i18n: "outgoing" is a keyword
1371 1371 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1372 1372 # i18n: "outgoing" is a keyword
1373 1373 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1374 1374 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1375 1375 dest, branches = hg.parseurl(dest)
1376 1376 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1377 1377 if revs:
1378 1378 revs = [repo.lookup(rev) for rev in revs]
1379 1379 other = hg.peer(repo, {}, dest)
1380 1380 repo.ui.pushbuffer()
1381 1381 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1382 1382 repo.ui.popbuffer()
1383 1383 cl = repo.changelog
1384 1384 o = set([cl.rev(r) for r in outgoing.missing])
1385 1385 return subset & o
1386 1386
1387 1387 def p1(repo, subset, x):
1388 1388 """``p1([set])``
1389 1389 First parent of changesets in set, or the working directory.
1390 1390 """
1391 1391 if x is None:
1392 1392 p = repo[x].p1().rev()
1393 1393 if p >= 0:
1394 1394 return subset & baseset([p])
1395 1395 return baseset()
1396 1396
1397 1397 ps = set()
1398 1398 cl = repo.changelog
1399 1399 for r in getset(repo, spanset(repo), x):
1400 1400 ps.add(cl.parentrevs(r)[0])
1401 1401 ps -= set([node.nullrev])
1402 1402 return subset & ps
1403 1403
1404 1404 def p2(repo, subset, x):
1405 1405 """``p2([set])``
1406 1406 Second parent of changesets in set, or the working directory.
1407 1407 """
1408 1408 if x is None:
1409 1409 ps = repo[x].parents()
1410 1410 try:
1411 1411 p = ps[1].rev()
1412 1412 if p >= 0:
1413 1413 return subset & baseset([p])
1414 1414 return baseset()
1415 1415 except IndexError:
1416 1416 return baseset()
1417 1417
1418 1418 ps = set()
1419 1419 cl = repo.changelog
1420 1420 for r in getset(repo, spanset(repo), x):
1421 1421 ps.add(cl.parentrevs(r)[1])
1422 1422 ps -= set([node.nullrev])
1423 1423 return subset & ps
1424 1424
1425 1425 def parents(repo, subset, x):
1426 1426 """``parents([set])``
1427 1427 The set of all parents for all changesets in set, or the working directory.
1428 1428 """
1429 1429 if x is None:
1430 1430 ps = set(p.rev() for p in repo[x].parents())
1431 1431 else:
1432 1432 ps = set()
1433 1433 cl = repo.changelog
1434 1434 for r in getset(repo, spanset(repo), x):
1435 1435 ps.update(cl.parentrevs(r))
1436 1436 ps -= set([node.nullrev])
1437 1437 return subset & ps
1438 1438
1439 1439 def parentspec(repo, subset, x, n):
1440 1440 """``set^0``
1441 1441 The set.
1442 1442 ``set^1`` (or ``set^``), ``set^2``
1443 1443 First or second parent, respectively, of all changesets in set.
1444 1444 """
1445 1445 try:
1446 1446 n = int(n[1])
1447 1447 if n not in (0, 1, 2):
1448 1448 raise ValueError
1449 1449 except (TypeError, ValueError):
1450 1450 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1451 1451 ps = set()
1452 1452 cl = repo.changelog
1453 1453 for r in getset(repo, fullreposet(repo), x):
1454 1454 if n == 0:
1455 1455 ps.add(r)
1456 1456 elif n == 1:
1457 1457 ps.add(cl.parentrevs(r)[0])
1458 1458 elif n == 2:
1459 1459 parents = cl.parentrevs(r)
1460 1460 if len(parents) > 1:
1461 1461 ps.add(parents[1])
1462 1462 return subset & ps
1463 1463
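# Examples of the shorthand forms handled by ancestorspec() and
# parentspec() above:
#
#   tip^     first parent of tip (same as tip^1)
#   tip^2    second parent of tip (an empty set if tip is not a merge)
#   tip~3    third first-parent ancestor of tip, i.e. tip^^^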
1464 1464 def present(repo, subset, x):
1465 1465 """``present(set)``
1466 1466 An empty set, if any revision in set isn't found; otherwise,
1467 1467 all revisions in set.
1468 1468
1469 1469 If any of the specified revisions is not present in the local repository,
1470 1470 the query is normally aborted. But this predicate allows the query
1471 1471 to continue even in such cases.
1472 1472 """
1473 1473 try:
1474 1474 return getset(repo, subset, x)
1475 1475 except error.RepoLookupError:
1476 1476 return baseset()
1477 1477
1478 1478 def public(repo, subset, x):
1479 1479 """``public()``
1480 1480 Changeset in public phase."""
1481 1481 # i18n: "public" is a keyword
1482 1482 getargs(x, 0, 0, _("public takes no arguments"))
1483 1483 phase = repo._phasecache.phase
1484 1484 target = phases.public
1485 1485 condition = lambda r: phase(repo, r) == target
1486 1486 return subset.filter(condition, cache=False)
1487 1487
1488 1488 def remote(repo, subset, x):
1489 1489 """``remote([id [,path]])``
1490 1490 Local revision that corresponds to the given identifier in a
1491 1491 remote repository, if present. Here, the '.' identifier is a
1492 1492 synonym for the current local branch.
1493 1493 """
1494 1494
1495 1495 import hg # avoid start-up nasties
1496 1496 # i18n: "remote" is a keyword
1497 1497 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1498 1498
1499 1499 q = '.'
1500 1500 if len(l) > 0:
1501 1501 # i18n: "remote" is a keyword
1502 1502 q = getstring(l[0], _("remote requires a string id"))
1503 1503 if q == '.':
1504 1504 q = repo['.'].branch()
1505 1505
1506 1506 dest = ''
1507 1507 if len(l) > 1:
1508 1508 # i18n: "remote" is a keyword
1509 1509 dest = getstring(l[1], _("remote requires a repository path"))
1510 1510 dest = repo.ui.expandpath(dest or 'default')
1511 1511 dest, branches = hg.parseurl(dest)
1512 1512 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1513 1513 if revs:
1514 1514 revs = [repo.lookup(rev) for rev in revs]
1515 1515 other = hg.peer(repo, {}, dest)
1516 1516 n = other.lookup(q)
1517 1517 if n in repo:
1518 1518 r = repo[n].rev()
1519 1519 if r in subset:
1520 1520 return baseset([r])
1521 1521 return baseset()
1522 1522
1523 1523 def removes(repo, subset, x):
1524 1524 """``removes(pattern)``
1525 1525 Changesets which remove files matching pattern.
1526 1526
1527 1527 The pattern without explicit kind like ``glob:`` is expected to be
1528 1528 relative to the current directory and match against a file or a
1529 1529 directory.
1530 1530 """
1531 1531 # i18n: "removes" is a keyword
1532 1532 pat = getstring(x, _("removes requires a pattern"))
1533 1533 return checkstatus(repo, subset, pat, 2)
1534 1534
1535 1535 def rev(repo, subset, x):
1536 1536 """``rev(number)``
1537 1537 Revision with the given numeric identifier.
1538 1538 """
1539 1539 # i18n: "rev" is a keyword
1540 1540 l = getargs(x, 1, 1, _("rev requires one argument"))
1541 1541 try:
1542 1542 # i18n: "rev" is a keyword
1543 1543 l = int(getstring(l[0], _("rev requires a number")))
1544 1544 except (TypeError, ValueError):
1545 1545 # i18n: "rev" is a keyword
1546 1546 raise error.ParseError(_("rev expects a number"))
1547 1547 if l not in fullreposet(repo):
1548 1548 return baseset()
1549 1549 return subset & baseset([l])
1550 1550
1551 1551 def matching(repo, subset, x):
1552 1552 """``matching(revision [, field])``
1553 1553 Changesets in which a given set of fields match the set of fields in the
1554 1554 selected revision or set.
1555 1555
1556 1556 To match more than one field pass the list of fields to match separated
1557 1557 by spaces (e.g. ``author description``).
1558 1558
1559 1559 Valid fields are most regular revision fields and some special fields.
1560 1560
1561 1561 Regular revision fields are ``description``, ``author``, ``branch``,
1562 1562 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1563 1563 and ``diff``.
1564 1564 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1565 1565 contents of the revision. Two revisions matching their ``diff`` will
1566 1566 also match their ``files``.
1567 1567
1568 1568 Special fields are ``summary`` and ``metadata``:
1569 1569 ``summary`` matches the first line of the description.
1570 1570 ``metadata`` is equivalent to matching ``description user date``
1571 1571 (i.e. it matches the main metadata fields).
1572 1572
1573 1573 ``metadata`` is the default field which is used when no fields are
1574 1574 specified. You can match more than one field at a time.
1575 1575 """
1576 1576 # i18n: "matching" is a keyword
1577 1577 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1578 1578
1579 1579 revs = getset(repo, fullreposet(repo), l[0])
1580 1580
1581 1581 fieldlist = ['metadata']
1582 1582 if len(l) > 1:
1583 1583 fieldlist = getstring(l[1],
1584 1584 # i18n: "matching" is a keyword
1585 1585 _("matching requires a string "
1586 1586 "as its second argument")).split()
1587 1587
1588 1588 # Make sure that there are no repeated fields,
1589 1589 # expand the 'special' 'metadata' field type
1590 1590 # and check the 'files' whenever we check the 'diff'
1591 1591 fields = []
1592 1592 for field in fieldlist:
1593 1593 if field == 'metadata':
1594 1594 fields += ['user', 'description', 'date']
1595 1595 elif field == 'diff':
1596 1596 # a revision matching the diff must also match the files
1597 1597 # since matching the diff is very costly, make sure to
1598 1598 # also match the files first
1599 1599 fields += ['files', 'diff']
1600 1600 else:
1601 1601 if field == 'author':
1602 1602 field = 'user'
1603 1603 fields.append(field)
1604 1604 fields = set(fields)
1605 1605 if 'summary' in fields and 'description' in fields:
1606 1606 # If a revision matches its description it also matches its summary
1607 1607 fields.discard('summary')
1608 1608
1609 1609 # We may want to match more than one field
1610 1610 # Not all fields take the same amount of time to be matched
1611 1611 # Sort the selected fields in order of increasing matching cost
1612 1612 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1613 1613 'files', 'description', 'substate', 'diff']
1614 1614 def fieldkeyfunc(f):
1615 1615 try:
1616 1616 return fieldorder.index(f)
1617 1617 except ValueError:
1618 1618 # assume an unknown field is very costly
1619 1619 return len(fieldorder)
1620 1620 fields = list(fields)
1621 1621 fields.sort(key=fieldkeyfunc)
1622 1622
1623 1623 # Each field will be matched with its own "getfield" function
1624 1624 # which will be added to the getfieldfuncs array of functions
1625 1625 getfieldfuncs = []
1626 1626 _funcs = {
1627 1627 'user': lambda r: repo[r].user(),
1628 1628 'branch': lambda r: repo[r].branch(),
1629 1629 'date': lambda r: repo[r].date(),
1630 1630 'description': lambda r: repo[r].description(),
1631 1631 'files': lambda r: repo[r].files(),
1632 1632 'parents': lambda r: repo[r].parents(),
1633 1633 'phase': lambda r: repo[r].phase(),
1634 1634 'substate': lambda r: repo[r].substate,
1635 1635 'summary': lambda r: repo[r].description().splitlines()[0],
1636 1636 'diff': lambda r: list(repo[r].diff(git=True)),
1637 1637 }
1638 1638 for info in fields:
1639 1639 getfield = _funcs.get(info, None)
1640 1640 if getfield is None:
1641 1641 raise error.ParseError(
1642 1642 # i18n: "matching" is a keyword
1643 1643 _("unexpected field name passed to matching: %s") % info)
1644 1644 getfieldfuncs.append(getfield)
1645 1645 # convert the getfield array of functions into a "getinfo" function
1646 1646 # which returns an array of field values (or a single value if there
1647 1647 # is only one field to match)
1648 1648 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1649 1649
1650 1650 def matches(x):
1651 1651 for rev in revs:
1652 1652 target = getinfo(rev)
1653 1653 match = True
1654 1654 for n, f in enumerate(getfieldfuncs):
1655 1655 if target[n] != f(x):
1656 1656 match = False
1657 1657 if match:
1658 1658 return True
1659 1659 return False
1660 1660
1661 1661 return subset.filter(matches)
1662 1662
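# For illustration, a couple of revset expressions exercising the predicate
# above (field names as documented in its docstring; the revision number 42
# is hypothetical):
#
#   hg log -r "matching(tip)"                  # same user, description, date
#   hg log -r "matching(42, 'branch author')"  # same branch and author as 42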
1663 1663 def reverse(repo, subset, x):
1664 1664 """``reverse(set)``
1665 1665 Reverse order of set.
1666 1666 """
1667 1667 l = getset(repo, subset, x)
1668 1668 l.reverse()
1669 1669 return l
1670 1670
1671 1671 def roots(repo, subset, x):
1672 1672 """``roots(set)``
1673 1673 Changesets in set with no parent changeset in set.
1674 1674 """
1675 1675 s = getset(repo, spanset(repo), x)
1676 1676 subset = baseset([r for r in s if r in subset])
1677 1677 cs = _children(repo, subset, s)
1678 1678 return subset - cs
1679 1679
1680 1680 def secret(repo, subset, x):
1681 1681 """``secret()``
1682 1682 Changeset in secret phase."""
1683 1683 # i18n: "secret" is a keyword
1684 1684 getargs(x, 0, 0, _("secret takes no arguments"))
1685 1685 phase = repo._phasecache.phase
1686 1686 target = phases.secret
1687 1687 condition = lambda r: phase(repo, r) == target
1688 1688 return subset.filter(condition, cache=False)
1689 1689
1690 1690 def sort(repo, subset, x):
1691 1691 """``sort(set[, [-]key...])``
1692 1692 Sort set by keys. The default sort order is ascending, specify a key
1693 1693 as ``-key`` to sort in descending order.
1694 1694
1695 1695 The keys can be:
1696 1696
1697 1697 - ``rev`` for the revision number,
1698 1698 - ``branch`` for the branch name,
1699 1699 - ``desc`` for the commit message (description),
1700 1700 - ``user`` for user name (``author`` can be used as an alias),
1701 1701 - ``date`` for the commit date
1702 1702 """
1703 1703 # i18n: "sort" is a keyword
1704 1704 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1705 1705 keys = "rev"
1706 1706 if len(l) == 2:
1707 1707 # i18n: "sort" is a keyword
1708 1708 keys = getstring(l[1], _("sort spec must be a string"))
1709 1709
1710 1710 s = l[0]
1711 1711 keys = keys.split()
1712 1712 l = []
1713 1713 def invert(s):
1714 1714 return "".join(chr(255 - ord(c)) for c in s)
1715 1715 revs = getset(repo, subset, s)
1716 1716 if keys == ["rev"]:
1717 1717 revs.sort()
1718 1718 return revs
1719 1719 elif keys == ["-rev"]:
1720 1720 revs.sort(reverse=True)
1721 1721 return revs
1722 1722 for r in revs:
1723 1723 c = repo[r]
1724 1724 e = []
1725 1725 for k in keys:
1726 1726 if k == 'rev':
1727 1727 e.append(r)
1728 1728 elif k == '-rev':
1729 1729 e.append(-r)
1730 1730 elif k == 'branch':
1731 1731 e.append(c.branch())
1732 1732 elif k == '-branch':
1733 1733 e.append(invert(c.branch()))
1734 1734 elif k == 'desc':
1735 1735 e.append(c.description())
1736 1736 elif k == '-desc':
1737 1737 e.append(invert(c.description()))
1738 1738 elif k in 'user author':
1739 1739 e.append(c.user())
1740 1740 elif k in '-user -author':
1741 1741 e.append(invert(c.user()))
1742 1742 elif k == 'date':
1743 1743 e.append(c.date()[0])
1744 1744 elif k == '-date':
1745 1745 e.append(-c.date()[0])
1746 1746 else:
1747 1747 raise error.ParseError(_("unknown sort key %r") % k)
1748 1748 e.append(r)
1749 1749 l.append(e)
1750 1750 l.sort()
1751 1751 return baseset([e[-1] for e in l])
1752 1752
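# A minimal sketch of the decorate-sort-undecorate scheme used by sort()
# above, assuming the keys "branch -date": each revision is decorated with a
# key list, the lists are sorted lexicographically, and the revision number is
# appended last so ties stay deterministic and the revision can be recovered.
#
#   e = [c.branch(), -c.date()[0], r]     # per-revision key for 'branch -date'
#   l.append(e); l.sort()                 # lexicographic sort over key lists
#   result = baseset([e[-1] for e in l])  # recover revision numbers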
1753 1753 def _stringmatcher(pattern):
1754 1754 """
1755 1755 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1756 1756 returns the matcher name, pattern, and matcher function.
1757 1757 missing or unknown prefixes are treated as literal matches.
1758 1758
1759 1759 helper for tests:
1760 1760 >>> def test(pattern, *tests):
1761 1761 ... kind, pattern, matcher = _stringmatcher(pattern)
1762 1762 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1763 1763
1764 1764 exact matching (no prefix):
1765 1765 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1766 1766 ('literal', 'abcdefg', [False, False, True])
1767 1767
1768 1768 regex matching ('re:' prefix)
1769 1769 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1770 1770 ('re', 'a.+b', [False, False, True])
1771 1771
1772 1772 force exact matches ('literal:' prefix)
1773 1773 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1774 1774 ('literal', 're:foobar', [False, True])
1775 1775
1776 1776 unknown prefixes are ignored and treated as literals
1777 1777 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1778 1778 ('literal', 'foo:bar', [False, False, True])
1779 1779 """
1780 1780 if pattern.startswith('re:'):
1781 1781 pattern = pattern[3:]
1782 1782 try:
1783 1783 regex = re.compile(pattern)
1784 1784 except re.error, e:
1785 1785 raise error.ParseError(_('invalid regular expression: %s')
1786 1786 % e)
1787 1787 return 're', pattern, regex.search
1788 1788 elif pattern.startswith('literal:'):
1789 1789 pattern = pattern[8:]
1790 1790 return 'literal', pattern, pattern.__eq__
1791 1791
1792 1792 def _substringmatcher(pattern):
1793 1793 kind, pattern, matcher = _stringmatcher(pattern)
1794 1794 if kind == 'literal':
1795 1795 matcher = lambda s: pattern in s
1796 1796 return kind, pattern, matcher
1797 1797
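# A small sketch of how _substringmatcher differs from _stringmatcher for a
# plain (literal) pattern: the former becomes a substring test, the latter an
# equality test, while 're:' patterns behave the same in both helpers.
#
#   kind, pat, m = _substringmatcher('bob')
#   m('bob@example.com')     # True, substring match
#   kind, pat, m = _stringmatcher('bob')
#   m('bob@example.com')     # False, exact match only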
1798 1798 def tag(repo, subset, x):
1799 1799 """``tag([name])``
1800 1800 The specified tag by name, or all tagged revisions if no name is given.
1801 1801
1802 1802 If `name` starts with `re:`, the remainder of the name is treated as
1803 1803 a regular expression. To match a tag that actually starts with `re:`,
1804 1804 use the prefix `literal:`.
1805 1805 """
1806 1806 # i18n: "tag" is a keyword
1807 1807 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1808 1808 cl = repo.changelog
1809 1809 if args:
1810 1810 pattern = getstring(args[0],
1811 1811 # i18n: "tag" is a keyword
1812 1812 _('the argument to tag must be a string'))
1813 1813 kind, pattern, matcher = _stringmatcher(pattern)
1814 1814 if kind == 'literal':
1815 1815 # avoid resolving all tags
1816 1816 tn = repo._tagscache.tags.get(pattern, None)
1817 1817 if tn is None:
1818 1818 raise util.Abort(_("tag '%s' does not exist") % pattern)
1819 1819 s = set([repo[tn].rev()])
1820 1820 else:
1821 1821 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1822 1822 else:
1823 1823 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1824 1824 return subset & s
1825 1825
1826 1826 def tagged(repo, subset, x):
1827 1827 return tag(repo, subset, x)
1828 1828
1829 1829 def unstable(repo, subset, x):
1830 1830 """``unstable()``
1831 1831 Non-obsolete changesets with obsolete ancestors.
1832 1832 """
1833 1833 # i18n: "unstable" is a keyword
1834 1834 getargs(x, 0, 0, _("unstable takes no arguments"))
1835 1835 unstables = obsmod.getrevs(repo, 'unstable')
1836 1836 return subset & unstables
1837 1837
1838 1838
1839 1839 def user(repo, subset, x):
1840 1840 """``user(string)``
1841 1841 User name contains string. The match is case-insensitive.
1842 1842
1843 1843 If `string` starts with `re:`, the remainder of the string is treated as
1844 1844 a regular expression. To match a user that actually contains `re:`, use
1845 1845 the prefix `literal:`.
1846 1846 """
1847 1847 return author(repo, subset, x)
1848 1848
1849 1849 # for internal use
1850 1850 def _list(repo, subset, x):
1851 1851 s = getstring(x, "internal error")
1852 1852 if not s:
1853 1853 return baseset()
1854 1854 ls = [repo[r].rev() for r in s.split('\0')]
1855 1855 s = subset
1856 1856 return baseset([r for r in ls if r in s])
1857 1857
1858 1858 # for internal use
1859 1859 def _intlist(repo, subset, x):
1860 1860 s = getstring(x, "internal error")
1861 1861 if not s:
1862 1862 return baseset()
1863 1863 ls = [int(r) for r in s.split('\0')]
1864 1864 s = subset
1865 1865 return baseset([r for r in ls if r in s])
1866 1866
1867 1867 # for internal use
1868 1868 def _hexlist(repo, subset, x):
1869 1869 s = getstring(x, "internal error")
1870 1870 if not s:
1871 1871 return baseset()
1872 1872 cl = repo.changelog
1873 1873 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1874 1874 s = subset
1875 1875 return baseset([r for r in ls if r in s])
1876 1876
1877 1877 symbols = {
1878 1878 "adds": adds,
1879 1879 "all": getall,
1880 1880 "ancestor": ancestor,
1881 1881 "ancestors": ancestors,
1882 1882 "_firstancestors": _firstancestors,
1883 1883 "author": author,
1884 1884 "bisect": bisect,
1885 1885 "bisected": bisected,
1886 1886 "bookmark": bookmark,
1887 1887 "branch": branch,
1888 1888 "branchpoint": branchpoint,
1889 1889 "bumped": bumped,
1890 1890 "bundle": bundle,
1891 1891 "children": children,
1892 1892 "closed": closed,
1893 1893 "contains": contains,
1894 1894 "converted": converted,
1895 1895 "date": date,
1896 1896 "desc": desc,
1897 1897 "descendants": descendants,
1898 1898 "_firstdescendants": _firstdescendants,
1899 1899 "destination": destination,
1900 1900 "divergent": divergent,
1901 1901 "draft": draft,
1902 1902 "extinct": extinct,
1903 1903 "extra": extra,
1904 1904 "file": hasfile,
1905 1905 "filelog": filelog,
1906 1906 "first": first,
1907 1907 "follow": follow,
1908 1908 "_followfirst": _followfirst,
1909 1909 "grep": grep,
1910 1910 "head": head,
1911 1911 "heads": heads,
1912 1912 "hidden": hidden,
1913 1913 "id": node_,
1914 1914 "keyword": keyword,
1915 1915 "last": last,
1916 1916 "limit": limit,
1917 1917 "_matchfiles": _matchfiles,
1918 1918 "max": maxrev,
1919 1919 "merge": merge,
1920 1920 "min": minrev,
1921 1921 "modifies": modifies,
1922 1922 "named": named,
1923 1923 "obsolete": obsolete,
1924 1924 "only": only,
1925 1925 "origin": origin,
1926 1926 "outgoing": outgoing,
1927 1927 "p1": p1,
1928 1928 "p2": p2,
1929 1929 "parents": parents,
1930 1930 "present": present,
1931 1931 "public": public,
1932 1932 "remote": remote,
1933 1933 "removes": removes,
1934 1934 "rev": rev,
1935 1935 "reverse": reverse,
1936 1936 "roots": roots,
1937 1937 "sort": sort,
1938 1938 "secret": secret,
1939 1939 "matching": matching,
1940 1940 "tag": tag,
1941 1941 "tagged": tagged,
1942 1942 "user": user,
1943 1943 "unstable": unstable,
1944 1944 "_list": _list,
1945 1945 "_intlist": _intlist,
1946 1946 "_hexlist": _hexlist,
1947 1947 }
1948 1948
1949 1949 # symbols which can't be used for a DoS attack for any given input
1950 1950 # (e.g. those which accept regexes as plain strings shouldn't be included)
1951 1951 # functions that just return a lot of changesets (like all) don't count here
1952 1952 safesymbols = set([
1953 1953 "adds",
1954 1954 "all",
1955 1955 "ancestor",
1956 1956 "ancestors",
1957 1957 "_firstancestors",
1958 1958 "author",
1959 1959 "bisect",
1960 1960 "bisected",
1961 1961 "bookmark",
1962 1962 "branch",
1963 1963 "branchpoint",
1964 1964 "bumped",
1965 1965 "bundle",
1966 1966 "children",
1967 1967 "closed",
1968 1968 "converted",
1969 1969 "date",
1970 1970 "desc",
1971 1971 "descendants",
1972 1972 "_firstdescendants",
1973 1973 "destination",
1974 1974 "divergent",
1975 1975 "draft",
1976 1976 "extinct",
1977 1977 "extra",
1978 1978 "file",
1979 1979 "filelog",
1980 1980 "first",
1981 1981 "follow",
1982 1982 "_followfirst",
1983 1983 "head",
1984 1984 "heads",
1985 1985 "hidden",
1986 1986 "id",
1987 1987 "keyword",
1988 1988 "last",
1989 1989 "limit",
1990 1990 "_matchfiles",
1991 1991 "max",
1992 1992 "merge",
1993 1993 "min",
1994 1994 "modifies",
1995 1995 "obsolete",
1996 1996 "only",
1997 1997 "origin",
1998 1998 "outgoing",
1999 1999 "p1",
2000 2000 "p2",
2001 2001 "parents",
2002 2002 "present",
2003 2003 "public",
2004 2004 "remote",
2005 2005 "removes",
2006 2006 "rev",
2007 2007 "reverse",
2008 2008 "roots",
2009 2009 "sort",
2010 2010 "secret",
2011 2011 "matching",
2012 2012 "tag",
2013 2013 "tagged",
2014 2014 "user",
2015 2015 "unstable",
2016 2016 "_list",
2017 2017 "_intlist",
2018 2018 "_hexlist",
2019 2019 ])
2020 2020
2021 2021 methods = {
2022 2022 "range": rangeset,
2023 2023 "dagrange": dagrange,
2024 2024 "string": stringset,
2025 2025 "symbol": symbolset,
2026 2026 "and": andset,
2027 2027 "or": orset,
2028 2028 "not": notset,
2029 2029 "list": listset,
2030 2030 "func": func,
2031 2031 "ancestor": ancestorspec,
2032 2032 "parent": parentspec,
2033 2033 "parentpost": p1,
2034 2034 "only": only,
2035 2035 "onlypost": only,
2036 2036 }
2037 2037
2038 2038 def optimize(x, small):
2039 2039 if x is None:
2040 2040 return 0, x
2041 2041
2042 2042 smallbonus = 1
2043 2043 if small:
2044 2044 smallbonus = .5
2045 2045
2046 2046 op = x[0]
2047 2047 if op == 'minus':
2048 2048 return optimize(('and', x[1], ('not', x[2])), small)
2049 2049 elif op == 'only':
2050 2050 return optimize(('func', ('symbol', 'only'),
2051 2051 ('list', x[1], x[2])), small)
2052 2052 elif op == 'dagrangepre':
2053 2053 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2054 2054 elif op == 'dagrangepost':
2055 2055 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2056 2056 elif op == 'rangepre':
2057 2057 return optimize(('range', ('string', '0'), x[1]), small)
2058 2058 elif op == 'rangepost':
2059 2059 return optimize(('range', x[1], ('string', 'tip')), small)
2060 2060 elif op == 'negate':
2061 2061 return optimize(('string',
2062 2062 '-' + getstring(x[1], _("can't negate that"))), small)
2063 2063 elif op in 'string symbol negate':
2064 2064 return smallbonus, x # single revisions are small
2065 2065 elif op == 'and':
2066 2066 wa, ta = optimize(x[1], True)
2067 2067 wb, tb = optimize(x[2], True)
2068 2068
2069 2069 # (::x and not ::y)/(not ::y and ::x) have a fast path
2070 2070 def isonly(revs, bases):
2071 2071 return (
2072 2072 revs[0] == 'func'
2073 2073 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2074 2074 and bases[0] == 'not'
2075 2075 and bases[1][0] == 'func'
2076 2076 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2077 2077
2078 2078 w = min(wa, wb)
2079 2079 if isonly(ta, tb):
2080 2080 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2081 2081 if isonly(tb, ta):
2082 2082 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2083 2083
2084 2084 if wa > wb:
2085 2085 return w, (op, tb, ta)
2086 2086 return w, (op, ta, tb)
2087 2087 elif op == 'or':
2088 2088 wa, ta = optimize(x[1], False)
2089 2089 wb, tb = optimize(x[2], False)
2090 2090 if wb < wa:
2091 2091 wb, wa = wa, wb
2092 2092 return max(wa, wb), (op, ta, tb)
2093 2093 elif op == 'not':
2094 2094 o = optimize(x[1], not small)
2095 2095 return o[0], (op, o[1])
2096 2096 elif op == 'parentpost':
2097 2097 o = optimize(x[1], small)
2098 2098 return o[0], (op, o[1])
2099 2099 elif op == 'group':
2100 2100 return optimize(x[1], small)
2101 2101 elif op in 'dagrange range list parent ancestorspec':
2102 2102 if op == 'parent':
2103 2103 # x^:y means (x^) : y, not x ^ (:y)
2104 2104 post = ('parentpost', x[1])
2105 2105 if x[2][0] == 'dagrangepre':
2106 2106 return optimize(('dagrange', post, x[2][1]), small)
2107 2107 elif x[2][0] == 'rangepre':
2108 2108 return optimize(('range', post, x[2][1]), small)
2109 2109
2110 2110 wa, ta = optimize(x[1], small)
2111 2111 wb, tb = optimize(x[2], small)
2112 2112 return wa + wb, (op, ta, tb)
2113 2113 elif op == 'func':
2114 2114 f = getstring(x[1], _("not a symbol"))
2115 2115 wa, ta = optimize(x[2], small)
2116 2116 if f in ("author branch closed date desc file grep keyword "
2117 2117 "outgoing user"):
2118 2118 w = 10 # slow
2119 2119 elif f in "modifies adds removes":
2120 2120 w = 30 # slower
2121 2121 elif f == "contains":
2122 2122 w = 100 # very slow
2123 2123 elif f == "ancestor":
2124 2124 w = 1 * smallbonus
2125 2125 elif f in "reverse limit first _intlist":
2126 2126 w = 0
2127 2127 elif f in "sort":
2128 2128 w = 10 # assume most sorts look at changelog
2129 2129 else:
2130 2130 w = 1
2131 2131 return w + wa, (op, x[1], ta)
2132 2132 return 1, x
2133 2133
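# A rough sketch of the '::x and not ::y' fast path above, assuming the spec
# "::tip and not ::stable" (the returned weight is elided for brevity):
#
#   tree, pos = parse('::tip and not ::stable')
#   w, tree = optimize(tree, False)
#   # tree == ('func', ('symbol', 'only'),
#   #          ('list', ('symbol', 'tip'), ('symbol', 'stable')))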
2134 2134 _aliasarg = ('func', ('symbol', '_aliasarg'))
2135 2135 def _getaliasarg(tree):
2136 2136 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2137 2137 return X, None otherwise.
2138 2138 """
2139 2139 if (len(tree) == 3 and tree[:2] == _aliasarg
2140 2140 and tree[2][0] == 'string'):
2141 2141 return tree[2][1]
2142 2142 return None
2143 2143
2144 2144 def _checkaliasarg(tree, known=None):
2145 2145 """Check tree contains no _aliasarg construct or only ones which
2146 2146 value is in known. Used to avoid alias placeholders injection.
2147 2147 """
2148 2148 if isinstance(tree, tuple):
2149 2149 arg = _getaliasarg(tree)
2150 2150 if arg is not None and (not known or arg not in known):
2151 2151 raise error.ParseError(_("not a function: %s") % '_aliasarg')
2152 2152 for t in tree:
2153 2153 _checkaliasarg(t, known)
2154 2154
2155 2155 # the set of valid characters for the initial letter of symbols in
2156 2156 # alias declarations and definitions
2157 2157 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2158 2158 if c.isalnum() or c in '._@$' or ord(c) > 127)
2159 2159
2160 2160 def _tokenizealias(program, lookup=None):
2161 2161 """Parse alias declaration/definition into a stream of tokens
2162 2162
2163 2163 This also allows symbol names to start with ``$`` (for backward
2164 2164 compatibility), so callers of this function should examine whether
2165 2165 ``$`` is also used for unexpected symbols.
2166 2166 """
2167 2167 return tokenize(program, lookup=lookup,
2168 2168 syminitletters=_aliassyminitletters)
2169 2169
2170 2170 def _parsealiasdecl(decl):
2171 2171 """Parse alias declaration ``decl``
2172 2172
2173 2173 This returns ``(name, tree, args, errorstr)`` tuple:
2174 2174
2175 2175 - ``name``: name of the declared alias (may be ``decl`` itself on error)
2176 2176 - ``tree``: parse result (or ``None`` on error)
2177 2177 - ``args``: list of alias argument names (or None for a symbol declaration)
2178 2178 - ``errorstr``: detail about the detected error (or None)
2179 2179
2180 2180 >>> _parsealiasdecl('foo')
2181 2181 ('foo', ('symbol', 'foo'), None, None)
2182 2182 >>> _parsealiasdecl('$foo')
2183 2183 ('$foo', None, None, "'$' not for alias arguments")
2184 2184 >>> _parsealiasdecl('foo::bar')
2185 2185 ('foo::bar', None, None, 'invalid format')
2186 2186 >>> _parsealiasdecl('foo bar')
2187 2187 ('foo bar', None, None, 'at 4: invalid token')
2188 2188 >>> _parsealiasdecl('foo()')
2189 2189 ('foo', ('func', ('symbol', 'foo')), [], None)
2190 2190 >>> _parsealiasdecl('$foo()')
2191 2191 ('$foo()', None, None, "'$' not for alias arguments")
2192 2192 >>> _parsealiasdecl('foo($1, $2)')
2193 2193 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2194 2194 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2195 2195 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2196 2196 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2197 2197 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2198 2198 >>> _parsealiasdecl('foo(bar($1, $2))')
2199 2199 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2200 2200 >>> _parsealiasdecl('foo("string")')
2201 2201 ('foo("string")', None, None, 'invalid argument list')
2202 2202 >>> _parsealiasdecl('foo($1, $2')
2203 2203 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2204 2204 >>> _parsealiasdecl('foo("string')
2205 2205 ('foo("string', None, None, 'at 5: unterminated string')
2206 2206 """
2207 2207 p = parser.parser(_tokenizealias, elements)
2208 2208 try:
2209 2209 tree, pos = p.parse(decl)
2210 2210 if (pos != len(decl)):
2211 2211 raise error.ParseError(_('invalid token'), pos)
2212 2212
2213 2213 if isvalidsymbol(tree):
2214 2214 # "name = ...." style
2215 2215 name = getsymbol(tree)
2216 2216 if name.startswith('$'):
2217 2217 return (decl, None, None, _("'$' not for alias arguments"))
2218 2218 return (name, ('symbol', name), None, None)
2219 2219
2220 2220 if isvalidfunc(tree):
2221 2221 # "name(arg, ....) = ...." style
2222 2222 name = getfuncname(tree)
2223 2223 if name.startswith('$'):
2224 2224 return (decl, None, None, _("'$' not for alias arguments"))
2225 2225 args = []
2226 2226 for arg in getfuncargs(tree):
2227 2227 if not isvalidsymbol(arg):
2228 2228 return (decl, None, None, _("invalid argument list"))
2229 2229 args.append(getsymbol(arg))
2230 2230 return (name, ('func', ('symbol', name)), args, None)
2231 2231
2232 2232 return (decl, None, None, _("invalid format"))
2233 2233 except error.ParseError, inst:
2234 2234 return (decl, None, None, parseerrordetail(inst))
2235 2235
2236 2236 class revsetalias(object):
2237 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
2238 args = None
2239
2240 # error message at parsing, or None
2241 error = None
2242 2237 # whether own `error` information is already shown or not.
2243 2238 # this avoids showing same warning multiple times at each `findaliases`.
2244 2239 warned = False
2245 2240
2246 2241 def __init__(self, name, value):
2247 2242 '''Aliases like:
2248 2243
2249 2244 h = heads(default)
2250 2245 b($1) = ancestors($1) - ancestors(default)
2251 2246 '''
2252 m = self.funcre.search(name)
2253 if m:
2254 self.name = m.group(1)
2255 self.tree = ('func', ('symbol', m.group(1)))
2256 self.args = [x.strip() for x in m.group(2).split(',')]
2247 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2248 if self.error:
2249 self.error = _('failed to parse the declaration of revset alias'
2250 ' "%s": %s') % (self.name, self.error)
2251 return
2252
2253 if self.args:
2257 2254 for arg in self.args:
2258 2255 # _aliasarg() is an unknown symbol only used to separate
2259 2256 # alias argument placeholders from regular strings.
2260 2257 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
2261 else:
2262 self.name = name
2263 self.tree = ('symbol', name)
2264 2258
2265 2259 try:
2266 2260 self.replacement, pos = parse(value)
2267 2261 if pos != len(value):
2268 2262 raise error.ParseError(_('invalid token'), pos)
2269 2263 # Check for placeholder injection
2270 2264 _checkaliasarg(self.replacement, self.args)
2271 2265 except error.ParseError, inst:
2272 2266 self.error = _('failed to parse the definition of revset alias'
2273 2267 ' "%s": %s') % (self.name, parseerrordetail(inst))
2274 2268
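# A minimal sketch of the stricter declaration parsing used above, with
# hypothetical aliases matching the docstring of __init__:
#
#   a = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
#   a.name, a.args   # -> ('b', ['$1']), and a.error is None
#   bad = revsetalias('foo bar', 'tip')
#   bad.error        # "failed to parse the declaration of revset alias ..."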
2275 2269 def _getalias(aliases, tree):
2276 2270 """If tree looks like an unexpanded alias, return it. Return None
2277 2271 otherwise.
2278 2272 """
2279 2273 if isinstance(tree, tuple) and tree:
2280 2274 if tree[0] == 'symbol' and len(tree) == 2:
2281 2275 name = tree[1]
2282 2276 alias = aliases.get(name)
2283 2277 if alias and alias.args is None and alias.tree == tree:
2284 2278 return alias
2285 2279 if tree[0] == 'func' and len(tree) > 1:
2286 2280 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2287 2281 name = tree[1][1]
2288 2282 alias = aliases.get(name)
2289 2283 if alias and alias.args is not None and alias.tree == tree[:2]:
2290 2284 return alias
2291 2285 return None
2292 2286
2293 2287 def _expandargs(tree, args):
2294 2288 """Replace _aliasarg instances with the substitution value of the
2295 2289 same name in args, recursively.
2296 2290 """
2297 2291 if not tree or not isinstance(tree, tuple):
2298 2292 return tree
2299 2293 arg = _getaliasarg(tree)
2300 2294 if arg is not None:
2301 2295 return args[arg]
2302 2296 return tuple(_expandargs(t, args) for t in tree)
2303 2297
2304 2298 def _expandaliases(aliases, tree, expanding, cache):
2305 2299 """Expand aliases in tree, recursively.
2306 2300
2307 2301 'aliases' is a dictionary mapping user defined aliases to
2308 2302 revsetalias objects.
2309 2303 """
2310 2304 if not isinstance(tree, tuple):
2311 2305 # Do not expand raw strings
2312 2306 return tree
2313 2307 alias = _getalias(aliases, tree)
2314 2308 if alias is not None:
2315 2309 if alias.error:
2316 2310 raise util.Abort(alias.error)
2317 2311 if alias in expanding:
2318 2312 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2319 2313 'detected') % alias.name)
2320 2314 expanding.append(alias)
2321 2315 if alias.name not in cache:
2322 2316 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2323 2317 expanding, cache)
2324 2318 result = cache[alias.name]
2325 2319 expanding.pop()
2326 2320 if alias.args is not None:
2327 2321 l = getlist(tree[2])
2328 2322 if len(l) != len(alias.args):
2329 2323 raise error.ParseError(
2330 2324 _('invalid number of arguments: %s') % len(l))
2331 2325 l = [_expandaliases(aliases, a, [], cache) for a in l]
2332 2326 result = _expandargs(result, dict(zip(alias.args, l)))
2333 2327 else:
2334 2328 result = tuple(_expandaliases(aliases, t, expanding, cache)
2335 2329 for t in tree)
2336 2330 return result
2337 2331
2338 2332 def findaliases(ui, tree, showwarning=None):
2339 2333 _checkaliasarg(tree)
2340 2334 aliases = {}
2341 2335 for k, v in ui.configitems('revsetalias'):
2342 2336 alias = revsetalias(k, v)
2343 2337 aliases[alias.name] = alias
2344 2338 tree = _expandaliases(aliases, tree, [], {})
2345 2339 if showwarning:
2346 2340 # warn about problematic (but not referred) aliases
2347 2341 for name, alias in sorted(aliases.iteritems()):
2348 2342 if alias.error and not alias.warned:
2349 2343 showwarning(_('warning: %s\n') % (alias.error))
2350 2344 alias.warned = True
2351 2345 return tree
2352 2346
2353 2347 def foldconcat(tree):
2354 2348 """Fold elements to be concatenated by `##`
2355 2349 """
2356 2350 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2357 2351 return tree
2358 2352 if tree[0] == '_concat':
2359 2353 pending = [tree]
2360 2354 l = []
2361 2355 while pending:
2362 2356 e = pending.pop()
2363 2357 if e[0] == '_concat':
2364 2358 pending.extend(reversed(e[1:]))
2365 2359 elif e[0] in ('string', 'symbol'):
2366 2360 l.append(e[1])
2367 2361 else:
2368 2362 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2369 2363 raise error.ParseError(msg)
2370 2364 return ('string', ''.join(l))
2371 2365 else:
2372 2366 return tuple(foldconcat(t) for t in tree)
2373 2367
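# A minimal sketch of the folding performed above: nested '_concat' nodes
# produced by the '##' operator collapse into a single string node.
#
#   foldconcat(('_concat', ('string', 'rev'), ('string', 'set')))
#   # -> ('string', 'revset')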
2374 2368 def parse(spec, lookup=None):
2375 2369 p = parser.parser(tokenize, elements)
2376 2370 return p.parse(spec, lookup=lookup)
2377 2371
2378 2372 def match(ui, spec, repo=None):
2379 2373 if not spec:
2380 2374 raise error.ParseError(_("empty query"))
2381 2375 lookup = None
2382 2376 if repo:
2383 2377 lookup = repo.__contains__
2384 2378 tree, pos = parse(spec, lookup)
2385 2379 if (pos != len(spec)):
2386 2380 raise error.ParseError(_("invalid token"), pos)
2387 2381 if ui:
2388 2382 tree = findaliases(ui, tree, showwarning=ui.warn)
2389 2383 tree = foldconcat(tree)
2390 2384 weight, tree = optimize(tree, True)
2391 2385 def mfunc(repo, subset):
2392 2386 if util.safehasattr(subset, 'isascending'):
2393 2387 result = getset(repo, subset, tree)
2394 2388 else:
2395 2389 result = getset(repo, baseset(subset), tree)
2396 2390 return result
2397 2391 return mfunc
2398 2392
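# A minimal usage sketch for match(), assuming ui and repo objects are
# available (as in the callers elsewhere in Mercurial):
#
#   m = match(ui, 'head() and not closed()', repo)
#   revs = m(repo, fullreposet(repo))   # a smartset of matching revisions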
2399 2393 def formatspec(expr, *args):
2400 2394 '''
2401 2395 This is a convenience function for using revsets internally, and
2402 2396 escapes arguments appropriately. Aliases are intentionally ignored
2403 2397 so that intended expression behavior isn't accidentally subverted.
2404 2398
2405 2399 Supported arguments:
2406 2400
2407 2401 %r = revset expression, parenthesized
2408 2402 %d = int(arg), no quoting
2409 2403 %s = string(arg), escaped and single-quoted
2410 2404 %b = arg.branch(), escaped and single-quoted
2411 2405 %n = hex(arg), single-quoted
2412 2406 %% = a literal '%'
2413 2407
2414 2408 Prefixing the type with 'l' specifies a parenthesized list of that type.
2415 2409
2416 2410 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2417 2411 '(10 or 11):: and ((this()) or (that()))'
2418 2412 >>> formatspec('%d:: and not %d::', 10, 20)
2419 2413 '10:: and not 20::'
2420 2414 >>> formatspec('%ld or %ld', [], [1])
2421 2415 "_list('') or 1"
2422 2416 >>> formatspec('keyword(%s)', 'foo\\xe9')
2423 2417 "keyword('foo\\\\xe9')"
2424 2418 >>> b = lambda: 'default'
2425 2419 >>> b.branch = b
2426 2420 >>> formatspec('branch(%b)', b)
2427 2421 "branch('default')"
2428 2422 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2429 2423 "root(_list('a\\x00b\\x00c\\x00d'))"
2430 2424 '''
2431 2425
2432 2426 def quote(s):
2433 2427 return repr(str(s))
2434 2428
2435 2429 def argtype(c, arg):
2436 2430 if c == 'd':
2437 2431 return str(int(arg))
2438 2432 elif c == 's':
2439 2433 return quote(arg)
2440 2434 elif c == 'r':
2441 2435 parse(arg) # make sure syntax errors are confined
2442 2436 return '(%s)' % arg
2443 2437 elif c == 'n':
2444 2438 return quote(node.hex(arg))
2445 2439 elif c == 'b':
2446 2440 return quote(arg.branch())
2447 2441
2448 2442 def listexp(s, t):
2449 2443 l = len(s)
2450 2444 if l == 0:
2451 2445 return "_list('')"
2452 2446 elif l == 1:
2453 2447 return argtype(t, s[0])
2454 2448 elif t == 'd':
2455 2449 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2456 2450 elif t == 's':
2457 2451 return "_list('%s')" % "\0".join(s)
2458 2452 elif t == 'n':
2459 2453 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2460 2454 elif t == 'b':
2461 2455 return "_list('%s')" % "\0".join(a.branch() for a in s)
2462 2456
2463 2457 m = l // 2
2464 2458 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2465 2459
2466 2460 ret = ''
2467 2461 pos = 0
2468 2462 arg = 0
2469 2463 while pos < len(expr):
2470 2464 c = expr[pos]
2471 2465 if c == '%':
2472 2466 pos += 1
2473 2467 d = expr[pos]
2474 2468 if d == '%':
2475 2469 ret += d
2476 2470 elif d in 'dsnbr':
2477 2471 ret += argtype(d, args[arg])
2478 2472 arg += 1
2479 2473 elif d == 'l':
2480 2474 # a list of some type
2481 2475 pos += 1
2482 2476 d = expr[pos]
2483 2477 ret += listexp(list(args[arg]), d)
2484 2478 arg += 1
2485 2479 else:
2486 2480 raise util.Abort('unexpected revspec format character %s' % d)
2487 2481 else:
2488 2482 ret += c
2489 2483 pos += 1
2490 2484
2491 2485 return ret
2492 2486
2493 2487 def prettyformat(tree):
2494 2488 def _prettyformat(tree, level, lines):
2495 2489 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2496 2490 lines.append((level, str(tree)))
2497 2491 else:
2498 2492 lines.append((level, '(%s' % tree[0]))
2499 2493 for s in tree[1:]:
2500 2494 _prettyformat(s, level + 1, lines)
2501 2495 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2502 2496
2503 2497 lines = []
2504 2498 _prettyformat(tree, 0, lines)
2505 2499 output = '\n'.join((' '*l + s) for l, s in lines)
2506 2500 return output
2507 2501
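# For illustration, prettyformat() produces output roughly like the following
# for a small tree (one node per line, indented by nesting level):
#
#   prettyformat(('func', ('symbol', 'only'), ('symbol', 'tip')))
#   # (func
#   #   ('symbol', 'only')
#   #   ('symbol', 'tip'))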
2508 2502 def depth(tree):
2509 2503 if isinstance(tree, tuple):
2510 2504 return max(map(depth, tree)) + 1
2511 2505 else:
2512 2506 return 0
2513 2507
2514 2508 def funcsused(tree):
2515 2509 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2516 2510 return set()
2517 2511 else:
2518 2512 funcs = set()
2519 2513 for s in tree[1:]:
2520 2514 funcs |= funcsused(s)
2521 2515 if tree[0] == 'func':
2522 2516 funcs.add(tree[1][1])
2523 2517 return funcs
2524 2518
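# A small sketch of the two helpers above on a hypothetical parse tree:
#
#   tree = ('and', ('func', ('symbol', 'heads'), ('symbol', 'default')),
#           ('not', ('func', ('symbol', 'closed'), None)))
#   depth(tree)       # -> 4
#   funcsused(tree)   # -> the set of function names 'heads' and 'closed'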
2525 2519 class abstractsmartset(object):
2526 2520
2527 2521 def __nonzero__(self):
2528 2522 """True if the smartset is not empty"""
2529 2523 raise NotImplementedError()
2530 2524
2531 2525 def __contains__(self, rev):
2532 2526 """provide fast membership testing"""
2533 2527 raise NotImplementedError()
2534 2528
2535 2529 def __iter__(self):
2536 2530 """iterate the set in the order it is supposed to be iterated"""
2537 2531 raise NotImplementedError()
2538 2532
2539 2533 # Attributes containing a function to perform a fast iteration in a given
2540 2534 # direction. A smartset can have none, one, or both defined.
2541 2535 #
2542 2536 # Default value is None instead of a function returning None to avoid
2543 2537 # initializing an iterator just for testing if a fast method exists.
2544 2538 fastasc = None
2545 2539 fastdesc = None
2546 2540
2547 2541 def isascending(self):
2548 2542 """True if the set will iterate in ascending order"""
2549 2543 raise NotImplementedError()
2550 2544
2551 2545 def isdescending(self):
2552 2546 """True if the set will iterate in descending order"""
2553 2547 raise NotImplementedError()
2554 2548
2555 2549 def min(self):
2556 2550 """return the minimum element in the set"""
2557 2551 if self.fastasc is not None:
2558 2552 for r in self.fastasc():
2559 2553 return r
2560 2554 raise ValueError('arg is an empty sequence')
2561 2555 return min(self)
2562 2556
2563 2557 def max(self):
2564 2558 """return the maximum element in the set"""
2565 2559 if self.fastdesc is not None:
2566 2560 for r in self.fastdesc():
2567 2561 return r
2568 2562 raise ValueError('arg is an empty sequence')
2569 2563 return max(self)
2570 2564
2571 2565 def first(self):
2572 2566 """return the first element in the set (user iteration perspective)
2573 2567
2574 2568 Return None if the set is empty"""
2575 2569 raise NotImplementedError()
2576 2570
2577 2571 def last(self):
2578 2572 """return the last element in the set (user iteration perspective)
2579 2573
2580 2574 Return None if the set is empty"""
2581 2575 raise NotImplementedError()
2582 2576
2583 2577 def __len__(self):
2584 2578 """return the length of the smartsets
2585 2579
2586 2580 This can be expensive on smartset that could be lazy otherwise."""
2587 2581 raise NotImplementedError()
2588 2582
2589 2583 def reverse(self):
2590 2584 """reverse the expected iteration order"""
2591 2585 raise NotImplementedError()
2592 2586
2593 2587 def sort(self, reverse=True):
2594 2588 """get the set to iterate in an ascending or descending order"""
2595 2589 raise NotImplementedError()
2596 2590
2597 2591 def __and__(self, other):
2598 2592 """Returns a new object with the intersection of the two collections.
2599 2593
2600 2594 This is part of the mandatory API for smartset."""
2601 2595 return self.filter(other.__contains__, cache=False)
2602 2596
2603 2597 def __add__(self, other):
2604 2598 """Returns a new object with the union of the two collections.
2605 2599
2606 2600 This is part of the mandatory API for smartset."""
2607 2601 return addset(self, other)
2608 2602
2609 2603 def __sub__(self, other):
2610 2604 """Returns a new object with the substraction of the two collections.
2611 2605
2612 2606 This is part of the mandatory API for smartset."""
2613 2607 c = other.__contains__
2614 2608 return self.filter(lambda r: not c(r), cache=False)
2615 2609
2616 2610 def filter(self, condition, cache=True):
2617 2611 """Returns this smartset filtered by condition as a new smartset.
2618 2612
2619 2613 `condition` is a callable which takes a revision number and returns a
2620 2614 boolean.
2621 2615
2622 2616 This is part of the mandatory API for smartset."""
2623 2617 # a builtin cannot be cached, but it does not need to be
2624 2618 if cache and util.safehasattr(condition, 'func_code'):
2625 2619 condition = util.cachefunc(condition)
2626 2620 return filteredset(self, condition)
2627 2621
2628 2622 class baseset(abstractsmartset):
2629 2623 """Basic data structure that represents a revset and contains the basic
2630 2624 operations that it should be able to perform.
2631 2625
2632 2626 Every method in this class should be implemented by any smartset class.
2633 2627 """
2634 2628 def __init__(self, data=()):
2635 2629 if not isinstance(data, list):
2636 2630 data = list(data)
2637 2631 self._list = data
2638 2632 self._ascending = None
2639 2633
2640 2634 @util.propertycache
2641 2635 def _set(self):
2642 2636 return set(self._list)
2643 2637
2644 2638 @util.propertycache
2645 2639 def _asclist(self):
2646 2640 asclist = self._list[:]
2647 2641 asclist.sort()
2648 2642 return asclist
2649 2643
2650 2644 def __iter__(self):
2651 2645 if self._ascending is None:
2652 2646 return iter(self._list)
2653 2647 elif self._ascending:
2654 2648 return iter(self._asclist)
2655 2649 else:
2656 2650 return reversed(self._asclist)
2657 2651
2658 2652 def fastasc(self):
2659 2653 return iter(self._asclist)
2660 2654
2661 2655 def fastdesc(self):
2662 2656 return reversed(self._asclist)
2663 2657
2664 2658 @util.propertycache
2665 2659 def __contains__(self):
2666 2660 return self._set.__contains__
2667 2661
2668 2662 def __nonzero__(self):
2669 2663 return bool(self._list)
2670 2664
2671 2665 def sort(self, reverse=False):
2672 2666 self._ascending = not bool(reverse)
2673 2667
2674 2668 def reverse(self):
2675 2669 if self._ascending is None:
2676 2670 self._list.reverse()
2677 2671 else:
2678 2672 self._ascending = not self._ascending
2679 2673
2680 2674 def __len__(self):
2681 2675 return len(self._list)
2682 2676
2683 2677 def isascending(self):
2684 2678 """Returns True if the collection is ascending order, False if not.
2685 2679
2686 2680 This is part of the mandatory API for smartset."""
2687 2681 if len(self) <= 1:
2688 2682 return True
2689 2683 return self._ascending is not None and self._ascending
2690 2684
2691 2685 def isdescending(self):
2692 2686 """Returns True if the collection is descending order, False if not.
2693 2687
2694 2688 This is part of the mandatory API for smartset."""
2695 2689 if len(self) <= 1:
2696 2690 return True
2697 2691 return self._ascending is not None and not self._ascending
2698 2692
2699 2693 def first(self):
2700 2694 if self:
2701 2695 if self._ascending is None:
2702 2696 return self._list[0]
2703 2697 elif self._ascending:
2704 2698 return self._asclist[0]
2705 2699 else:
2706 2700 return self._asclist[-1]
2707 2701 return None
2708 2702
2709 2703 def last(self):
2710 2704 if self:
2711 2705 if self._ascending is None:
2712 2706 return self._list[-1]
2713 2707 elif self._ascending:
2714 2708 return self._asclist[-1]
2715 2709 else:
2716 2710 return self._asclist[0]
2717 2711 return None
2718 2712
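# A minimal sketch of baseset ordering semantics, using small literal revs:
#
#   s = baseset([3, 1, 2])
#   list(s)              # -> [3, 1, 2], insertion order until sorted
#   s.sort()
#   list(s), s.first()   # -> ([1, 2, 3], 1)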
2719 2713 class filteredset(abstractsmartset):
2720 2714 """Duck type for baseset class which iterates lazily over the revisions in
2721 2715 the subset and contains a function which tests for membership in the
2722 2716 revset
2723 2717 """
2724 2718 def __init__(self, subset, condition=lambda x: True):
2725 2719 """
2726 2720 condition: a function that decides whether a revision in the subset
2727 2721 belongs to the revset or not.
2728 2722 """
2729 2723 self._subset = subset
2730 2724 self._condition = condition
2731 2725 self._cache = {}
2732 2726
2733 2727 def __contains__(self, x):
2734 2728 c = self._cache
2735 2729 if x not in c:
2736 2730 v = c[x] = x in self._subset and self._condition(x)
2737 2731 return v
2738 2732 return c[x]
2739 2733
2740 2734 def __iter__(self):
2741 2735 return self._iterfilter(self._subset)
2742 2736
2743 2737 def _iterfilter(self, it):
2744 2738 cond = self._condition
2745 2739 for x in it:
2746 2740 if cond(x):
2747 2741 yield x
2748 2742
2749 2743 @property
2750 2744 def fastasc(self):
2751 2745 it = self._subset.fastasc
2752 2746 if it is None:
2753 2747 return None
2754 2748 return lambda: self._iterfilter(it())
2755 2749
2756 2750 @property
2757 2751 def fastdesc(self):
2758 2752 it = self._subset.fastdesc
2759 2753 if it is None:
2760 2754 return None
2761 2755 return lambda: self._iterfilter(it())
2762 2756
2763 2757 def __nonzero__(self):
2764 2758 for r in self:
2765 2759 return True
2766 2760 return False
2767 2761
2768 2762 def __len__(self):
2769 2763 # Basic implementation to be changed in future patches.
2770 2764 l = baseset([r for r in self])
2771 2765 return len(l)
2772 2766
2773 2767 def sort(self, reverse=False):
2774 2768 self._subset.sort(reverse=reverse)
2775 2769
2776 2770 def reverse(self):
2777 2771 self._subset.reverse()
2778 2772
2779 2773 def isascending(self):
2780 2774 return self._subset.isascending()
2781 2775
2782 2776 def isdescending(self):
2783 2777 return self._subset.isdescending()
2784 2778
2785 2779 def first(self):
2786 2780 for x in self:
2787 2781 return x
2788 2782 return None
2789 2783
2790 2784 def last(self):
2791 2785 it = None
2792 2786 if self._subset.isascending:
2793 2787 it = self.fastdesc
2794 2788 elif self._subset.isdescending:
2795 2789 it = self.fastdesc
2796 2790 if it is None:
2797 2791 # slowly consume everything. This needs improvement
2798 2792 it = lambda: reversed(list(self))
2799 2793 for x in it():
2800 2794 return x
2801 2795 return None
2802 2796
2803 2797 class addset(abstractsmartset):
2804 2798 """Represent the addition of two sets
2805 2799
2806 2800 Wrapper structure for lazily adding two structures without losing much
2807 2801 performance on the __contains__ method
2808 2802
2809 2803 If the ascending attribute is set, that means the two structures are
2810 2804 ordered in either an ascending or descending way. Therefore, we can add
2811 2805 them maintaining the order by iterating over both at the same time
2812 2806 """
2813 2807 def __init__(self, revs1, revs2, ascending=None):
2814 2808 self._r1 = revs1
2815 2809 self._r2 = revs2
2816 2810 self._iter = None
2817 2811 self._ascending = ascending
2818 2812 self._genlist = None
2819 2813 self._asclist = None
2820 2814
2821 2815 def __len__(self):
2822 2816 return len(self._list)
2823 2817
2824 2818 def __nonzero__(self):
2825 2819 return bool(self._r1) or bool(self._r2)
2826 2820
2827 2821 @util.propertycache
2828 2822 def _list(self):
2829 2823 if not self._genlist:
2830 2824 self._genlist = baseset(self._iterator())
2831 2825 return self._genlist
2832 2826
2833 2827 def _iterator(self):
2834 2828 """Iterate over both collections without repeating elements
2835 2829
2836 2830 If the ascending attribute is not set, iterate over the first one and
2837 2831 then over the second one checking for membership on the first one so we
2838 2832 don't yield any duplicates.
2839 2833
2840 2834 If the ascending attribute is set, iterate over both collections at the
2841 2835 same time, yielding only one value at a time in the given order.
2842 2836 """
2843 2837 if self._ascending is None:
2844 2838 def gen():
2845 2839 for r in self._r1:
2846 2840 yield r
2847 2841 inr1 = self._r1.__contains__
2848 2842 for r in self._r2:
2849 2843 if not inr1(r):
2850 2844 yield r
2851 2845 gen = gen()
2852 2846 else:
2853 2847 iter1 = iter(self._r1)
2854 2848 iter2 = iter(self._r2)
2855 2849 gen = self._iterordered(self._ascending, iter1, iter2)
2856 2850 return gen
2857 2851
2858 2852 def __iter__(self):
2859 2853 if self._ascending is None:
2860 2854 if self._genlist:
2861 2855 return iter(self._genlist)
2862 2856 return iter(self._iterator())
2863 2857 self._trysetasclist()
2864 2858 if self._ascending:
2865 2859 it = self.fastasc
2866 2860 else:
2867 2861 it = self.fastdesc
2868 2862 if it is None:
2869 2863 # consume the gen and try again
2870 2864 self._list
2871 2865 return iter(self)
2872 2866 return it()
2873 2867
2874 2868 def _trysetasclist(self):
2875 2869 """populate the _asclist attribute if possible and necessary"""
2876 2870 if self._genlist is not None and self._asclist is None:
2877 2871 self._asclist = sorted(self._genlist)
2878 2872
2879 2873 @property
2880 2874 def fastasc(self):
2881 2875 self._trysetasclist()
2882 2876 if self._asclist is not None:
2883 2877 return self._asclist.__iter__
2884 2878 iter1 = self._r1.fastasc
2885 2879 iter2 = self._r2.fastasc
2886 2880 if None in (iter1, iter2):
2887 2881 return None
2888 2882 return lambda: self._iterordered(True, iter1(), iter2())
2889 2883
2890 2884 @property
2891 2885 def fastdesc(self):
2892 2886 self._trysetasclist()
2893 2887 if self._asclist is not None:
2894 2888 return self._asclist.__reversed__
2895 2889 iter1 = self._r1.fastdesc
2896 2890 iter2 = self._r2.fastdesc
2897 2891 if None in (iter1, iter2):
2898 2892 return None
2899 2893 return lambda: self._iterordered(False, iter1(), iter2())
2900 2894
2901 2895 def _iterordered(self, ascending, iter1, iter2):
2902 2896 """produce an ordered iteration from two iterators with the same order
2903 2897
2904 2898 The ascending argument indicates the iteration direction.
2905 2899 """
2906 2900 choice = max
2907 2901 if ascending:
2908 2902 choice = min
2909 2903
2910 2904 val1 = None
2911 2905 val2 = None
2912 2906
2916 2910 try:
2917 2911 # Consume both iterators in an ordered way until one is
2918 2912 # empty
2919 2913 while True:
2920 2914 if val1 is None:
2921 2915 val1 = iter1.next()
2922 2916 if val2 is None:
2923 2917 val2 = iter2.next()
2924 2918 next = choice(val1, val2)
2925 2919 yield next
2926 2920 if val1 == next:
2927 2921 val1 = None
2928 2922 if val2 == next:
2929 2923 val2 = None
2930 2924 except StopIteration:
2931 2925 # Flush any remaining values and consume the other one
2932 2926 it = iter2
2933 2927 if val1 is not None:
2934 2928 yield val1
2935 2929 it = iter1
2936 2930 elif val2 is not None:
2937 2931 # might have been equality and both are empty
2938 2932 yield val2
2939 2933 for val in it:
2940 2934 yield val
2941 2935
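# A small sketch of the ordered merge above, assuming two ascending basesets;
# duplicates are collapsed and the common order is preserved:
#
#   s = addset(baseset([1, 3, 5]), baseset([2, 3, 4]), ascending=True)
#   list(s)   # -> [1, 2, 3, 4, 5]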
2942 2936 def __contains__(self, x):
2943 2937 return x in self._r1 or x in self._r2
2944 2938
2945 2939 def sort(self, reverse=False):
2946 2940 """Sort the added set
2947 2941
2948 2942 For this we use the cached list with all the generated values and if we
2949 2943 know they are ascending or descending we can sort them in a smart way.
2950 2944 """
2951 2945 self._ascending = not reverse
2952 2946
2953 2947 def isascending(self):
2954 2948 return self._ascending is not None and self._ascending
2955 2949
2956 2950 def isdescending(self):
2957 2951 return self._ascending is not None and not self._ascending
2958 2952
2959 2953 def reverse(self):
2960 2954 if self._ascending is None:
2961 2955 self._list.reverse()
2962 2956 else:
2963 2957 self._ascending = not self._ascending
2964 2958
2965 2959 def first(self):
2966 2960 for x in self:
2967 2961 return x
2968 2962 return None
2969 2963
2970 2964 def last(self):
2971 2965 self.reverse()
2972 2966 val = self.first()
2973 2967 self.reverse()
2974 2968 return val
2975 2969
2976 2970 class generatorset(abstractsmartset):
2977 2971 """Wrap a generator for lazy iteration
2978 2972
2979 2973 Wrapper structure for generators that provides lazy membership and can
2980 2974 be iterated more than once.
2981 2975 When asked for membership it generates values until either it finds the
2982 2976 requested one or has gone through all the elements in the generator
2983 2977 """
2984 2978 def __init__(self, gen, iterasc=None):
2985 2979 """
2986 2980 gen: a generator producing the values for the generatorset.
2987 2981 """
2988 2982 self._gen = gen
2989 2983 self._asclist = None
2990 2984 self._cache = {}
2991 2985 self._genlist = []
2992 2986 self._finished = False
2993 2987 self._ascending = True
2994 2988 if iterasc is not None:
2995 2989 if iterasc:
2996 2990 self.fastasc = self._iterator
2997 2991 self.__contains__ = self._asccontains
2998 2992 else:
2999 2993 self.fastdesc = self._iterator
3000 2994 self.__contains__ = self._desccontains
3001 2995
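# A minimal sketch of the lazy behaviour described above, assuming an
# ascending generator:
#
#   s = generatorset(iter([2, 4, 6]), iterasc=True)
#   3 in s    # -> False, values are generated and cached on demand
#   list(s)   # -> [2, 4, 6], the cached values can be iterated again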
3002 2996 def __nonzero__(self):
3003 2997 for r in self:
3004 2998 return True
3005 2999 return False
3006 3000
3007 3001 def __contains__(self, x):
3008 3002 if x in self._cache:
3009 3003 return self._cache[x]
3010 3004
3011 3005 # Use new values only, as existing values would be cached.
3012 3006 for l in self._consumegen():
3013 3007 if l == x:
3014 3008 return True
3015 3009
3016 3010 self._cache[x] = False
3017 3011 return False
3018 3012
3019 3013 def _asccontains(self, x):
3020 3014 """version of contains optimised for ascending generator"""
3021 3015 if x in self._cache:
3022 3016 return self._cache[x]
3023 3017
3024 3018 # Use new values only, as existing values would be cached.
3025 3019 for l in self._consumegen():
3026 3020 if l == x:
3027 3021 return True
3028 3022 if l > x:
3029 3023 break
3030 3024
3031 3025 self._cache[x] = False
3032 3026 return False
3033 3027
3034 3028 def _desccontains(self, x):
3035 3029 """version of contains optimised for descending generator"""
3036 3030 if x in self._cache:
3037 3031 return self._cache[x]
3038 3032
3039 3033 # Use new values only, as existing values would be cached.
3040 3034 for l in self._consumegen():
3041 3035 if l == x:
3042 3036 return True
3043 3037 if l < x:
3044 3038 break
3045 3039
3046 3040 self._cache[x] = False
3047 3041 return False
3048 3042
3049 3043 def __iter__(self):
3050 3044 if self._ascending:
3051 3045 it = self.fastasc
3052 3046 else:
3053 3047 it = self.fastdesc
3054 3048 if it is not None:
3055 3049 return it()
3056 3050 # we need to consume the iterator
3057 3051 for x in self._consumegen():
3058 3052 pass
3059 3053 # recall the same code
3060 3054 return iter(self)
3061 3055
3062 3056 def _iterator(self):
3063 3057 if self._finished:
3064 3058 return iter(self._genlist)
3065 3059
3066 3060 # We have to use this complex iteration strategy to allow multiple
3067 3061 # iterations at the same time. We need to be able to catch revisions
3068 3062 # removed from _consumegen and added to genlist by another instance.
3069 3063 #
3070 3064 # Getting rid of it would provide about a 15% speed up on this
3071 3065 # iteration.
3072 3066 genlist = self._genlist
3073 3067 nextrev = self._consumegen().next
3074 3068 _len = len # cache global lookup
3075 3069 def gen():
3076 3070 i = 0
3077 3071 while True:
3078 3072 if i < _len(genlist):
3079 3073 yield genlist[i]
3080 3074 else:
3081 3075 yield nextrev()
3082 3076 i += 1
3083 3077 return gen()
3084 3078
3085 3079 def _consumegen(self):
3086 3080 cache = self._cache
3087 3081 genlist = self._genlist.append
3088 3082 for item in self._gen:
3089 3083 cache[item] = True
3090 3084 genlist(item)
3091 3085 yield item
3092 3086 if not self._finished:
3093 3087 self._finished = True
3094 3088 asc = self._genlist[:]
3095 3089 asc.sort()
3096 3090 self._asclist = asc
3097 3091 self.fastasc = asc.__iter__
3098 3092 self.fastdesc = asc.__reversed__
3099 3093
3100 3094 def __len__(self):
3101 3095 for x in self._consumegen():
3102 3096 pass
3103 3097 return len(self._genlist)
3104 3098
3105 3099 def sort(self, reverse=False):
3106 3100 self._ascending = not reverse
3107 3101
3108 3102 def reverse(self):
3109 3103 self._ascending = not self._ascending
3110 3104
3111 3105 def isascending(self):
3112 3106 return self._ascending
3113 3107
3114 3108 def isdescending(self):
3115 3109 return not self._ascending
3116 3110
3117 3111 def first(self):
3118 3112 if self._ascending:
3119 3113 it = self.fastasc
3120 3114 else:
3121 3115 it = self.fastdesc
3122 3116 if it is None:
3123 3117 # we need to consume all and try again
3124 3118 for x in self._consumegen():
3125 3119 pass
3126 3120 return self.first()
3127 3121 if self:
3128 3122 return it().next()
3129 3123 return None
3130 3124
3131 3125 def last(self):
3132 3126 if self._ascending:
3133 3127 it = self.fastdesc
3134 3128 else:
3135 3129 it = self.fastasc
3136 3130 if it is None:
3137 3131 # we need to consume all and try again
3138 3132 for x in self._consumegen():
3139 3133 pass
3140 3134 return self.first()
3141 3135 if self:
3142 3136 return it().next()
3143 3137 return None
3144 3138
3145 3139 def spanset(repo, start=None, end=None):
3146 3140 """factory function to dispatch between fullreposet and actual spanset
3147 3141
3148 3142 Feel free to update all spanset call sites and kill this function at some
3149 3143 point.
3150 3144 """
3151 3145 if start is None and end is None:
3152 3146 return fullreposet(repo)
3153 3147 return _spanset(repo, start, end)
3154 3148
3155 3149
3156 3150 class _spanset(abstractsmartset):
3157 3151 """Duck type for baseset class which represents a range of revisions and
3158 3152 can work lazily and without having all the range in memory
3159 3153
3160 3154 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3161 3155 notable points:
3162 3156 - when x > y it will be automatically descending,
3163 3157 - revisions filtered by this repoview will be skipped.
3164 3158
3165 3159 """
3166 3160 def __init__(self, repo, start=0, end=None):
3167 3161 """
3168 3162 start: first revision included in the set
3169 3163 (defaults to 0)
3170 3164 end: first revision excluded (last+1)
3171 3165 (defaults to len(repo))
3172 3166
3173 3167 Spanset will be descending if `end` < `start`.
3174 3168 """
3175 3169 if end is None:
3176 3170 end = len(repo)
3177 3171 self._ascending = start <= end
3178 3172 if not self._ascending:
3179 3173 start, end = end + 1, start +1
3180 3174 self._start = start
3181 3175 self._end = end
3182 3176 self._hiddenrevs = repo.changelog.filteredrevs
3183 3177
3184 3178 def sort(self, reverse=False):
3185 3179 self._ascending = not reverse
3186 3180
3187 3181 def reverse(self):
3188 3182 self._ascending = not self._ascending
3189 3183
3190 3184 def _iterfilter(self, iterrange):
3191 3185 s = self._hiddenrevs
3192 3186 for r in iterrange:
3193 3187 if r not in s:
3194 3188 yield r
3195 3189
3196 3190 def __iter__(self):
3197 3191 if self._ascending:
3198 3192 return self.fastasc()
3199 3193 else:
3200 3194 return self.fastdesc()
3201 3195
3202 3196 def fastasc(self):
3203 3197 iterrange = xrange(self._start, self._end)
3204 3198 if self._hiddenrevs:
3205 3199 return self._iterfilter(iterrange)
3206 3200 return iter(iterrange)
3207 3201
3208 3202 def fastdesc(self):
3209 3203 iterrange = xrange(self._end - 1, self._start - 1, -1)
3210 3204 if self._hiddenrevs:
3211 3205 return self._iterfilter(iterrange)
3212 3206 return iter(iterrange)
3213 3207
3214 3208 def __contains__(self, rev):
3215 3209 hidden = self._hiddenrevs
3216 3210 return ((self._start <= rev < self._end)
3217 3211 and not (hidden and rev in hidden))
3218 3212
3219 3213 def __nonzero__(self):
3220 3214 for r in self:
3221 3215 return True
3222 3216 return False
3223 3217
3224 3218 def __len__(self):
3225 3219 if not self._hiddenrevs:
3226 3220 return abs(self._end - self._start)
3227 3221 else:
3228 3222 count = 0
3229 3223 start = self._start
3230 3224 end = self._end
3231 3225 for rev in self._hiddenrevs:
3232 3226 if (end < rev <= start) or (start <= rev < end):
3233 3227 count += 1
3234 3228 return abs(self._end - self._start) - count
3235 3229
3236 3230 def isascending(self):
3237 3231 return self._ascending
3238 3232
3239 3233 def isdescending(self):
3240 3234 return not self._ascending
3241 3235
3242 3236 def first(self):
3243 3237 if self._ascending:
3244 3238 it = self.fastasc
3245 3239 else:
3246 3240 it = self.fastdesc
3247 3241 for x in it():
3248 3242 return x
3249 3243 return None
3250 3244
3251 3245 def last(self):
3252 3246 if self._ascending:
3253 3247 it = self.fastdesc
3254 3248 else:
3255 3249 it = self.fastasc
3256 3250 for x in it():
3257 3251 return x
3258 3252 return None
3259 3253
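The range semantics documented above are easier to see in isolation. Below is a minimal, illustrative sketch (not part of revset.py) of what _spanset iteration does: walk a half-open revision range, flip to descending order when start > end, and skip anything in a hidden set. The names lazyspan and hidden are inventions of this sketch; the real class takes its hidden set from repo.changelog.filteredrevs.

    def lazyspan(start, end, hidden=frozenset()):
        # Illustrative analogue of _spanset iteration (assumes Python 2,
        # matching the xrange-based code above). Yields the half-open
        # range [start, end), descending when start > end, and skips
        # any revision present in `hidden`.
        step = 1 if start <= end else -1
        for rev in xrange(start, end, step):
            if rev not in hidden:
                yield rev

    # e.g. list(lazyspan(2, 5))                  -> [2, 3, 4]
    #      list(lazyspan(5, 2, hidden=set([4]))) -> [5, 3]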
3260 3254 class fullreposet(_spanset):
3261 3255 """a set containing all revisions in the repo
3262 3256
3263 3257 This class exists to host special optimizations.
3264 3258 """
3265 3259
3266 3260 def __init__(self, repo):
3267 3261 super(fullreposet, self).__init__(repo)
3268 3262
3269 3263 def __and__(self, other):
3270 3264 """As self contains the whole repo, all of the other set should also be
3271 3265 in self. Therefore `self & other = other`.
3272 3266
3273 3267 This boldly assumes the other contains valid revs only.
3274 3268 """
3275 3269 # other is not a smartset, make it so
3276 3270 if not util.safehasattr(other, 'isascending'):
3277 3271 # filter out hidden revisions
3278 3272 # (this boldly assumes all smartsets are pure)
3279 3273 #
3280 3274 # `other` was used with "&", let's assume this is a set-like
3281 3275 # object.
3282 3276 other = baseset(other - self._hiddenrevs)
3283 3277
3284 3278 other.sort(reverse=self.isdescending())
3285 3279 return other
3286 3280
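The __and__ shortcut above is just the identity "universe & S = S" applied lazily: the only real work is coercing a plain iterable into something smartset-shaped and matching the iteration order. A rough sketch of the same idea follows; the class name UniverseSet, the explicit universe check (which plays the role of the hidden-rev filtering above), and the plain-list return value are simplifications of this sketch, not anything in revset.py.

    class UniverseSet(object):
        # Illustrative stand-in for fullreposet.__and__: since we notionally
        # contain every valid rev, intersecting just drops anything outside
        # the universe and hands the rest back in our sort order.
        def __init__(self, revs, descending=False):
            self._revs = set(revs)
            self._descending = descending

        def __and__(self, other):
            kept = [r for r in other if r in self._revs]
            kept.sort(reverse=self._descending)
            return kept

    # e.g. UniverseSet(range(10)) & [7, 3, 11] -> [3, 7]  (11 is filtered out)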
3287 3281 # tell hggettext to extract docstrings from these functions:
3288 3282 i18nfunctions = symbols.values()
@@ -1,1259 +1,1265 b''
1 1 $ HGENCODING=utf-8
2 2 $ export HGENCODING
3 3
4 4 $ try() {
5 5 > hg debugrevspec --debug "$@"
6 6 > }
7 7
8 8 $ log() {
9 9 > hg log --template '{rev}\n' -r "$1"
10 10 > }
11 11
12 12 $ hg init repo
13 13 $ cd repo
14 14
15 15 $ echo a > a
16 16 $ hg branch a
17 17 marked working directory as branch a
18 18 (branches are permanent and global, did you want a bookmark?)
19 19 $ hg ci -Aqm0
20 20
21 21 $ echo b > b
22 22 $ hg branch b
23 23 marked working directory as branch b
24 24 (branches are permanent and global, did you want a bookmark?)
25 25 $ hg ci -Aqm1
26 26
27 27 $ rm a
28 28 $ hg branch a-b-c-
29 29 marked working directory as branch a-b-c-
30 30 (branches are permanent and global, did you want a bookmark?)
31 31 $ hg ci -Aqm2 -u Bob
32 32
33 33 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
34 34 2
35 35 $ hg log -r "extra('branch')" --template '{rev}\n'
36 36 0
37 37 1
38 38 2
39 39 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
40 40 0 a
41 41 2 a-b-c-
42 42
43 43 $ hg co 1
44 44 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
45 45 $ hg branch +a+b+c+
46 46 marked working directory as branch +a+b+c+
47 47 (branches are permanent and global, did you want a bookmark?)
48 48 $ hg ci -Aqm3
49 49
50 50 $ hg co 2 # interleave
51 51 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
52 52 $ echo bb > b
53 53 $ hg branch -- -a-b-c-
54 54 marked working directory as branch -a-b-c-
55 55 (branches are permanent and global, did you want a bookmark?)
56 56 $ hg ci -Aqm4 -d "May 12 2005"
57 57
58 58 $ hg co 3
59 59 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 60 $ hg branch !a/b/c/
61 61 marked working directory as branch !a/b/c/
62 62 (branches are permanent and global, did you want a bookmark?)
63 63 $ hg ci -Aqm"5 bug"
64 64
65 65 $ hg merge 4
66 66 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
67 67 (branch merge, don't forget to commit)
68 68 $ hg branch _a_b_c_
69 69 marked working directory as branch _a_b_c_
70 70 (branches are permanent and global, did you want a bookmark?)
71 71 $ hg ci -Aqm"6 issue619"
72 72
73 73 $ hg branch .a.b.c.
74 74 marked working directory as branch .a.b.c.
75 75 (branches are permanent and global, did you want a bookmark?)
76 76 $ hg ci -Aqm7
77 77
78 78 $ hg branch all
79 79 marked working directory as branch all
80 80 (branches are permanent and global, did you want a bookmark?)
81 81
82 82 $ hg co 4
83 83 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 84 $ hg branch Γ©
85 85 marked working directory as branch \xc3\xa9 (esc)
86 86 (branches are permanent and global, did you want a bookmark?)
87 87 $ hg ci -Aqm9
88 88
89 89 $ hg tag -r6 1.0
90 90
91 91 $ hg clone --quiet -U -r 7 . ../remote1
92 92 $ hg clone --quiet -U -r 8 . ../remote2
93 93 $ echo "[paths]" >> .hg/hgrc
94 94 $ echo "default = ../remote1" >> .hg/hgrc
95 95
96 96 names that should work without quoting
97 97
98 98 $ try a
99 99 ('symbol', 'a')
100 100 0
101 101 $ try b-a
102 102 (minus
103 103 ('symbol', 'b')
104 104 ('symbol', 'a'))
105 105 1
106 106 $ try _a_b_c_
107 107 ('symbol', '_a_b_c_')
108 108 6
109 109 $ try _a_b_c_-a
110 110 (minus
111 111 ('symbol', '_a_b_c_')
112 112 ('symbol', 'a'))
113 113 6
114 114 $ try .a.b.c.
115 115 ('symbol', '.a.b.c.')
116 116 7
117 117 $ try .a.b.c.-a
118 118 (minus
119 119 ('symbol', '.a.b.c.')
120 120 ('symbol', 'a'))
121 121 7
122 122 $ try -- '-a-b-c-' # complains
123 123 hg: parse error at 7: not a prefix: end
124 124 [255]
125 125 $ log -a-b-c- # succeeds with fallback
126 126 4
127 127
128 128 $ try -- -a-b-c--a # complains
129 129 (minus
130 130 (minus
131 131 (minus
132 132 (negate
133 133 ('symbol', 'a'))
134 134 ('symbol', 'b'))
135 135 ('symbol', 'c'))
136 136 (negate
137 137 ('symbol', 'a')))
138 138 abort: unknown revision '-a'!
139 139 [255]
140 140 $ try Γ©
141 141 ('symbol', '\xc3\xa9')
142 142 9
143 143
144 144 no quoting needed
145 145
146 146 $ log ::a-b-c-
147 147 0
148 148 1
149 149 2
150 150
151 151 quoting needed
152 152
153 153 $ try '"-a-b-c-"-a'
154 154 (minus
155 155 ('string', '-a-b-c-')
156 156 ('symbol', 'a'))
157 157 4
158 158
159 159 $ log '1 or 2'
160 160 1
161 161 2
162 162 $ log '1|2'
163 163 1
164 164 2
165 165 $ log '1 and 2'
166 166 $ log '1&2'
167 167 $ try '1&2|3' # precedence - and is higher
168 168 (or
169 169 (and
170 170 ('symbol', '1')
171 171 ('symbol', '2'))
172 172 ('symbol', '3'))
173 173 3
174 174 $ try '1|2&3'
175 175 (or
176 176 ('symbol', '1')
177 177 (and
178 178 ('symbol', '2')
179 179 ('symbol', '3')))
180 180 1
181 181 $ try '1&2&3' # associativity
182 182 (and
183 183 (and
184 184 ('symbol', '1')
185 185 ('symbol', '2'))
186 186 ('symbol', '3'))
187 187 $ try '1|(2|3)'
188 188 (or
189 189 ('symbol', '1')
190 190 (group
191 191 (or
192 192 ('symbol', '2')
193 193 ('symbol', '3'))))
194 194 1
195 195 2
196 196 3
197 197 $ log '1.0' # tag
198 198 6
199 199 $ log 'a' # branch
200 200 0
201 201 $ log '2785f51ee'
202 202 0
203 203 $ log 'date(2005)'
204 204 4
205 205 $ log 'date(this is a test)'
206 206 hg: parse error at 10: unexpected token: symbol
207 207 [255]
208 208 $ log 'date()'
209 209 hg: parse error: date requires a string
210 210 [255]
211 211 $ log 'date'
212 212 hg: parse error: can't use date here
213 213 [255]
214 214 $ log 'date('
215 215 hg: parse error at 5: not a prefix: end
216 216 [255]
217 217 $ log 'date(tip)'
218 218 abort: invalid date: 'tip'
219 219 [255]
220 220 $ log '"date"'
221 221 abort: unknown revision 'date'!
222 222 [255]
223 223 $ log 'date(2005) and 1::'
224 224 4
225 225
226 226 ancestor can accept 0 or more arguments
227 227
228 228 $ log 'ancestor()'
229 229 $ log 'ancestor(1)'
230 230 1
231 231 $ log 'ancestor(4,5)'
232 232 1
233 233 $ log 'ancestor(4,5) and 4'
234 234 $ log 'ancestor(0,0,1,3)'
235 235 0
236 236 $ log 'ancestor(3,1,5,3,5,1)'
237 237 1
238 238 $ log 'ancestor(0,1,3,5)'
239 239 0
240 240 $ log 'ancestor(1,2,3,4,5)'
241 241 1
242 242 $ log 'ancestors(5)'
243 243 0
244 244 1
245 245 3
246 246 5
247 247 $ log 'ancestor(ancestors(5))'
248 248 0
249 249 $ log 'author(bob)'
250 250 2
251 251 $ log 'author("re:bob|test")'
252 252 0
253 253 1
254 254 2
255 255 3
256 256 4
257 257 5
258 258 6
259 259 7
260 260 8
261 261 9
262 262 $ log 'branch(Γ©)'
263 263 8
264 264 9
265 265 $ log 'branch(a)'
266 266 0
267 267 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
268 268 0 a
269 269 2 a-b-c-
270 270 3 +a+b+c+
271 271 4 -a-b-c-
272 272 5 !a/b/c/
273 273 6 _a_b_c_
274 274 7 .a.b.c.
275 275 $ log 'children(ancestor(4,5))'
276 276 2
277 277 3
278 278 $ log 'closed()'
279 279 $ log 'contains(a)'
280 280 0
281 281 1
282 282 3
283 283 5
284 284 $ log 'contains("../repo/a")'
285 285 0
286 286 1
287 287 3
288 288 5
289 289 $ log 'desc(B)'
290 290 5
291 291 $ log 'descendants(2 or 3)'
292 292 2
293 293 3
294 294 4
295 295 5
296 296 6
297 297 7
298 298 8
299 299 9
300 300 $ log 'file("b*")'
301 301 1
302 302 4
303 303 $ log 'filelog("b")'
304 304 1
305 305 4
306 306 $ log 'filelog("../repo/b")'
307 307 1
308 308 4
309 309 $ log 'follow()'
310 310 0
311 311 1
312 312 2
313 313 4
314 314 8
315 315 9
316 316 $ log 'grep("issue\d+")'
317 317 6
318 318 $ try 'grep("(")' # invalid regular expression
319 319 (func
320 320 ('symbol', 'grep')
321 321 ('string', '('))
322 322 hg: parse error: invalid match pattern: unbalanced parenthesis
323 323 [255]
324 324 $ try 'grep("\bissue\d+")'
325 325 (func
326 326 ('symbol', 'grep')
327 327 ('string', '\x08issue\\d+'))
328 328 $ try 'grep(r"\bissue\d+")'
329 329 (func
330 330 ('symbol', 'grep')
331 331 ('string', '\\bissue\\d+'))
332 332 6
333 333 $ try 'grep(r"\")'
334 334 hg: parse error at 7: unterminated string
335 335 [255]
336 336 $ log 'head()'
337 337 0
338 338 1
339 339 2
340 340 3
341 341 4
342 342 5
343 343 6
344 344 7
345 345 9
346 346 $ log 'heads(6::)'
347 347 7
348 348 $ log 'keyword(issue)'
349 349 6
350 350 $ log 'keyword("test a")'
351 351 $ log 'limit(head(), 1)'
352 352 0
353 353 $ log 'matching(6)'
354 354 6
355 355 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
356 356 6
357 357 7
358 358
359 359 Testing min and max
360 360
361 361 max: simple
362 362
363 363 $ log 'max(contains(a))'
364 364 5
365 365
366 366 max: simple on unordered set
367 367
368 368 $ log 'max((4+0+2+5+7) and contains(a))'
369 369 5
370 370
371 371 max: no result
372 372
373 373 $ log 'max(contains(stringthatdoesnotappearanywhere))'
374 374
375 375 max: no result on unordered set
376 376
377 377 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
378 378
379 379 min: simple
380 380
381 381 $ log 'min(contains(a))'
382 382 0
383 383
384 384 min: simple on unordered set
385 385
386 386 $ log 'min((4+0+2+5+7) and contains(a))'
387 387 0
388 388
389 389 min: empty
390 390
391 391 $ log 'min(contains(stringthatdoesnotappearanywhere))'
392 392
393 393 min: empty on unordered set
394 394
395 395 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
396 396
397 397
398 398 $ log 'merge()'
399 399 6
400 400 $ log 'branchpoint()'
401 401 1
402 402 4
403 403 $ log 'modifies(b)'
404 404 4
405 405 $ log 'modifies("path:b")'
406 406 4
407 407 $ log 'modifies("*")'
408 408 4
409 409 6
410 410 $ log 'modifies("set:modified()")'
411 411 4
412 412 $ log 'id(5)'
413 413 2
414 414 $ log 'only(9)'
415 415 8
416 416 9
417 417 $ log 'only(8)'
418 418 8
419 419 $ log 'only(9, 5)'
420 420 2
421 421 4
422 422 8
423 423 9
424 424 $ log 'only(7 + 9, 5 + 2)'
425 425 4
426 426 6
427 427 7
428 428 8
429 429 9
430 430
431 431 Test empty set input
432 432 $ log 'only(p2())'
433 433 $ log 'only(p1(), p2())'
434 434 0
435 435 1
436 436 2
437 437 4
438 438 8
439 439 9
440 440
441 441 Test '%' operator
442 442
443 443 $ log '9%'
444 444 8
445 445 9
446 446 $ log '9%5'
447 447 2
448 448 4
449 449 8
450 450 9
451 451 $ log '(7 + 9)%(5 + 2)'
452 452 4
453 453 6
454 454 7
455 455 8
456 456 9
457 457
458 458 Test the order of operations
459 459
460 460 $ log '7 + 9%5 + 2'
461 461 7
462 462 2
463 463 4
464 464 8
465 465 9
466 466
467 467 Test explicit numeric revision
468 468 $ log 'rev(-1)'
469 469 $ log 'rev(0)'
470 470 0
471 471 $ log 'rev(9)'
472 472 9
473 473 $ log 'rev(10)'
474 474 $ log 'rev(tip)'
475 475 hg: parse error: rev expects a number
476 476 [255]
477 477
478 478 $ log 'outgoing()'
479 479 8
480 480 9
481 481 $ log 'outgoing("../remote1")'
482 482 8
483 483 9
484 484 $ log 'outgoing("../remote2")'
485 485 3
486 486 5
487 487 6
488 488 7
489 489 9
490 490 $ log 'p1(merge())'
491 491 5
492 492 $ log 'p2(merge())'
493 493 4
494 494 $ log 'parents(merge())'
495 495 4
496 496 5
497 497 $ log 'p1(branchpoint())'
498 498 0
499 499 2
500 500 $ log 'p2(branchpoint())'
501 501 $ log 'parents(branchpoint())'
502 502 0
503 503 2
504 504 $ log 'removes(a)'
505 505 2
506 506 6
507 507 $ log 'roots(all())'
508 508 0
509 509 $ log 'reverse(2 or 3 or 4 or 5)'
510 510 5
511 511 4
512 512 3
513 513 2
514 514 $ log 'reverse(all())'
515 515 9
516 516 8
517 517 7
518 518 6
519 519 5
520 520 4
521 521 3
522 522 2
523 523 1
524 524 0
525 525 $ log 'reverse(all()) & filelog(b)'
526 526 4
527 527 1
528 528 $ log 'rev(5)'
529 529 5
530 530 $ log 'sort(limit(reverse(all()), 3))'
531 531 7
532 532 8
533 533 9
534 534 $ log 'sort(2 or 3 or 4 or 5, date)'
535 535 2
536 536 3
537 537 5
538 538 4
539 539 $ log 'tagged()'
540 540 6
541 541 $ log 'tag()'
542 542 6
543 543 $ log 'tag(1.0)'
544 544 6
545 545 $ log 'tag(tip)'
546 546 9
547 547
548 548 test sort revset
549 549 --------------------------------------------
550 550
551 551 test when adding two unordered revsets
552 552
553 553 $ log 'sort(keyword(issue) or modifies(b))'
554 554 4
555 555 6
556 556
557 557 test when sorting a reversed collection in the same way it is already ordered
558 558
559 559 $ log 'sort(reverse(all()), -rev)'
560 560 9
561 561 8
562 562 7
563 563 6
564 564 5
565 565 4
566 566 3
567 567 2
568 568 1
569 569 0
570 570
571 571 test when sorting a reversed collection
572 572
573 573 $ log 'sort(reverse(all()), rev)'
574 574 0
575 575 1
576 576 2
577 577 3
578 578 4
579 579 5
580 580 6
581 581 7
582 582 8
583 583 9
584 584
585 585
586 586 test sorting two sorted collections in different orders
587 587
588 588 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
589 589 2
590 590 6
591 591 8
592 592 9
593 593
594 594 test sorting two sorted collections in different orders backwards
595 595
596 596 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
597 597 9
598 598 8
599 599 6
600 600 2
601 601
602 602 test subtracting something from an addset
603 603
604 604 $ log '(outgoing() or removes(a)) - removes(a)'
605 605 8
606 606 9
607 607
608 608 test intersecting something with an addset
609 609
610 610 $ log 'parents(outgoing() or removes(a))'
611 611 1
612 612 4
613 613 5
614 614 8
615 615
616 616 test that the `or` operation combines elements in the right order:
617 617
618 618 $ log '3:4 or 2:5'
619 619 3
620 620 4
621 621 2
622 622 5
623 623 $ log '3:4 or 5:2'
624 624 3
625 625 4
626 626 5
627 627 2
628 628 $ log 'sort(3:4 or 2:5)'
629 629 2
630 630 3
631 631 4
632 632 5
633 633 $ log 'sort(3:4 or 5:2)'
634 634 2
635 635 3
636 636 4
637 637 5
638 638
639 639 check that conversion to only() works
640 640 $ try --optimize '::3 - ::1'
641 641 (minus
642 642 (dagrangepre
643 643 ('symbol', '3'))
644 644 (dagrangepre
645 645 ('symbol', '1')))
646 646 * optimized:
647 647 (func
648 648 ('symbol', 'only')
649 649 (list
650 650 ('symbol', '3')
651 651 ('symbol', '1')))
652 652 3
653 653 $ try --optimize 'ancestors(1) - ancestors(3)'
654 654 (minus
655 655 (func
656 656 ('symbol', 'ancestors')
657 657 ('symbol', '1'))
658 658 (func
659 659 ('symbol', 'ancestors')
660 660 ('symbol', '3')))
661 661 * optimized:
662 662 (func
663 663 ('symbol', 'only')
664 664 (list
665 665 ('symbol', '1')
666 666 ('symbol', '3')))
667 667 $ try --optimize 'not ::2 and ::6'
668 668 (and
669 669 (not
670 670 (dagrangepre
671 671 ('symbol', '2')))
672 672 (dagrangepre
673 673 ('symbol', '6')))
674 674 * optimized:
675 675 (func
676 676 ('symbol', 'only')
677 677 (list
678 678 ('symbol', '6')
679 679 ('symbol', '2')))
680 680 3
681 681 4
682 682 5
683 683 6
684 684 $ try --optimize 'ancestors(6) and not ancestors(4)'
685 685 (and
686 686 (func
687 687 ('symbol', 'ancestors')
688 688 ('symbol', '6'))
689 689 (not
690 690 (func
691 691 ('symbol', 'ancestors')
692 692 ('symbol', '4'))))
693 693 * optimized:
694 694 (func
695 695 ('symbol', 'only')
696 696 (list
697 697 ('symbol', '6')
698 698 ('symbol', '4')))
699 699 3
700 700 5
701 701 6
702 702
703 703 we can use patterns when searching for tags
704 704
705 705 $ log 'tag("1..*")'
706 706 abort: tag '1..*' does not exist
707 707 [255]
708 708 $ log 'tag("re:1..*")'
709 709 6
710 710 $ log 'tag("re:[0-9].[0-9]")'
711 711 6
712 712 $ log 'tag("literal:1.0")'
713 713 6
714 714 $ log 'tag("re:0..*")'
715 715
716 716 $ log 'tag(unknown)'
717 717 abort: tag 'unknown' does not exist
718 718 [255]
719 719 $ log 'branch(unknown)'
720 720 abort: unknown revision 'unknown'!
721 721 [255]
722 722 $ log 'user(bob)'
723 723 2
724 724
725 725 $ log '4::8'
726 726 4
727 727 8
728 728 $ log '4:8'
729 729 4
730 730 5
731 731 6
732 732 7
733 733 8
734 734
735 735 $ log 'sort(!merge() & (modifies(b) | user(bob) | keyword(bug) | keyword(issue) & 1::9), "-date")'
736 736 4
737 737 2
738 738 5
739 739
740 740 $ log 'not 0 and 0:2'
741 741 1
742 742 2
743 743 $ log 'not 1 and 0:2'
744 744 0
745 745 2
746 746 $ log 'not 2 and 0:2'
747 747 0
748 748 1
749 749 $ log '(1 and 2)::'
750 750 $ log '(1 and 2):'
751 751 $ log '(1 and 2):3'
752 752 $ log 'sort(head(), -rev)'
753 753 9
754 754 7
755 755 6
756 756 5
757 757 4
758 758 3
759 759 2
760 760 1
761 761 0
762 762 $ log '4::8 - 8'
763 763 4
764 764 $ log 'matching(1 or 2 or 3) and (2 or 3 or 1)'
765 765 2
766 766 3
767 767 1
768 768
769 769 issue2437
770 770
771 771 $ log '3 and p1(5)'
772 772 3
773 773 $ log '4 and p2(6)'
774 774 4
775 775 $ log '1 and parents(:2)'
776 776 1
777 777 $ log '2 and children(1:)'
778 778 2
779 779 $ log 'roots(all()) or roots(all())'
780 780 0
781 781 $ hg debugrevspec 'roots(all()) or roots(all())'
782 782 0
783 783 $ log 'heads(branch(Γ©)) or heads(branch(Γ©))'
784 784 9
785 785 $ log 'ancestors(8) and (heads(branch("-a-b-c-")) or heads(branch(Γ©)))'
786 786 4
787 787
788 788 issue2654: report a parse error if the revset was not completely parsed
789 789
790 790 $ log '1 OR 2'
791 791 hg: parse error at 2: invalid token
792 792 [255]
793 793
794 794 or operator should preserve ordering:
795 795 $ log 'reverse(2::4) or tip'
796 796 4
797 797 2
798 798 9
799 799
800 800 parentrevspec
801 801
802 802 $ log 'merge()^0'
803 803 6
804 804 $ log 'merge()^'
805 805 5
806 806 $ log 'merge()^1'
807 807 5
808 808 $ log 'merge()^2'
809 809 4
810 810 $ log 'merge()^^'
811 811 3
812 812 $ log 'merge()^1^'
813 813 3
814 814 $ log 'merge()^^^'
815 815 1
816 816
817 817 $ log 'merge()~0'
818 818 6
819 819 $ log 'merge()~1'
820 820 5
821 821 $ log 'merge()~2'
822 822 3
823 823 $ log 'merge()~2^1'
824 824 1
825 825 $ log 'merge()~3'
826 826 1
827 827
828 828 $ log '(-3:tip)^'
829 829 4
830 830 6
831 831 8
832 832
833 833 $ log 'tip^foo'
834 834 hg: parse error: ^ expects a number 0, 1, or 2
835 835 [255]
836 836
837 837 multiple revspecs
838 838
839 839 $ hg log -r 'tip~1:tip' -r 'tip~2:tip~1' --template '{rev}\n'
840 840 8
841 841 9
842 842 4
843 843 5
844 844 6
845 845 7
846 846
847 847 test usage in revpair (with "+")
848 848
849 849 (real pair)
850 850
851 851 $ hg diff -r 'tip^^' -r 'tip'
852 852 diff -r 2326846efdab -r 24286f4ae135 .hgtags
853 853 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
854 854 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
855 855 @@ -0,0 +1,1 @@
856 856 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
857 857 $ hg diff -r 'tip^^::tip'
858 858 diff -r 2326846efdab -r 24286f4ae135 .hgtags
859 859 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
860 860 +++ b/.hgtags Thu Jan 01 00:00:00 1970 +0000
861 861 @@ -0,0 +1,1 @@
862 862 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
863 863
864 864 (single rev)
865 865
866 866 $ hg diff -r 'tip^' -r 'tip^'
867 867 $ hg diff -r 'tip^::tip^ or tip^'
868 868
869 869 (single rev that does not look like a range)
870 870
871 871 $ hg diff -r 'tip^ or tip^'
872 872 diff -r d5d0dcbdc4d9 .hgtags
873 873 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
874 874 +++ b/.hgtags * (glob)
875 875 @@ -0,0 +1,1 @@
876 876 +e0cc66ef77e8b6f711815af4e001a6594fde3ba5 1.0
877 877
878 878 (no rev)
879 879
880 880 $ hg diff -r 'author("babar") or author("celeste")'
881 881 abort: empty revision range
882 882 [255]
883 883
884 884 aliases:
885 885
886 886 $ echo '[revsetalias]' >> .hg/hgrc
887 887 $ echo 'm = merge()' >> .hg/hgrc
888 888 $ echo 'sincem = descendants(m)' >> .hg/hgrc
889 889 $ echo 'd($1) = reverse(sort($1, date))' >> .hg/hgrc
890 890 $ echo 'rs(ARG1, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
891 891 $ echo 'rs4(ARG1, ARGA, ARGB, ARG2) = reverse(sort(ARG1, ARG2))' >> .hg/hgrc
892 892
893 893 $ try m
894 894 ('symbol', 'm')
895 895 (func
896 896 ('symbol', 'merge')
897 897 None)
898 898 6
899 899
900 900 test alias recursion
901 901
902 902 $ try sincem
903 903 ('symbol', 'sincem')
904 904 (func
905 905 ('symbol', 'descendants')
906 906 (func
907 907 ('symbol', 'merge')
908 908 None))
909 909 6
910 910 7
911 911
912 912 test infinite recursion
913 913
914 914 $ echo 'recurse1 = recurse2' >> .hg/hgrc
915 915 $ echo 'recurse2 = recurse1' >> .hg/hgrc
916 916 $ try recurse1
917 917 ('symbol', 'recurse1')
918 918 hg: parse error: infinite expansion of revset alias "recurse1" detected
919 919 [255]
920 920
921 921 $ echo 'level1($1, $2) = $1 or $2' >> .hg/hgrc
922 922 $ echo 'level2($1, $2) = level1($2, $1)' >> .hg/hgrc
923 923 $ try "level2(level1(1, 2), 3)"
924 924 (func
925 925 ('symbol', 'level2')
926 926 (list
927 927 (func
928 928 ('symbol', 'level1')
929 929 (list
930 930 ('symbol', '1')
931 931 ('symbol', '2')))
932 932 ('symbol', '3')))
933 933 (or
934 934 ('symbol', '3')
935 935 (or
936 936 ('symbol', '1')
937 937 ('symbol', '2')))
938 938 3
939 939 1
940 940 2
941 941
942 942 test nesting and variable passing
943 943
944 944 $ echo 'nested($1) = nested2($1)' >> .hg/hgrc
945 945 $ echo 'nested2($1) = nested3($1)' >> .hg/hgrc
946 946 $ echo 'nested3($1) = max($1)' >> .hg/hgrc
947 947 $ try 'nested(2:5)'
948 948 (func
949 949 ('symbol', 'nested')
950 950 (range
951 951 ('symbol', '2')
952 952 ('symbol', '5')))
953 953 (func
954 954 ('symbol', 'max')
955 955 (range
956 956 ('symbol', '2')
957 957 ('symbol', '5')))
958 958 5
959 959
960 960 test variable isolation: variable placeholders are rewritten as strings,
961 961 then parsed and matched again as strings. Check that they do not leak
962 962 too far.
963 963
964 964 $ echo 'injectparamasstring = max("$1")' >> .hg/hgrc
965 965 $ echo 'callinjection($1) = descendants(injectparamasstring)' >> .hg/hgrc
966 966 $ try 'callinjection(2:5)'
967 967 (func
968 968 ('symbol', 'callinjection')
969 969 (range
970 970 ('symbol', '2')
971 971 ('symbol', '5')))
972 972 (func
973 973 ('symbol', 'descendants')
974 974 (func
975 975 ('symbol', 'max')
976 976 ('string', '$1')))
977 977 abort: unknown revision '$1'!
978 978 [255]
979 979
980 980 $ echo 'injectparamasstring2 = max(_aliasarg("$1"))' >> .hg/hgrc
981 981 $ echo 'callinjection2($1) = descendants(injectparamasstring2)' >> .hg/hgrc
982 982 $ try 'callinjection2(2:5)'
983 983 (func
984 984 ('symbol', 'callinjection2')
985 985 (range
986 986 ('symbol', '2')
987 987 ('symbol', '5')))
988 988 abort: failed to parse the definition of revset alias "injectparamasstring2": not a function: _aliasarg
989 989 [255]
990 990 $ hg debugrevspec --debug --config revsetalias.anotherbadone='branch(' "tip"
991 991 ('symbol', 'tip')
992 992 warning: failed to parse the definition of revset alias "anotherbadone": at 7: not a prefix: end
993 993 warning: failed to parse the definition of revset alias "injectparamasstring2": not a function: _aliasarg
994 994 9
995 995 >>> data = file('.hg/hgrc', 'rb').read()
996 996 >>> file('.hg/hgrc', 'wb').write(data.replace('_aliasarg', ''))
997 997
998 998 $ try 'tip'
999 999 ('symbol', 'tip')
1000 1000 9
1001
1002 $ hg debugrevspec --debug --config revsetalias.'bad name'='tip' "tip"
1003 ('symbol', 'tip')
1004 warning: failed to parse the declaration of revset alias "bad name": at 4: invalid token
1005 9
1006
1001 1007 $ try 'd(2:5)'
1002 1008 (func
1003 1009 ('symbol', 'd')
1004 1010 (range
1005 1011 ('symbol', '2')
1006 1012 ('symbol', '5')))
1007 1013 (func
1008 1014 ('symbol', 'reverse')
1009 1015 (func
1010 1016 ('symbol', 'sort')
1011 1017 (list
1012 1018 (range
1013 1019 ('symbol', '2')
1014 1020 ('symbol', '5'))
1015 1021 ('symbol', 'date'))))
1016 1022 4
1017 1023 5
1018 1024 3
1019 1025 2
1020 1026 $ try 'rs(2 or 3, date)'
1021 1027 (func
1022 1028 ('symbol', 'rs')
1023 1029 (list
1024 1030 (or
1025 1031 ('symbol', '2')
1026 1032 ('symbol', '3'))
1027 1033 ('symbol', 'date')))
1028 1034 (func
1029 1035 ('symbol', 'reverse')
1030 1036 (func
1031 1037 ('symbol', 'sort')
1032 1038 (list
1033 1039 (or
1034 1040 ('symbol', '2')
1035 1041 ('symbol', '3'))
1036 1042 ('symbol', 'date'))))
1037 1043 3
1038 1044 2
1039 1045 $ try 'rs()'
1040 1046 (func
1041 1047 ('symbol', 'rs')
1042 1048 None)
1043 1049 hg: parse error: invalid number of arguments: 0
1044 1050 [255]
1045 1051 $ try 'rs(2)'
1046 1052 (func
1047 1053 ('symbol', 'rs')
1048 1054 ('symbol', '2'))
1049 1055 hg: parse error: invalid number of arguments: 1
1050 1056 [255]
1051 1057 $ try 'rs(2, data, 7)'
1052 1058 (func
1053 1059 ('symbol', 'rs')
1054 1060 (list
1055 1061 (list
1056 1062 ('symbol', '2')
1057 1063 ('symbol', 'data'))
1058 1064 ('symbol', '7')))
1059 1065 hg: parse error: invalid number of arguments: 3
1060 1066 [255]
1061 1067 $ try 'rs4(2 or 3, x, x, date)'
1062 1068 (func
1063 1069 ('symbol', 'rs4')
1064 1070 (list
1065 1071 (list
1066 1072 (list
1067 1073 (or
1068 1074 ('symbol', '2')
1069 1075 ('symbol', '3'))
1070 1076 ('symbol', 'x'))
1071 1077 ('symbol', 'x'))
1072 1078 ('symbol', 'date')))
1073 1079 (func
1074 1080 ('symbol', 'reverse')
1075 1081 (func
1076 1082 ('symbol', 'sort')
1077 1083 (list
1078 1084 (or
1079 1085 ('symbol', '2')
1080 1086 ('symbol', '3'))
1081 1087 ('symbol', 'date'))))
1082 1088 3
1083 1089 2
1084 1090
1085 1091 issue2549 - correct optimizations
1086 1092
1087 1093 $ log 'limit(1 or 2 or 3, 2) and not 2'
1088 1094 1
1089 1095 $ log 'max(1 or 2) and not 2'
1090 1096 $ log 'min(1 or 2) and not 1'
1091 1097 $ log 'last(1 or 2, 1) and not 2'
1092 1098
1093 1099 issue4289 - ordering of built-ins
1094 1100 $ hg log -M -q -r 3:2
1095 1101 3:8528aa5637f2
1096 1102 2:5ed5505e9f1c
1097 1103
1098 1104 test revsets starting with a 40-char hash (issue3669)
1099 1105
1100 1106 $ ISSUE3669_TIP=`hg tip --template '{node}'`
1101 1107 $ hg log -r "${ISSUE3669_TIP}" --template '{rev}\n'
1102 1108 9
1103 1109 $ hg log -r "${ISSUE3669_TIP}^" --template '{rev}\n'
1104 1110 8
1105 1111
1106 1112 test or-ed indirect predicates (issue3775)
1107 1113
1108 1114 $ log '6 or 6^1' | sort
1109 1115 5
1110 1116 6
1111 1117 $ log '6^1 or 6' | sort
1112 1118 5
1113 1119 6
1114 1120 $ log '4 or 4~1' | sort
1115 1121 2
1116 1122 4
1117 1123 $ log '4~1 or 4' | sort
1118 1124 2
1119 1125 4
1120 1126 $ log '(0 or 2):(4 or 6) or 0 or 6' | sort
1121 1127 0
1122 1128 1
1123 1129 2
1124 1130 3
1125 1131 4
1126 1132 5
1127 1133 6
1128 1134 $ log '0 or 6 or (0 or 2):(4 or 6)' | sort
1129 1135 0
1130 1136 1
1131 1137 2
1132 1138 3
1133 1139 4
1134 1140 5
1135 1141 6
1136 1142
1137 1143 tests for 'remote()' predicate:
1138 1144 #. (csets in remote) (id) (remote)
1139 1145 1. less than local current branch "default"
1140 1146 2. same as local specified "default"
1141 1147 3. more than local specified specified
1142 1148
1143 1149 $ hg clone --quiet -U . ../remote3
1144 1150 $ cd ../remote3
1145 1151 $ hg update -q 7
1146 1152 $ echo r > r
1147 1153 $ hg ci -Aqm 10
1148 1154 $ log 'remote()'
1149 1155 7
1150 1156 $ log 'remote("a-b-c-")'
1151 1157 2
1152 1158 $ cd ../repo
1153 1159 $ log 'remote(".a.b.c.", "../remote3")'
1154 1160
1155 1161 tests for concatenation of strings/symbols by "##"
1156 1162
1157 1163 $ try "278 ## '5f5' ## 1ee ## 'ce5'"
1158 1164 (_concat
1159 1165 (_concat
1160 1166 (_concat
1161 1167 ('symbol', '278')
1162 1168 ('string', '5f5'))
1163 1169 ('symbol', '1ee'))
1164 1170 ('string', 'ce5'))
1165 1171 ('string', '2785f51eece5')
1166 1172 0
1167 1173
1168 1174 $ echo 'cat4($1, $2, $3, $4) = $1 ## $2 ## $3 ## $4' >> .hg/hgrc
1169 1175 $ try "cat4(278, '5f5', 1ee, 'ce5')"
1170 1176 (func
1171 1177 ('symbol', 'cat4')
1172 1178 (list
1173 1179 (list
1174 1180 (list
1175 1181 ('symbol', '278')
1176 1182 ('string', '5f5'))
1177 1183 ('symbol', '1ee'))
1178 1184 ('string', 'ce5')))
1179 1185 (_concat
1180 1186 (_concat
1181 1187 (_concat
1182 1188 ('symbol', '278')
1183 1189 ('string', '5f5'))
1184 1190 ('symbol', '1ee'))
1185 1191 ('string', 'ce5'))
1186 1192 ('string', '2785f51eece5')
1187 1193 0
1188 1194
1189 1195 (check concatenation in alias nesting)
1190 1196
1191 1197 $ echo 'cat2($1, $2) = $1 ## $2' >> .hg/hgrc
1192 1198 $ echo 'cat2x2($1, $2, $3, $4) = cat2($1 ## $2, $3 ## $4)' >> .hg/hgrc
1193 1199 $ log "cat2x2(278, '5f5', 1ee, 'ce5')"
1194 1200 0
1195 1201
1196 1202 (check operator priority)
1197 1203
1198 1204 $ echo 'cat2n2($1, $2, $3, $4) = $1 ## $2 or $3 ## $4~2' >> .hg/hgrc
1199 1205 $ log "cat2n2(2785f5, 1eece5, 24286f, 4ae135)"
1200 1206 0
1201 1207 4
1202 1208
1203 1209 $ cd ..
1204 1210
1205 1211 test author/desc/keyword in problematic encoding
1206 1212 # unicode: cp932:
1207 1213 # u30A2 0x83 0x41(= 'A')
1208 1214 # u30C2 0x83 0x61(= 'a')
1209 1215
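For readers unfamiliar with cp932, the two katakana characters above are chosen because the second byte of each collides with an ASCII letter, which is exactly what makes byte-oriented matching "problematic". A quick check of the byte values the comment lists (Python 2, illustrative only, not part of the test):

    # Python 2, illustrative only
    print repr(u'\u30a2'.encode('cp932'))   # '\x83A' -- second byte is ASCII 'A'
    print repr(u'\u30c2'.encode('cp932'))   # '\x83a' -- second byte is ASCII 'a'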
1210 1216 $ hg init problematicencoding
1211 1217 $ cd problematicencoding
1212 1218
1213 1219 $ python > setup.sh <<EOF
1214 1220 > print u'''
1215 1221 > echo a > text
1216 1222 > hg add text
1217 1223 > hg --encoding utf-8 commit -u '\u30A2' -m none
1218 1224 > echo b > text
1219 1225 > hg --encoding utf-8 commit -u '\u30C2' -m none
1220 1226 > echo c > text
1221 1227 > hg --encoding utf-8 commit -u none -m '\u30A2'
1222 1228 > echo d > text
1223 1229 > hg --encoding utf-8 commit -u none -m '\u30C2'
1224 1230 > '''.encode('utf-8')
1225 1231 > EOF
1226 1232 $ sh < setup.sh
1227 1233
1228 1234 test in problematic encoding
1229 1235 $ python > test.sh <<EOF
1230 1236 > print u'''
1231 1237 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30A2)'
1232 1238 > echo ====
1233 1239 > hg --encoding cp932 log --template '{rev}\\n' -r 'author(\u30C2)'
1234 1240 > echo ====
1235 1241 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30A2)'
1236 1242 > echo ====
1237 1243 > hg --encoding cp932 log --template '{rev}\\n' -r 'desc(\u30C2)'
1238 1244 > echo ====
1239 1245 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30A2)'
1240 1246 > echo ====
1241 1247 > hg --encoding cp932 log --template '{rev}\\n' -r 'keyword(\u30C2)'
1242 1248 > '''.encode('cp932')
1243 1249 > EOF
1244 1250 $ sh < test.sh
1245 1251 0
1246 1252 ====
1247 1253 1
1248 1254 ====
1249 1255 2
1250 1256 ====
1251 1257 3
1252 1258 ====
1253 1259 0
1254 1260 2
1255 1261 ====
1256 1262 1
1257 1263 3
1258 1264
1259 1265 $ cd ..