##// END OF EJS Templates
namespaces: add revset for 'named(namespace)'...
Sean Farley -
r23836:3fb61fcb default
parent child Browse files
Show More
@@ -1,3109 +1,3146 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # followfirst: slice parentrevs() down to the first parent only.
    cut = followfirst and 1 or None
    cl = repo.changelog

    def iterate():
        # Walk ancestors in strictly descending rev order, lazily merging
        # the (sorted, descending) input revs into the traversal.
        revqueue, revsnode = None, None
        h = []

        revs.sort(reverse=True)
        revqueue = util.deque(revs)
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        # heapq is a min-heap; revs are stored negated to pop largest first.
        seen = set([node.nullrev])
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                if revsnode and current == revsnode:
                    # Reached the pending input rev: queue the next one.
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
49 49
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # followfirst: only a first-parent link counts as descent.
    cut = followfirst and 1 or None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending sweep over the changelog: a rev is a
            # descendant when one of its considered parents is in 'seen'.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
73 73
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # No path can dip below the smallest root, so prune the walk there.
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # Upward sweep in rev order: a visited rev is on a root->head path
    # whenever one of its parents already is.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
103 103
# Grammar table consumed by the generic parser: maps each token to
# (binding strength, prefix action(s), infix action, suffix action).
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# Bare words that tokenize as operators rather than symbols.
keywords = set(['and', 'or', 'not'])
131 131
def tokenize(program, lookup=None):
    '''
    Parse a revset statement into a stream of tokens

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    # Yields (type, value, position) triples.  'lookup', when given, is
    # used to decide whether a hyphenated word is a known symbol or an
    # expression containing '-' operators.

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c.isalnum() or c in '._@' or ord(c) > 127:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
217 217
218 218 # helpers
219 219
def getstring(x, err):
    """Return the value of a 'string' or 'symbol' parse node.

    Any other node (including a missing one) raises ParseError(err).
    """
    if not x:
        raise error.ParseError(err)
    if x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
224 224
def getlist(x):
    """Flatten a left-nested tree of 'list' parse nodes into a list."""
    if not x:
        return []
    items = []
    # 'list' nodes nest on their first child, so peel from the right.
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
231 231
def getargs(x, min, max, err):
    """Return the argument list of parse tree x, enforcing arity.

    max < 0 means "unbounded".  Raises ParseError(err) when the number
    of arguments falls outside [min, max].
    """
    args = getlist(x)
    count = len(args)
    if count < min:
        raise error.ParseError(err)
    if 0 <= max < count:
        raise error.ParseError(err)
    return args
237 237
def getset(repo, subset, x):
    # Evaluate parse tree x against subset by dispatching on the node
    # type through the module-level 'methods' table.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset-like object; return it unchanged
        return s
    return baseset(s)
245 245
246 246 def _getrevsource(repo, r):
247 247 extra = repo[r].extra()
248 248 for label in ('source', 'transplant_source', 'rebase_source'):
249 249 if label in extra:
250 250 try:
251 251 return repo[extra[label]].rev()
252 252 except error.RepoLookupError:
253 253 pass
254 254 return None
255 255
256 256 # operator methods
257 257
def stringset(repo, subset, x):
    # Resolve a plain revision identifier and clip it to subset.
    rev = repo[x].rev()
    if rev == -1 and len(subset) == len(repo):
        # nullrev is only visible when the subset spans the whole repo
        return baseset([-1])
    if rev in subset:
        return baseset([rev])
    return baseset()
265 265
def symbolset(repo, subset, x):
    # A bare name must not shadow a registered revset function.
    if x not in symbols:
        return stringset(repo, subset, x)
    raise error.ParseError(_("can't use %s here") % x)
270 270
def rangeset(repo, subset, x, y):
    # 'x:y' - endpoints are resolved against the whole repo, then the
    # resulting span is clipped to subset.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # first rev of the left endpoint, last rev of the right one
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range: iterate from m down to n, inclusive
        r = spanset(repo, m, n - 1)
    return r & subset
284 284
def dagrange(repo, subset, x, y):
    # 'x::y' - every rev on a DAG path from x to y, clipped to subset.
    everything = spanset(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    return _revsbetween(repo, roots, heads) & subset
289 289
def andset(repo, subset, x, y):
    # Intersection: evaluate y within the subset that x produced.
    left = getset(repo, subset, x)
    return getset(repo, left, y)
292 292
def orset(repo, subset, x, y):
    # Union: evaluate y only over what x did not match, then concatenate.
    left = getset(repo, subset, x)
    right = getset(repo, subset - left, y)
    return left + right
297 297
def notset(repo, subset, x):
    # Complement within subset.
    matched = getset(repo, subset, x)
    return subset - matched
300 300
def listset(repo, subset, a, b):
    # Bare comma lists are only meaningful as function arguments; any
    # other use is a syntax error.
    raise error.ParseError(_("can't use a list in this context"))
303 303
def func(repo, subset, a, b):
    # 'a' is the function-name node, 'b' the (possibly None) argument tree;
    # dispatch through the registered 'symbols' table.
    kind, name = a[0], a[1]
    if kind != 'symbol' or name not in symbols:
        raise error.ParseError(_("not a function: %s") % a[1])
    return symbols[name](repo, subset, b)
308 308
309 309 # functions
310 310
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # index 1 of the repo.status() tuple holds the added files
    return checkstatus(repo, subset, pat, 1)
322 322
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = spanset(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the running result and r
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
347 347
def _ancestors(repo, subset, x, followfirst=False):
    # Common implementation of ancestors()/_firstancestors().
    heads = getset(repo, spanset(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
354 354
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
360 360
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
365 365
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload is at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
382 382
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # match case-insensitively by lowering both pattern and user names
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
391 391
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect translates the status keyword into the matching revs
    state = set(hbisect.get(repo, status))
    return subset & state
408 408
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    return bisect(repo, subset, x)
413 413
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # an exact name must exist; failing the lookup is a user error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise util.Abort(_("bookmark '%s' does not exist") % bm)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise util.Abort(_("no bookmarks exist that match '%s'")
                                 % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
450 450
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    import branchmap
    # use the unfiltered repo so the branch cache covers hidden revs too
    urepo = repo.unfiltered()
    ucl = urepo.changelog
    getbi = branchmap.revbranchcache(urepo).branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))

    # revspec case: collect the branches of the given set, then keep
    # members of subset that are in the set or on one of those branches
    s = getset(repo, spanset(repo), x)
    b = set()
    for r in s:
        b.add(getbi(ucl, r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
486 486
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # obsolescence bookkeeping precomputes the 'bumped' set
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
497 497
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundle repositories expose 'bundlerevs'
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
509 509
def checkstatus(repo, subset, pat, field):
    # Filter subset to csets whose repo.status() entry at index 'field'
    # has a file matching pat (adds() passes field=1, i.e. added files).
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset ('set:') patterns depend on the context, so the
            # matcher must be rebuilt for every revision
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal file name
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
541 541
def _children(repo, narrow, parentset):
    # Revs in 'narrow' that have at least one parent in 'parentset'.
    cs = set()
    if not parentset:
        return baseset(cs)
    pr = repo.changelog.parentrevs
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            # children are always numbered above their parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
555 555
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    # parents are resolved against the whole repo, not just subset
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
563 563
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
571 571
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
598 598
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match against the recorded source identifier
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
620 620
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # dm is a predicate over unix timestamps (first item of ctx.date())
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
629 629
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower both sides for the case-insensitive substring test
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
642 642
def _descendants(repo, subset, x, followfirst=False):
    # Common implementation of descendants()/_firstdescendants():
    # the roots themselves plus their descendants, ordered like subset.
    roots = getset(repo, spanset(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to an intersection
        result = subset & result
    return result
661 661
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
667 667
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
672 672
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, spanset(repo), x)
    else:
        sources = getall(repo, spanset(repo), x)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
716 716
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # obsolescence bookkeeping precomputes the 'divergent' set
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
725 725
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.draft
    condition = lambda r: phase(repo, r) == target
    # phases may change between calls, so the filter result is not cached
    return subset.filter(condition, cache=False)
735 735
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # obsolescence bookkeeping precomputes the 'extinct' set
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
744 744
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value given, mere presence of the label is enough
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
771 771
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single filelog
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: every working-directory file it matches
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = []     # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
886 886
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)
892 892
def _follow(repo, subset, x, name, followfirst=False):
    # Common implementation of follow()/_followfirst(); 'name' is only
    # used for error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            # follow the history (including copies) of that file
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            return baseset()
    else:
        # no file: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
909 909
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')
917 917
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Same as ``follow([file])``, but restricted to the first parent of
    # every changeset (or file revision).
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
923 923
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # The incoming subset already spans everything under consideration.
    return subset
931 931
932 932 def grep(repo, subset, x):
933 933 """``grep(regex)``
934 934 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
935 935 to ensure special escape characters are handled correctly. Unlike
936 936 ``keyword(string)``, the match is case-sensitive.
937 937 """
938 938 try:
939 939 # i18n: "grep" is a keyword
940 940 gr = re.compile(getstring(x, _("grep requires a string")))
941 941 except re.error, e:
942 942 raise error.ParseError(_('invalid match pattern: %s') % e)
943 943
944 944 def matches(x):
945 945 c = repo[x]
946 946 for e in c.files() + [c.user(), c.description()]:
947 947 if gr.search(e):
948 948 return True
949 949 return False
950 950
951 951 return subset.filter(matches)
952 952
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # The three pattern-like prefixes just accumulate their values.
    collectors = {'p:': pats.append, 'i:': inc.append, 'x:': exc.append}
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix in collectors:
            collectors[prefix](value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # A revision matches if any of its changed files matches.
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1008 1008
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # Delegate to _matchfiles with a plain 'p:' pattern argument.
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1021 1021
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    # Collect the revision of every head of every named branch.
    for branchname, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(repo[n].rev())
    return baseset(headrevs).filter(subset.__contains__)
1032 1032
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # A member is a head iff it is not the parent of another member.
    members = getset(repo, subset, x)
    innerparents = parents(repo, subset, x)
    return members - innerparents
1040 1040
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # Whatever the 'visible' view filters out is, by definition, hidden.
    return subset & repoview.filterrevs(repo, 'visible')
1049 1049
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # Compare lower-cased so the match is case-insensitive.
        for t in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1064 1064
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
        except (TypeError, ValueError):
            # i18n: "limit" is a keyword
            raise error.ParseError(_("limit expects a number"))
    os = getset(repo, spanset(repo), l[0])
    # Examine at most 'lim' candidates, keeping those in 'subset'.
    found = []
    it = iter(os)
    for dummy in xrange(lim):
        try:
            r = it.next()
        except StopIteration:
            break
        if r in subset:
            found.append(r)
    return baseset(found)
1091 1091
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    os = getset(repo, spanset(repo), l[0])
    # Walk the set from the end: reverse, then take like limit() does.
    os.reverse()
    found = []
    it = iter(os)
    for dummy in xrange(lim):
        try:
            r = it.next()
        except StopIteration:
            break
        if r in subset:
            found.append(r)
    return baseset(found)
1119 1119
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if not candidates:
        return baseset()
    m = candidates.max()
    # Only report the maximum if it survives the subset restriction.
    if m in subset:
        return baseset([m])
    return baseset()
1130 1130
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    def ismerge(r):
        # A merge has a non-null second parent.
        return cl.parentrevs(r)[1] != -1
    return subset.filter(ismerge)
1139 1139
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    baserev = min(subset)
    # Count children for each revision >= baserev; slot 0 is baserev.
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1156 1156
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if not candidates:
        return baseset()
    m = candidates.min()
    # Only report the minimum if it survives the subset restriction.
    if m in subset:
        return baseset([m])
    return baseset()
1167 1167
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # Status field 0 holds the modified files.
    return checkstatus(repo, subset, pattern, 0)
1179 1179
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise util.Abort(_("namespace '%s' does not exist") % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise util.Abort(_("no namespace exists that match '%s'")
                             % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            # ns.nodes() yields binary node ids; convert them to revision
            # numbers so the result can be intersected with 'subset'
            # (which holds revs) and pruned with nullrev below.
            names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1215
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # Full-length hash: resolve it directly.
        rn = repo[n].rev()
    else:
        # Prefix: let the changelog disambiguate it.
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1200 1236
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # The obsstore keeps the set of obsolete revisions up to date.
    return subset & obsmod.getrevs(repo, 'obsolete')
1208 1244
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # Single-argument form: exclude every repository head that is
        # neither a descendant of 'include' nor part of it.
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, spanset(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1232 1268
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions.  Omitting the optional set is the
    same as passing all().  If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, spanset(repo), x)
    else:
        dests = getall(repo, spanset(repo), x)

    def _firstsrc(rev):
        # Chase the recorded source chain back to the first operation.
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    sources = set(_firstsrc(r) for r in dests)
    sources.discard(None)
    return subset & sources
1261 1297
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    dest = ''
    if l:
        # i18n: "outgoing" is a keyword
        dest = getstring(l[0], _("outgoing requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # Silence the discovery progress output.
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set([cl.rev(r) for r in outgoing.missing])
    return subset & missing
1284 1320
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, spanset(repo), x))
    ps.discard(node.nullrev)
    return subset & ps
1301 1337
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        ps = repo[x].parents()
        if len(ps) > 1:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, spanset(repo), x))
    ps.discard(node.nullrev)
    return subset & ps
1322 1358
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context.
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, spanset(repo), x):
            ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1336 1372
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself.
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only meaningful when a second parent exists.
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1361 1397
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # Swallow the lookup failure: unknown revisions become empty.
        return baseset()
1375 1411
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    def ispublic(r):
        return repo._phasecache.phase(repo, r) == phases.public
    # cache=False, matching the other phase predicates.
    return subset.filter(ispublic, cache=False)
1385 1421
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    # Identifier to look up remotely; defaults to '.', which is
    # translated to the current branch name below.
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    # Resolve the destination path, defaulting to the 'default' path.
    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # Ask the remote peer to resolve the identifier to a node.
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    # Keep the result only if that node is known locally and in subset.
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1420 1456
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # Status field 2 holds the removed files.
    return checkstatus(repo, subset, pattern, 2)
1432 1468
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # Unknown revision numbers yield an empty set rather than an error.
    if r not in fullreposet(repo):
        return baseset()
    return subset & baseset([r])
1448 1484
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # Reference revisions whose field values candidates are compared to.
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is an alias for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # A candidate matches if ALL selected fields equal those of at
        # least one reference revision.
        # NOTE(review): getinfo(rev) is recomputed for every candidate x;
        # acceptable while 'revs' stays small.
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1560 1596
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # In-place reversal of the evaluated set.
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1568 1604
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, spanset(repo), x)
    # Restrict to revisions that are also in the incoming subset.
    subset = baseset([r for r in s if r in subset])
    # Members with a parent in 's' are not roots; subtract them.
    inneritems = _children(repo, subset, s)
    return subset - inneritems
1577 1613
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    def issecret(r):
        return repo._phasecache.phase(repo, r) == phases.secret
    # cache=False, matching the other phase predicates.
    return subset.filter(issecret, cache=False)
1587 1623
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # Byte-wise complement: sorting the inverted string ascending is
        # equivalent to sorting the original descending.
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # Fast paths: pure revision-number ordering needs no decoration.
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # Decorate-sort-undecorate: build a key tuple per revision.
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE(review): this is a substring test, so besides 'user'
                # and 'author' it also accepts keys like 'r' or 'se';
                # preserved as-is to avoid changing accepted inputs.
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # Append the revision itself as a stable tie-breaker (and so the
        # final comprehension can recover it via e[-1]).
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1650 1686
1651 1687 def _stringmatcher(pattern):
1652 1688 """
1653 1689 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1654 1690 returns the matcher name, pattern, and matcher function.
1655 1691 missing or unknown prefixes are treated as literal matches.
1656 1692
1657 1693 helper for tests:
1658 1694 >>> def test(pattern, *tests):
1659 1695 ... kind, pattern, matcher = _stringmatcher(pattern)
1660 1696 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1661 1697
1662 1698 exact matching (no prefix):
1663 1699 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1664 1700 ('literal', 'abcdefg', [False, False, True])
1665 1701
1666 1702 regex matching ('re:' prefix)
1667 1703 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1668 1704 ('re', 'a.+b', [False, False, True])
1669 1705
1670 1706 force exact matches ('literal:' prefix)
1671 1707 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1672 1708 ('literal', 're:foobar', [False, True])
1673 1709
1674 1710 unknown prefixes are ignored and treated as literals
1675 1711 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1676 1712 ('literal', 'foo:bar', [False, False, True])
1677 1713 """
1678 1714 if pattern.startswith('re:'):
1679 1715 pattern = pattern[3:]
1680 1716 try:
1681 1717 regex = re.compile(pattern)
1682 1718 except re.error, e:
1683 1719 raise error.ParseError(_('invalid regular expression: %s')
1684 1720 % e)
1685 1721 return 're', pattern, regex.search
1686 1722 elif pattern.startswith('literal:'):
1687 1723 pattern = pattern[8:]
1688 1724 return 'literal', pattern, pattern.__eq__
1689 1725
def _substringmatcher(pattern):
    # Like _stringmatcher(), but literal patterns match as substrings
    # instead of requiring full equality.
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher
1695 1731
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # Every tagged revision, excluding the implicit 'tip' tag.
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
        return subset & s
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise util.Abort(_("tag '%s' does not exist") % pattern)
        s = set([repo[tn].rev()])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    return subset & s
1723 1759
def tagged(repo, subset, x):
    # Alias for tag().
    return tag(repo, subset, x)
1726 1762
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # The obsstore keeps the set of unstable revisions up to date.
    return subset & obsmod.getrevs(repo, 'unstable')
1735 1771
1736 1772
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" is simply another name for author().
    return author(repo, subset, x)
1746 1782
# for internal use
def _list(repo, subset, x):
    # Expand a '\0'-separated list of revision identifiers.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    revs = [repo[r].rev() for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
1755 1791
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions (formatspec('%ld'))
    value = getstring(x, "internal error")
    if not value:
        return baseset()
    revs = [int(item) for item in value.split('\0')]
    return baseset([r for r in revs if r in subset])
1764 1800
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of hex nodes (formatspec('%ln'))
    value = getstring(x, "internal error")
    if not value:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(item)) for item in value.split('\0')]
    return baseset([r for r in revs if r in subset])
1774 1810
# map from revset predicate name to its implementation; 'func' parse
# nodes are dispatched through this table. Names starting with '_' are
# internal helpers (emitted by formatspec), not user-facing predicates.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
1845 1882
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): 'named' (present in symbols above) is not listed here,
# presumably because it accepts patterns -- confirm this is intentional
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "_list",
    "_intlist",
    "_hexlist",
    ])
1917 1954
# map from parse-tree node type to the evaluator for that node; getset()
# dispatches through this table when walking a parsed revset tree
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}
1934 1971
def optimize(x, small):
    """Rewrite parse tree x into an equivalent but cheaper-to-evaluate tree.

    Returns a (weight, tree) pair.  The weight is a rough relative cost
    estimate used to put the cheaper operand of an 'and' first.  'small'
    hints that a small result is expected (e.g. inside an 'and'), which
    makes single-revision operands comparatively more attractive.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # a - b  ->  a and not b
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        # ::x  ->  ancestors(x)
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # x::  ->  descendants(x)
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        # :x  ->  0:x
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        # x:  ->  x:tip
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        # NOTE(review): only the weights are swapped here, not ta/tb, and
        # max(wa, wb) is unaffected by the swap -- this looks like dead
        # code; confirm whether the operands were meant to be reordered
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses are purely syntactic: drop the node
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # rough cost classes for the known predicates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2030 2067
2031 2068 _aliasarg = ('func', ('symbol', '_aliasarg'))
2032 2069 def _getaliasarg(tree):
2033 2070 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2034 2071 return X, None otherwise.
2035 2072 """
2036 2073 if (len(tree) == 3 and tree[:2] == _aliasarg
2037 2074 and tree[2][0] == 'string'):
2038 2075 return tree[2][1]
2039 2076 return None
2040 2077
2041 2078 def _checkaliasarg(tree, known=None):
2042 2079 """Check tree contains no _aliasarg construct or only ones which
2043 2080 value is in known. Used to avoid alias placeholders injection.
2044 2081 """
2045 2082 if isinstance(tree, tuple):
2046 2083 arg = _getaliasarg(tree)
2047 2084 if arg is not None and (not known or arg not in known):
2048 2085 raise error.ParseError(_("not a function: %s") % '_aliasarg')
2049 2086 for t in tree:
2050 2087 _checkaliasarg(t, known)
2051 2088
class revsetalias(object):
    """A single [revsetalias] configuration entry, parsed eagerly.

    Parse failures are stored in `error` rather than raised, so that a
    broken alias only aborts if it is actually referenced.
    """
    # matches "name(arg1, arg2, ...)" -- a parameterized alias definition
    funcre = re.compile('^([^(]+)\(([^)]+)\)$')
    args = None

    # error message at parsing, or None
    error = None
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        m = self.funcre.search(name)
        if m:
            self.name = m.group(1)
            self.tree = ('func', ('symbol', m.group(1)))
            self.args = [x.strip() for x in m.group(2).split(',')]
            for arg in self.args:
                # _aliasarg() is an unknown symbol only used separate
                # alias argument placeholders from regular strings.
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))
        else:
            self.name = name
            self.tree = ('symbol', name)

        try:
            self.replacement, pos = parse(value)
            if pos != len(value):
                raise error.ParseError(_('invalid token'), pos)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            if len(inst.args) > 1:
                self.error = _('at %s: %s') % (inst.args[1], inst.args[0])
            else:
                self.error = inst.args[0]
2092 2129
2093 2130 def _getalias(aliases, tree):
2094 2131 """If tree looks like an unexpanded alias, return it. Return None
2095 2132 otherwise.
2096 2133 """
2097 2134 if isinstance(tree, tuple) and tree:
2098 2135 if tree[0] == 'symbol' and len(tree) == 2:
2099 2136 name = tree[1]
2100 2137 alias = aliases.get(name)
2101 2138 if alias and alias.args is None and alias.tree == tree:
2102 2139 return alias
2103 2140 if tree[0] == 'func' and len(tree) > 1:
2104 2141 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2105 2142 name = tree[1][1]
2106 2143 alias = aliases.get(name)
2107 2144 if alias and alias.args is not None and alias.tree == tree[:2]:
2108 2145 return alias
2109 2146 return None
2110 2147
2111 2148 def _expandargs(tree, args):
2112 2149 """Replace _aliasarg instances with the substitution value of the
2113 2150 same name in args, recursively.
2114 2151 """
2115 2152 if not tree or not isinstance(tree, tuple):
2116 2153 return tree
2117 2154 arg = _getaliasarg(tree)
2118 2155 if arg is not None:
2119 2156 return args[arg]
2120 2157 return tuple(_expandargs(t, args) for t in tree)
2121 2158
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    to detect cycles); 'cache' memoizes each alias's expanded
    replacement so it is computed only once.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(_('failed to parse revset alias "%s": %s') %
                             (alias.name, alias.error))
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            # expand the replacement body once and memoize it
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # substitute the call's actual arguments for the placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2156 2193
def findaliases(ui, tree, showwarning=None):
    # reject preexisting placeholder constructs, then expand every
    # [revsetalias] definition from the configuration into the tree
    _checkaliasarg(tree)
    aliases = {}
    for name, value in ui.configitems('revsetalias'):
        entry = revsetalias(name, value)
        aliases[entry.name] = entry
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                msg = _('failed to parse revset alias "%s": %s'
                        ) % (name, alias.error)
                showwarning(_('warning: %s\n') % (msg))
                alias.warned = True
    return tree
2173 2210
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        # flatten nested _concat nodes with an explicit stack
        stack = [tree]
        parts = []
        while stack:
            top = stack.pop()
            if top[0] == '_concat':
                # push children reversed so they pop left-to-right
                stack.extend(reversed(top[1:]))
            elif top[0] in ('string', 'symbol'):
                parts.append(top[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (top[0])
                raise error.ParseError(msg)
        return ('string', ''.join(parts))
    else:
        return tuple(foldconcat(child) for child in tree)
2194 2231
def parse(spec, lookup=None):
    # run the revset grammar over spec, returning (tree, pos)
    return parser.parser(tokenize, elements).parse(spec, lookup=lookup)
2198 2235
def match(ui, spec, repo=None):
    """Compile a revset spec into a function (repo, subset) -> smartset.

    When a ui is given, [revsetalias] definitions are expanded; the tree
    is optimized before being wrapped.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    tree, pos = parse(spec, lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        # accept either a smartset or any plain iterable of revisions
        if util.safehasattr(subset, 'isascending'):
            return getset(repo, subset, tree)
        return getset(repo, baseset(subset), tree)
    return mfunc
2219 2256
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a single-quoted, escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render one argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of type t: d/s/n/b lists collapse to the internal
        # _list/_intlist/_hexlist forms; other types (e.g. 'r') are split
        # recursively into a balanced 'or' tree
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    # scan expr, copying literal text and expanding %-specifiers
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2313 2350
def prettyformat(tree):
    """Render a parsed revset tree as an indented multi-line string."""
    def _walk(node, level, rows):
        if not isinstance(node, tuple) or node[0] in ('string', 'symbol'):
            # leaves are shown via their plain repr
            rows.append((level, str(node)))
        else:
            rows.append((level, '(%s' % node[0]))
            for child in node[1:]:
                _walk(child, level + 1, rows)
            # close the paren on the last emitted row
            rows[-1:] = [(rows[-1][0], rows[-1][1] + ')')]

    rows = []
    _walk(tree, 0, rows)
    return '\n'.join((' ' * lvl + text) for lvl, text in rows)
2328 2365
def depth(tree):
    # nesting depth of a parse tree; non-tuples (leaves) count as 0
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2334 2371
def funcsused(tree):
    # collect the names of all revset functions referenced in a parse tree
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used |= funcsused(subtree)
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2345 2382
class abstractsmartset(object):
    """Base interface for the lazily-evaluated revision sets ("smartsets")
    used to evaluate revsets.  Concrete subclasses must provide iteration,
    membership, ordering queries, and first/last/len/reverse/sort."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of a fast ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first value of a fast descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2448 2485
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            data = list(data)
        # revisions in original (insertion) order
        self._list = data
        # tri-state iteration order: None = insertion order,
        # True = ascending, False = descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # set view of the revisions, for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy, shared by both ascending and descending iteration
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind membership testing directly to the underlying set
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order: reverse the concrete list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None
2539 2576
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoized results of `condition`, keyed by revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # only revisions present in the subset are even tested
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # lazily yield the members of `it` satisfying the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast ascending iteration is available iff the subset has it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast descending iteration is available iff the subset has it
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # Fix: the methods must be *called* -- testing the bound method
        # objects themselves is always true, so the first branch was
        # taken unconditionally and both arms wrongly used fastdesc.
        if self._subset.isascending():
            # ascending iteration ends on the largest revision, which is
            # the first value of a fast descending iteration
            it = self.fastdesc
        elif self._subset.isdescending():
            # descending iteration ends on the smallest revision
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None
2623 2660
2624 2661 class addset(abstractsmartset):
2625 2662 """Represent the addition of two sets
2626 2663
2627 2664 Wrapper structure for lazily adding two structures without losing much
2628 2665 performance on the __contains__ method
2629 2666
2630 2667 If the ascending attribute is set, that means the two structures are
2631 2668 ordered in either an ascending or descending way. Therefore, we can add
2632 2669 them maintaining the order by iterating over both at the same time
2633 2670 """
    def __init__(self, revs1, revs2, ascending=None):
        """Combine two smartsets; per the class docstring, ascending may be
        None (no common order) or True/False when both operands share that
        iteration order."""
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter looks unused in the visible methods -- confirm
        self._iter = None
        self._ascending = ascending
        # lazily-built baseset of the union, in generation order
        self._genlist = None
        # sorted list of the generated values, built on demand
        self._asclist = None
2641 2678
    def __len__(self):
        # materializes the whole union via _list; potentially expensive
        return len(self._list)
2644 2681
    def __nonzero__(self):
        # non-empty iff either operand is; avoids materializing the union
        return bool(self._r1) or bool(self._r2)
2647 2684
    @util.propertycache
    def _list(self):
        # cache the fully generated union as a baseset
        if not self._genlist:
            self._genlist = baseset(self._iterator())
        return self._genlist
2653 2690
    def _iterator(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            def gen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    # skip values already produced from the first set
                    if not inr1(r):
                        yield r
            gen = gen()
        else:
            iter1 = iter(self._r1)
            iter2 = iter(self._r2)
            # ordered merge of the two iterations
            gen = self._iterordered(self._ascending, iter1, iter2)
        return gen
2678 2715
    def __iter__(self):
        if self._ascending is None:
            # unordered: reuse the cached union when available
            if self._genlist:
                return iter(self._genlist)
            return iter(self._iterator())
        self._trysetasclist()
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # consume the gen and try again
            self._list
            return iter(self)
        return it()
2694 2731
    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        # only possible once the union has been fully generated
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)
2699 2736
    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # already materialized and sorted: iterate it directly
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # a merge needs fast ascending iteration on both sides
            return None
        return lambda: self._iterordered(True, iter1(), iter2())
2710 2747
2711 2748 @property
2712 2749 def fastdesc(self):
2713 2750 self._trysetasclist()
2714 2751 if self._asclist is not None:
2715 2752 return self._asclist.__reversed__
2716 2753 iter1 = self._r1.fastdesc
2717 2754 iter2 = self._r2.fastdesc
2718 2755 if None in (iter1, iter2):
2719 2756 return None
2720 2757 return lambda: self._iterordered(False, iter1(), iter2())
2721 2758
2722 2759 def _iterordered(self, ascending, iter1, iter2):
2723 2760 """produce an ordered iteration from two iterators with the same order
2724 2761
2725 2762 The ascending is used to indicated the iteration direction.
2726 2763 """
2727 2764 choice = max
2728 2765 if ascending:
2729 2766 choice = min
2730 2767
2731 2768 val1 = None
2732 2769 val2 = None
2733 2770
2734 2771 choice = max
2735 2772 if ascending:
2736 2773 choice = min
2737 2774 try:
2738 2775 # Consume both iterators in an ordered way until one is
2739 2776 # empty
2740 2777 while True:
2741 2778 if val1 is None:
2742 2779 val1 = iter1.next()
2743 2780 if val2 is None:
2744 2781 val2 = iter2.next()
2745 2782 next = choice(val1, val2)
2746 2783 yield next
2747 2784 if val1 == next:
2748 2785 val1 = None
2749 2786 if val2 == next:
2750 2787 val2 = None
2751 2788 except StopIteration:
2752 2789 # Flush any remaining values and consume the other one
2753 2790 it = iter2
2754 2791 if val1 is not None:
2755 2792 yield val1
2756 2793 it = iter1
2757 2794 elif val2 is not None:
2758 2795 # might have been equality and both are empty
2759 2796 yield val2
2760 2797 for val in it:
2761 2798 yield val
2762 2799
2763 2800 def __contains__(self, x):
2764 2801 return x in self._r1 or x in self._r2
2765 2802
2766 2803 def sort(self, reverse=False):
2767 2804 """Sort the added set
2768 2805
2769 2806 For this we use the cached list with all the generated values and if we
2770 2807 know they are ascending or descending we can sort them in a smart way.
2771 2808 """
2772 2809 self._ascending = not reverse
2773 2810
2774 2811 def isascending(self):
2775 2812 return self._ascending is not None and self._ascending
2776 2813
2777 2814 def isdescending(self):
2778 2815 return self._ascending is not None and not self._ascending
2779 2816
2780 2817 def reverse(self):
2781 2818 if self._ascending is None:
2782 2819 self._list.reverse()
2783 2820 else:
2784 2821 self._ascending = not self._ascending
2785 2822
2786 2823 def first(self):
2787 2824 for x in self:
2788 2825 return x
2789 2826 return None
2790 2827
2791 2828 def last(self):
2792 2829 self.reverse()
2793 2830 val = self.first()
2794 2831 self.reverse()
2795 2832 return val
2796 2833
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the order of `gen` is unknown, True when it
                 yields ascending values, False when descending.  A known
                 order enables early-exit membership tests and a matching
                 fast-iteration attribute.
        """
        self._gen = gen
        self._asclist = None    # sorted snapshot, built once gen is exhausted
        self._cache = {}        # membership results discovered so far
        self._genlist = []      # values yielded so far, in generator order
        self._finished = False  # True once `gen` is fully consumed
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # non-empty iff the generator yields at least one value
        for r in self:
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # generator is ascending: x cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # generator is descending: x cannot appear later
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """yield from the wrapped generator, recording every value

        Once the generator is exhausted, flip to the finished state and
        install sorted-list based fastasc/fastdesc iterators.
        """
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # exhaust the generator; the cached list then holds every value
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return it().next()
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # bug fix: retry last(), not first() -- consuming the generator
            # installs the fast iterators, so the recursion must fetch the
            # *last* element; the old `return self.first()` returned the
            # first element instead
            return self.last()
        if self:
            return it().next()
        return None
2965 3002
def spanset(repo, start=None, end=None):
    """factory function to dispatch between fullreposet and actual spanset

    Feel free to update all spanset call sites and kill this function at some
    point.
    """
    wholerepo = start is None and end is None
    return fullreposet(repo) if wholerepo else _spanset(repo, start, end)
2975 3012
2976 3013
class _spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end:   first revision excluded (last+1)
               (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize the bounds so that _start <= _end always holds;
            # the iteration direction is carried by _ascending alone
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """yield revisions from iterrange, skipping hidden ones"""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # bug fix: the old test also accepted `end < rev <= start`,
                # which can never be true because __init__ guarantees
                # _start <= _end; only hidden revs inside the span count
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
3080 3117
class fullreposet(_spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization.
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also
        be in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            result = other
        else:
            # `other` was used with "&", so it should behave like a set;
            # it is not a smartset: drop hidden revisions and wrap it
            result = baseset(other - self._hiddenrevs)
        result.sort(reverse=self.isdescending())
        return result
3107 3144
# tell hggettext to extract docstrings from these functions:
# NOTE(review): `symbols` appears to be the revset predicate table defined
# earlier in this module -- outside this view, confirm before relying on it.
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now