_descendants: directly use smartset...
Pierre-Yves David
r22830:1d1da8ab default
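
The change lands at the tail of `_descendants`: the union of the two filteredsets (an addset built from the smartset classes) is now returned directly instead of being re-wrapped in a no-op filteredset, presumably because addset now provides everything callers need. A minimal before/after sketch of just that tail, paraphrased from the diff below (the surrounding code is unchanged):

    # before: addset was assumed to lack parts of the interface callers use,
    # so the union was wrapped in a pass-through filteredset
    result = (filteredset(s, subset.__contains__, ascending=True) +
              filteredset(args, subset.__contains__, ascending=True))
    return filteredset(result, lambda r: True, ascending=True)

    # after: return the addset (a smartset) as-is
    return result
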
@@ -1,3036 +1,3034 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return generatorset(iterate(), iterasc=False)
50 50
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return generatorset(iterate(), iterasc=True)
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset()
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = list(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset()
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
213 213
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'set'):
239 239 return s
240 240 return baseset(s)
241 241
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset()
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 cl = baseset(repo.changelog)
269 269 m = getset(repo, cl, x)
270 270 n = getset(repo, cl, y)
271 271
272 272 if not m or not n:
273 273 return baseset()
274 274 m, n = m.first(), n.last()
275 275
276 276 if m < n:
277 277 r = spanset(repo, m, n + 1)
278 278 else:
279 279 r = spanset(repo, m, n - 1)
280 280 return r & subset
281 281
282 282 def dagrange(repo, subset, x, y):
283 283 r = spanset(repo)
284 284 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
285 285 s = subset.set()
286 286 return xs.filter(s.__contains__)
287 287
288 288 def andset(repo, subset, x, y):
289 289 return getset(repo, getset(repo, subset, x), y)
290 290
291 291 def orset(repo, subset, x, y):
292 292 xl = getset(repo, subset, x)
293 293 yl = getset(repo, subset - xl, y)
294 294 return xl + yl
295 295
296 296 def notset(repo, subset, x):
297 297 return subset - getset(repo, subset, x)
298 298
299 299 def listset(repo, subset, a, b):
300 300 raise error.ParseError(_("can't use a list in this context"))
301 301
302 302 def func(repo, subset, a, b):
303 303 if a[0] == 'symbol' and a[1] in symbols:
304 304 return symbols[a[1]](repo, subset, b)
305 305 raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
309 309 def adds(repo, subset, x):
310 310 """``adds(pattern)``
311 311 Changesets that add a file matching pattern.
312 312
313 313 The pattern without explicit kind like ``glob:`` is expected to be
314 314 relative to the current directory and match against a file or a
315 315 directory.
316 316 """
317 317 # i18n: "adds" is a keyword
318 318 pat = getstring(x, _("adds requires a pattern"))
319 319 return checkstatus(repo, subset, pat, 1)
320 320
321 321 def ancestor(repo, subset, x):
322 322 """``ancestor(*changeset)``
323 323 A greatest common ancestor of the changesets.
324 324
325 325 Accepts 0 or more changesets.
326 326 Will return empty list when passed no args.
327 327 Greatest common ancestor of a single changeset is that changeset.
328 328 """
329 329 # i18n: "ancestor" is a keyword
330 330 l = getlist(x)
331 331 rl = spanset(repo)
332 332 anc = None
333 333
334 334 # (getset(repo, rl, i) for i in l) generates a list of lists
335 335 for revs in (getset(repo, rl, i) for i in l):
336 336 for r in revs:
337 337 if anc is None:
338 338 anc = repo[r]
339 339 else:
340 340 anc = anc.ancestor(repo[r])
341 341
342 342 if anc is not None and anc.rev() in subset:
343 343 return baseset([anc.rev()])
344 344 return baseset()
345 345
346 346 def _ancestors(repo, subset, x, followfirst=False):
347 347 args = getset(repo, spanset(repo), x)
348 348 if not args:
349 349 return baseset()
350 350 s = _revancestors(repo, args, followfirst)
351 351 return subset.filter(s.__contains__)
352 352
353 353 def ancestors(repo, subset, x):
354 354 """``ancestors(set)``
355 355 Changesets that are ancestors of a changeset in set.
356 356 """
357 357 return _ancestors(repo, subset, x)
358 358
359 359 def _firstancestors(repo, subset, x):
360 360 # ``_firstancestors(set)``
361 361 # Like ``ancestors(set)`` but follows only the first parents.
362 362 return _ancestors(repo, subset, x, followfirst=True)
363 363
364 364 def ancestorspec(repo, subset, x, n):
365 365 """``set~n``
366 366 Changesets that are the Nth ancestor (first parents only) of a changeset
367 367 in set.
368 368 """
369 369 try:
370 370 n = int(n[1])
371 371 except (TypeError, ValueError):
372 372 raise error.ParseError(_("~ expects a number"))
373 373 ps = set()
374 374 cl = repo.changelog
375 375 for r in getset(repo, baseset(cl), x):
376 376 for i in range(n):
377 377 r = cl.parentrevs(r)[0]
378 378 ps.add(r)
379 379 return subset & ps
380 380
381 381 def author(repo, subset, x):
382 382 """``author(string)``
383 383 Alias for ``user(string)``.
384 384 """
385 385 # i18n: "author" is a keyword
386 386 n = encoding.lower(getstring(x, _("author requires a string")))
387 387 kind, pattern, matcher = _substringmatcher(n)
388 388 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
389 389
390 390 def only(repo, subset, x):
391 391 """``only(set, [set])``
392 392 Changesets that are ancestors of the first set that are not ancestors
393 393 of any other head in the repo. If a second set is specified, the result
394 394 is ancestors of the first set that are not ancestors of the second set
395 395 (i.e. ::<set1> - ::<set2>).
396 396 """
397 397 cl = repo.changelog
398 398 # i18n: "only" is a keyword
399 399 args = getargs(x, 1, 2, _('only takes one or two arguments'))
400 400 include = getset(repo, spanset(repo), args[0]).set()
401 401 if len(args) == 1:
402 402 if len(include) == 0:
403 403 return baseset()
404 404
405 405 descendants = set(_revdescendants(repo, include, False))
406 406 exclude = [rev for rev in cl.headrevs()
407 407 if not rev in descendants and not rev in include]
408 408 else:
409 409 exclude = getset(repo, spanset(repo), args[1])
410 410
411 411 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
412 412 return filteredset(subset, results.__contains__)
413 413
414 414 def bisect(repo, subset, x):
415 415 """``bisect(string)``
416 416 Changesets marked in the specified bisect status:
417 417
418 418 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
419 419 - ``goods``, ``bads`` : csets topologically good/bad
420 420 - ``range`` : csets taking part in the bisection
421 421 - ``pruned`` : csets that are goods, bads or skipped
422 422 - ``untested`` : csets whose fate is yet unknown
423 423 - ``ignored`` : csets ignored due to DAG topology
424 424 - ``current`` : the cset currently being bisected
425 425 """
426 426 # i18n: "bisect" is a keyword
427 427 status = getstring(x, _("bisect requires a string")).lower()
428 428 state = set(hbisect.get(repo, status))
429 429 return subset & state
430 430
431 431 # Backward-compatibility
432 432 # - no help entry so that we do not advertise it any more
433 433 def bisected(repo, subset, x):
434 434 return bisect(repo, subset, x)
435 435
436 436 def bookmark(repo, subset, x):
437 437 """``bookmark([name])``
438 438 The named bookmark or all bookmarks.
439 439
440 440 If `name` starts with `re:`, the remainder of the name is treated as
441 441 a regular expression. To match a bookmark that actually starts with `re:`,
442 442 use the prefix `literal:`.
443 443 """
444 444 # i18n: "bookmark" is a keyword
445 445 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
446 446 if args:
447 447 bm = getstring(args[0],
448 448 # i18n: "bookmark" is a keyword
449 449 _('the argument to bookmark must be a string'))
450 450 kind, pattern, matcher = _stringmatcher(bm)
451 451 bms = set()
452 452 if kind == 'literal':
453 453 bmrev = repo._bookmarks.get(pattern, None)
454 454 if not bmrev:
455 455 raise util.Abort(_("bookmark '%s' does not exist") % bm)
456 456 bms.add(repo[bmrev].rev())
457 457 else:
458 458 matchrevs = set()
459 459 for name, bmrev in repo._bookmarks.iteritems():
460 460 if matcher(name):
461 461 matchrevs.add(bmrev)
462 462 if not matchrevs:
463 463 raise util.Abort(_("no bookmarks exist that match '%s'")
464 464 % pattern)
465 465 for bmrev in matchrevs:
466 466 bms.add(repo[bmrev].rev())
467 467 else:
468 468 bms = set([repo[r].rev()
469 469 for r in repo._bookmarks.values()])
470 470 bms -= set([node.nullrev])
471 471 return subset & bms
472 472
473 473 def branch(repo, subset, x):
474 474 """``branch(string or set)``
475 475 All changesets belonging to the given branch or the branches of the given
476 476 changesets.
477 477
478 478 If `string` starts with `re:`, the remainder of the name is treated as
479 479 a regular expression. To match a branch that actually starts with `re:`,
480 480 use the prefix `literal:`.
481 481 """
482 482 try:
483 483 b = getstring(x, '')
484 484 except error.ParseError:
485 485 # not a string, but another revspec, e.g. tip()
486 486 pass
487 487 else:
488 488 kind, pattern, matcher = _stringmatcher(b)
489 489 if kind == 'literal':
490 490 # note: falls through to the revspec case if no branch with
491 491 # this name exists
492 492 if pattern in repo.branchmap():
493 493 return subset.filter(lambda r: matcher(repo[r].branch()))
494 494 else:
495 495 return subset.filter(lambda r: matcher(repo[r].branch()))
496 496
497 497 s = getset(repo, spanset(repo), x)
498 498 b = set()
499 499 for r in s:
500 500 b.add(repo[r].branch())
501 501 s = s.set()
502 502 return subset.filter(lambda r: r in s or repo[r].branch() in b)
503 503
504 504 def bumped(repo, subset, x):
505 505 """``bumped()``
506 506 Mutable changesets marked as successors of public changesets.
507 507
508 508 Only non-public and non-obsolete changesets can be `bumped`.
509 509 """
510 510 # i18n: "bumped" is a keyword
511 511 getargs(x, 0, 0, _("bumped takes no arguments"))
512 512 bumped = obsmod.getrevs(repo, 'bumped')
513 513 return subset & bumped
514 514
515 515 def bundle(repo, subset, x):
516 516 """``bundle()``
517 517 Changesets in the bundle.
518 518
519 519 Bundle must be specified by the -R option."""
520 520
521 521 try:
522 522 bundlerevs = repo.changelog.bundlerevs
523 523 except AttributeError:
524 524 raise util.Abort(_("no bundle provided - specify with -R"))
525 525 return subset & bundlerevs
526 526
527 527 def checkstatus(repo, subset, pat, field):
528 528 hasset = matchmod.patkind(pat) == 'set'
529 529
530 530 def matches(x):
531 531 m = None
532 532 fname = None
533 533 c = repo[x]
534 534 if not m or hasset:
535 535 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
536 536 if not m.anypats() and len(m.files()) == 1:
537 537 fname = m.files()[0]
538 538 if fname is not None:
539 539 if fname not in c.files():
540 540 return False
541 541 else:
542 542 for f in c.files():
543 543 if m(f):
544 544 break
545 545 else:
546 546 return False
547 547 files = repo.status(c.p1().node(), c.node())[field]
548 548 if fname is not None:
549 549 if fname in files:
550 550 return True
551 551 else:
552 552 for f in files:
553 553 if m(f):
554 554 return True
555 555
556 556 return subset.filter(matches)
557 557
558 558 def _children(repo, narrow, parentset):
559 559 cs = set()
560 560 if not parentset:
561 561 return baseset(cs)
562 562 pr = repo.changelog.parentrevs
563 563 minrev = min(parentset)
564 564 for r in narrow:
565 565 if r <= minrev:
566 566 continue
567 567 for p in pr(r):
568 568 if p in parentset:
569 569 cs.add(r)
570 570 return baseset(cs)
571 571
572 572 def children(repo, subset, x):
573 573 """``children(set)``
574 574 Child changesets of changesets in set.
575 575 """
576 576 s = getset(repo, baseset(repo), x).set()
577 577 cs = _children(repo, subset, s)
578 578 return subset & cs
579 579
580 580 def closed(repo, subset, x):
581 581 """``closed()``
582 582 Changeset is closed.
583 583 """
584 584 # i18n: "closed" is a keyword
585 585 getargs(x, 0, 0, _("closed takes no arguments"))
586 586 return subset.filter(lambda r: repo[r].closesbranch())
587 587
588 588 def contains(repo, subset, x):
589 589 """``contains(pattern)``
590 590 The revision's manifest contains a file matching pattern (but might not
591 591 modify it). See :hg:`help patterns` for information about file patterns.
592 592
593 593 The pattern without explicit kind like ``glob:`` is expected to be
594 594 relative to the current directory and match against a file exactly
595 595 for efficiency.
596 596 """
597 597 # i18n: "contains" is a keyword
598 598 pat = getstring(x, _("contains requires a pattern"))
599 599
600 600 def matches(x):
601 601 if not matchmod.patkind(pat):
602 602 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
603 603 if pats in repo[x]:
604 604 return True
605 605 else:
606 606 c = repo[x]
607 607 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
608 608 for f in c.manifest():
609 609 if m(f):
610 610 return True
611 611 return False
612 612
613 613 return subset.filter(matches)
614 614
615 615 def converted(repo, subset, x):
616 616 """``converted([id])``
617 617 Changesets converted from the given identifier in the old repository if
618 618 present, or all converted changesets if no identifier is specified.
619 619 """
620 620
621 621 # There is exactly no chance of resolving the revision, so do a simple
622 622 # string compare and hope for the best
623 623
624 624 rev = None
625 625 # i18n: "converted" is a keyword
626 626 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
627 627 if l:
628 628 # i18n: "converted" is a keyword
629 629 rev = getstring(l[0], _('converted requires a revision'))
630 630
631 631 def _matchvalue(r):
632 632 source = repo[r].extra().get('convert_revision', None)
633 633 return source is not None and (rev is None or source.startswith(rev))
634 634
635 635 return subset.filter(lambda r: _matchvalue(r))
636 636
637 637 def date(repo, subset, x):
638 638 """``date(interval)``
639 639 Changesets within the interval, see :hg:`help dates`.
640 640 """
641 641 # i18n: "date" is a keyword
642 642 ds = getstring(x, _("date requires a string"))
643 643 dm = util.matchdate(ds)
644 644 return subset.filter(lambda x: dm(repo[x].date()[0]))
645 645
646 646 def desc(repo, subset, x):
647 647 """``desc(string)``
648 648 Search commit message for string. The match is case-insensitive.
649 649 """
650 650 # i18n: "desc" is a keyword
651 651 ds = encoding.lower(getstring(x, _("desc requires a string")))
652 652
653 653 def matches(x):
654 654 c = repo[x]
655 655 return ds in encoding.lower(c.description())
656 656
657 657 return subset.filter(matches)
658 658
659 659 def _descendants(repo, subset, x, followfirst=False):
660 660 args = getset(repo, spanset(repo), x)
661 661 if not args:
662 662 return baseset()
663 663 s = _revdescendants(repo, args, followfirst)
664 664
665 665 # Both sets need to be ascending in order to lazily return the union
666 666 # in the correct order.
667 667 args.ascending()
668 668 result = (filteredset(s, subset.__contains__, ascending=True) +
669 669 filteredset(args, subset.__contains__, ascending=True))
670 670
671 # Wrap result in a filteredset since it's an addset, which doesn't
672 # implement all the necessary functions to be consumed by callers.
673 return filteredset(result, lambda r: True, ascending=True)
671 return result
674 672
675 673 def descendants(repo, subset, x):
676 674 """``descendants(set)``
677 675 Changesets which are descendants of changesets in set.
678 676 """
679 677 return _descendants(repo, subset, x)
680 678
681 679 def _firstdescendants(repo, subset, x):
682 680 # ``_firstdescendants(set)``
683 681 # Like ``descendants(set)`` but follows only the first parents.
684 682 return _descendants(repo, subset, x, followfirst=True)
685 683
686 684 def destination(repo, subset, x):
687 685 """``destination([set])``
688 686 Changesets that were created by a graft, transplant or rebase operation,
689 687 with the given revisions specified as the source. Omitting the optional set
690 688 is the same as passing all().
691 689 """
692 690 if x is not None:
693 691 args = getset(repo, spanset(repo), x).set()
694 692 else:
695 693 args = getall(repo, spanset(repo), x).set()
696 694
697 695 dests = set()
698 696
699 697 # subset contains all of the possible destinations that can be returned, so
700 698 # iterate over them and see if their source(s) were provided in the args.
701 699 # Even if the immediate src of r is not in the args, src's source (or
702 700 # further back) may be. Scanning back further than the immediate src allows
703 701 # transitive transplants and rebases to yield the same results as transitive
704 702 # grafts.
705 703 for r in subset:
706 704 src = _getrevsource(repo, r)
707 705 lineage = None
708 706
709 707 while src is not None:
710 708 if lineage is None:
711 709 lineage = list()
712 710
713 711 lineage.append(r)
714 712
715 713 # The visited lineage is a match if the current source is in the arg
716 714 # set. Since every candidate dest is visited by way of iterating
717 715 # subset, any dests further back in the lineage will be tested by a
718 716 # different iteration over subset. Likewise, if the src was already
719 717 # selected, the current lineage can be selected without going back
720 718 # further.
721 719 if src in args or src in dests:
722 720 dests.update(lineage)
723 721 break
724 722
725 723 r = src
726 724 src = _getrevsource(repo, r)
727 725
728 726 return subset.filter(dests.__contains__)
729 727
730 728 def divergent(repo, subset, x):
731 729 """``divergent()``
732 730 Final successors of changesets with an alternative set of final successors.
733 731 """
734 732 # i18n: "divergent" is a keyword
735 733 getargs(x, 0, 0, _("divergent takes no arguments"))
736 734 divergent = obsmod.getrevs(repo, 'divergent')
737 735 return subset & divergent
738 736
739 737 def draft(repo, subset, x):
740 738 """``draft()``
741 739 Changeset in draft phase."""
742 740 # i18n: "draft" is a keyword
743 741 getargs(x, 0, 0, _("draft takes no arguments"))
744 742 pc = repo._phasecache
745 743 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
746 744
747 745 def extinct(repo, subset, x):
748 746 """``extinct()``
749 747 Obsolete changesets with obsolete descendants only.
750 748 """
751 749 # i18n: "extinct" is a keyword
752 750 getargs(x, 0, 0, _("extinct takes no arguments"))
753 751 extincts = obsmod.getrevs(repo, 'extinct')
754 752 return subset & extincts
755 753
756 754 def extra(repo, subset, x):
757 755 """``extra(label, [value])``
758 756 Changesets with the given label in the extra metadata, with the given
759 757 optional value.
760 758
761 759 If `value` starts with `re:`, the remainder of the value is treated as
762 760 a regular expression. To match a value that actually starts with `re:`,
763 761 use the prefix `literal:`.
764 762 """
765 763
766 764 # i18n: "extra" is a keyword
767 765 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
768 766 # i18n: "extra" is a keyword
769 767 label = getstring(l[0], _('first argument to extra must be a string'))
770 768 value = None
771 769
772 770 if len(l) > 1:
773 771 # i18n: "extra" is a keyword
774 772 value = getstring(l[1], _('second argument to extra must be a string'))
775 773 kind, value, matcher = _stringmatcher(value)
776 774
777 775 def _matchvalue(r):
778 776 extra = repo[r].extra()
779 777 return label in extra and (value is None or matcher(extra[label]))
780 778
781 779 return subset.filter(lambda r: _matchvalue(r))
782 780
783 781 def filelog(repo, subset, x):
784 782 """``filelog(pattern)``
785 783 Changesets connected to the specified filelog.
786 784
787 785 For performance reasons, visits only revisions mentioned in the file-level
788 786 filelog, rather than filtering through all changesets (much faster, but
789 787 doesn't include deletes or duplicate changes). For a slower, more accurate
790 788 result, use ``file()``.
791 789
792 790 The pattern without explicit kind like ``glob:`` is expected to be
793 791 relative to the current directory and match against a file exactly
794 792 for efficiency.
795 793 """
796 794
797 795 # i18n: "filelog" is a keyword
798 796 pat = getstring(x, _("filelog requires a pattern"))
799 797 s = set()
800 798
801 799 if not matchmod.patkind(pat):
802 800 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
803 801 fl = repo.file(f)
804 802 for fr in fl:
805 803 s.add(fl.linkrev(fr))
806 804 else:
807 805 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
808 806 for f in repo[None]:
809 807 if m(f):
810 808 fl = repo.file(f)
811 809 for fr in fl:
812 810 s.add(fl.linkrev(fr))
813 811
814 812 return subset & s
815 813
816 814 def first(repo, subset, x):
817 815 """``first(set, [n])``
818 816 An alias for limit().
819 817 """
820 818 return limit(repo, subset, x)
821 819
822 820 def _follow(repo, subset, x, name, followfirst=False):
823 821 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
824 822 c = repo['.']
825 823 if l:
826 824 x = getstring(l[0], _("%s expected a filename") % name)
827 825 if x in c:
828 826 cx = c[x]
829 827 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
830 828 # include the revision responsible for the most recent version
831 829 s.add(cx.linkrev())
832 830 else:
833 831 return baseset()
834 832 else:
835 833 s = _revancestors(repo, baseset([c.rev()]), followfirst)
836 834
837 835 return subset & s
838 836
839 837 def follow(repo, subset, x):
840 838 """``follow([file])``
841 839 An alias for ``::.`` (ancestors of the working copy's first parent).
842 840 If a filename is specified, the history of the given file is followed,
843 841 including copies.
844 842 """
845 843 return _follow(repo, subset, x, 'follow')
846 844
847 845 def _followfirst(repo, subset, x):
848 846 # ``followfirst([file])``
849 847 # Like ``follow([file])`` but follows only the first parent of
850 848 # every revision or file revision.
851 849 return _follow(repo, subset, x, '_followfirst', followfirst=True)
852 850
853 851 def getall(repo, subset, x):
854 852 """``all()``
855 853 All changesets, the same as ``0:tip``.
856 854 """
857 855 # i18n: "all" is a keyword
858 856 getargs(x, 0, 0, _("all takes no arguments"))
859 857 return subset
860 858
861 859 def grep(repo, subset, x):
862 860 """``grep(regex)``
863 861 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
864 862 to ensure special escape characters are handled correctly. Unlike
865 863 ``keyword(string)``, the match is case-sensitive.
866 864 """
867 865 try:
868 866 # i18n: "grep" is a keyword
869 867 gr = re.compile(getstring(x, _("grep requires a string")))
870 868 except re.error, e:
871 869 raise error.ParseError(_('invalid match pattern: %s') % e)
872 870
873 871 def matches(x):
874 872 c = repo[x]
875 873 for e in c.files() + [c.user(), c.description()]:
876 874 if gr.search(e):
877 875 return True
878 876 return False
879 877
880 878 return subset.filter(matches)
881 879
882 880 def _matchfiles(repo, subset, x):
883 881 # _matchfiles takes a revset list of prefixed arguments:
884 882 #
885 883 # [p:foo, i:bar, x:baz]
886 884 #
887 885 # builds a match object from them and filters subset. Allowed
888 886 # prefixes are 'p:' for regular patterns, 'i:' for include
889 887 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
890 888 # a revision identifier, or the empty string to reference the
891 889 # working directory, from which the match object is
892 890 # initialized. Use 'd:' to set the default matching mode, default
893 891 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
894 892
895 893 # i18n: "_matchfiles" is a keyword
896 894 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
897 895 pats, inc, exc = [], [], []
898 896 hasset = False
899 897 rev, default = None, None
900 898 for arg in l:
901 899 # i18n: "_matchfiles" is a keyword
902 900 s = getstring(arg, _("_matchfiles requires string arguments"))
903 901 prefix, value = s[:2], s[2:]
904 902 if prefix == 'p:':
905 903 pats.append(value)
906 904 elif prefix == 'i:':
907 905 inc.append(value)
908 906 elif prefix == 'x:':
909 907 exc.append(value)
910 908 elif prefix == 'r:':
911 909 if rev is not None:
912 910 # i18n: "_matchfiles" is a keyword
913 911 raise error.ParseError(_('_matchfiles expected at most one '
914 912 'revision'))
915 913 rev = value
916 914 elif prefix == 'd:':
917 915 if default is not None:
918 916 # i18n: "_matchfiles" is a keyword
919 917 raise error.ParseError(_('_matchfiles expected at most one '
920 918 'default mode'))
921 919 default = value
922 920 else:
923 921 # i18n: "_matchfiles" is a keyword
924 922 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
925 923 if not hasset and matchmod.patkind(value) == 'set':
926 924 hasset = True
927 925 if not default:
928 926 default = 'glob'
929 927
930 928 def matches(x):
931 929 m = None
932 930 c = repo[x]
933 931 if not m or (hasset and rev is None):
934 932 ctx = c
935 933 if rev is not None:
936 934 ctx = repo[rev or None]
937 935 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
938 936 exclude=exc, ctx=ctx, default=default)
939 937 for f in c.files():
940 938 if m(f):
941 939 return True
942 940 return False
943 941
944 942 return subset.filter(matches)
945 943
946 944 def hasfile(repo, subset, x):
947 945 """``file(pattern)``
948 946 Changesets affecting files matched by pattern.
949 947
950 948 For a faster but less accurate result, consider using ``filelog()``
951 949 instead.
952 950
953 951 This predicate uses ``glob:`` as the default kind of pattern.
954 952 """
955 953 # i18n: "file" is a keyword
956 954 pat = getstring(x, _("file requires a pattern"))
957 955 return _matchfiles(repo, subset, ('string', 'p:' + pat))
958 956
959 957 def head(repo, subset, x):
960 958 """``head()``
961 959 Changeset is a named branch head.
962 960 """
963 961 # i18n: "head" is a keyword
964 962 getargs(x, 0, 0, _("head takes no arguments"))
965 963 hs = set()
966 964 for b, ls in repo.branchmap().iteritems():
967 965 hs.update(repo[h].rev() for h in ls)
968 966 return baseset(hs).filter(subset.__contains__)
969 967
970 968 def heads(repo, subset, x):
971 969 """``heads(set)``
972 970 Members of set with no children in set.
973 971 """
974 972 s = getset(repo, subset, x)
975 973 ps = parents(repo, subset, x)
976 974 return s - ps
977 975
978 976 def hidden(repo, subset, x):
979 977 """``hidden()``
980 978 Hidden changesets.
981 979 """
982 980 # i18n: "hidden" is a keyword
983 981 getargs(x, 0, 0, _("hidden takes no arguments"))
984 982 hiddenrevs = repoview.filterrevs(repo, 'visible')
985 983 return subset & hiddenrevs
986 984
987 985 def keyword(repo, subset, x):
988 986 """``keyword(string)``
989 987 Search commit message, user name, and names of changed files for
990 988 string. The match is case-insensitive.
991 989 """
992 990 # i18n: "keyword" is a keyword
993 991 kw = encoding.lower(getstring(x, _("keyword requires a string")))
994 992
995 993 def matches(r):
996 994 c = repo[r]
997 995 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
998 996 c.description()])
999 997
1000 998 return subset.filter(matches)
1001 999
1002 1000 def limit(repo, subset, x):
1003 1001 """``limit(set, [n])``
1004 1002 First n members of set, defaulting to 1.
1005 1003 """
1006 1004 # i18n: "limit" is a keyword
1007 1005 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1008 1006 try:
1009 1007 lim = 1
1010 1008 if len(l) == 2:
1011 1009 # i18n: "limit" is a keyword
1012 1010 lim = int(getstring(l[1], _("limit requires a number")))
1013 1011 except (TypeError, ValueError):
1014 1012 # i18n: "limit" is a keyword
1015 1013 raise error.ParseError(_("limit expects a number"))
1016 1014 ss = subset.set()
1017 1015 os = getset(repo, spanset(repo), l[0])
1018 1016 result = []
1019 1017 it = iter(os)
1020 1018 for x in xrange(lim):
1021 1019 try:
1022 1020 y = it.next()
1023 1021 if y in ss:
1024 1022 result.append(y)
1025 1023 except (StopIteration):
1026 1024 break
1027 1025 return baseset(result)
1028 1026
1029 1027 def last(repo, subset, x):
1030 1028 """``last(set, [n])``
1031 1029 Last n members of set, defaulting to 1.
1032 1030 """
1033 1031 # i18n: "last" is a keyword
1034 1032 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1035 1033 try:
1036 1034 lim = 1
1037 1035 if len(l) == 2:
1038 1036 # i18n: "last" is a keyword
1039 1037 lim = int(getstring(l[1], _("last requires a number")))
1040 1038 except (TypeError, ValueError):
1041 1039 # i18n: "last" is a keyword
1042 1040 raise error.ParseError(_("last expects a number"))
1043 1041 ss = subset.set()
1044 1042 os = getset(repo, spanset(repo), l[0])
1045 1043 os.reverse()
1046 1044 result = []
1047 1045 it = iter(os)
1048 1046 for x in xrange(lim):
1049 1047 try:
1050 1048 y = it.next()
1051 1049 if y in ss:
1052 1050 result.append(y)
1053 1051 except (StopIteration):
1054 1052 break
1055 1053 return baseset(result)
1056 1054
1057 1055 def maxrev(repo, subset, x):
1058 1056 """``max(set)``
1059 1057 Changeset with highest revision number in set.
1060 1058 """
1061 1059 os = getset(repo, spanset(repo), x)
1062 1060 if os:
1063 1061 m = os.max()
1064 1062 if m in subset:
1065 1063 return baseset([m])
1066 1064 return baseset()
1067 1065
1068 1066 def merge(repo, subset, x):
1069 1067 """``merge()``
1070 1068 Changeset is a merge changeset.
1071 1069 """
1072 1070 # i18n: "merge" is a keyword
1073 1071 getargs(x, 0, 0, _("merge takes no arguments"))
1074 1072 cl = repo.changelog
1075 1073 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1076 1074
1077 1075 def branchpoint(repo, subset, x):
1078 1076 """``branchpoint()``
1079 1077 Changesets with more than one child.
1080 1078 """
1081 1079 # i18n: "branchpoint" is a keyword
1082 1080 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1083 1081 cl = repo.changelog
1084 1082 if not subset:
1085 1083 return baseset()
1086 1084 baserev = min(subset)
1087 1085 parentscount = [0]*(len(repo) - baserev)
1088 1086 for r in cl.revs(start=baserev + 1):
1089 1087 for p in cl.parentrevs(r):
1090 1088 if p >= baserev:
1091 1089 parentscount[p - baserev] += 1
1092 1090 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1093 1091
1094 1092 def minrev(repo, subset, x):
1095 1093 """``min(set)``
1096 1094 Changeset with lowest revision number in set.
1097 1095 """
1098 1096 os = getset(repo, spanset(repo), x)
1099 1097 if os:
1100 1098 m = os.min()
1101 1099 if m in subset:
1102 1100 return baseset([m])
1103 1101 return baseset()
1104 1102
1105 1103 def modifies(repo, subset, x):
1106 1104 """``modifies(pattern)``
1107 1105 Changesets modifying files matched by pattern.
1108 1106
1109 1107 The pattern without explicit kind like ``glob:`` is expected to be
1110 1108 relative to the current directory and match against a file or a
1111 1109 directory.
1112 1110 """
1113 1111 # i18n: "modifies" is a keyword
1114 1112 pat = getstring(x, _("modifies requires a pattern"))
1115 1113 return checkstatus(repo, subset, pat, 0)
1116 1114
1117 1115 def node_(repo, subset, x):
1118 1116 """``id(string)``
1119 1117 Revision non-ambiguously specified by the given hex string prefix.
1120 1118 """
1121 1119 # i18n: "id" is a keyword
1122 1120 l = getargs(x, 1, 1, _("id requires one argument"))
1123 1121 # i18n: "id" is a keyword
1124 1122 n = getstring(l[0], _("id requires a string"))
1125 1123 if len(n) == 40:
1126 1124 rn = repo[n].rev()
1127 1125 else:
1128 1126 rn = None
1129 1127 pm = repo.changelog._partialmatch(n)
1130 1128 if pm is not None:
1131 1129 rn = repo.changelog.rev(pm)
1132 1130
1133 1131 return subset.filter(lambda r: r == rn)
1134 1132
1135 1133 def obsolete(repo, subset, x):
1136 1134 """``obsolete()``
1137 1135 Mutable changeset with a newer version."""
1138 1136 # i18n: "obsolete" is a keyword
1139 1137 getargs(x, 0, 0, _("obsolete takes no arguments"))
1140 1138 obsoletes = obsmod.getrevs(repo, 'obsolete')
1141 1139 return subset & obsoletes
1142 1140
1143 1141 def origin(repo, subset, x):
1144 1142 """``origin([set])``
1145 1143 Changesets that were specified as a source for the grafts, transplants or
1146 1144 rebases that created the given revisions. Omitting the optional set is the
1147 1145 same as passing all(). If a changeset created by these operations is itself
1148 1146 specified as a source for one of these operations, only the source changeset
1149 1147 for the first operation is selected.
1150 1148 """
1151 1149 if x is not None:
1152 1150 args = getset(repo, spanset(repo), x).set()
1153 1151 else:
1154 1152 args = getall(repo, spanset(repo), x).set()
1155 1153
1156 1154 def _firstsrc(rev):
1157 1155 src = _getrevsource(repo, rev)
1158 1156 if src is None:
1159 1157 return None
1160 1158
1161 1159 while True:
1162 1160 prev = _getrevsource(repo, src)
1163 1161
1164 1162 if prev is None:
1165 1163 return src
1166 1164 src = prev
1167 1165
1168 1166 o = set([_firstsrc(r) for r in args])
1169 1167 o -= set([None])
1170 1168 return subset & o
1171 1169
1172 1170 def outgoing(repo, subset, x):
1173 1171 """``outgoing([path])``
1174 1172 Changesets not found in the specified destination repository, or the
1175 1173 default push location.
1176 1174 """
1177 1175 import hg # avoid start-up nasties
1178 1176 # i18n: "outgoing" is a keyword
1179 1177 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1180 1178 # i18n: "outgoing" is a keyword
1181 1179 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1182 1180 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1183 1181 dest, branches = hg.parseurl(dest)
1184 1182 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1185 1183 if revs:
1186 1184 revs = [repo.lookup(rev) for rev in revs]
1187 1185 other = hg.peer(repo, {}, dest)
1188 1186 repo.ui.pushbuffer()
1189 1187 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1190 1188 repo.ui.popbuffer()
1191 1189 cl = repo.changelog
1192 1190 o = set([cl.rev(r) for r in outgoing.missing])
1193 1191 return subset & o
1194 1192
1195 1193 def p1(repo, subset, x):
1196 1194 """``p1([set])``
1197 1195 First parent of changesets in set, or the working directory.
1198 1196 """
1199 1197 if x is None:
1200 1198 p = repo[x].p1().rev()
1201 1199 if p >= 0:
1202 1200 return subset & baseset([p])
1203 1201 return baseset()
1204 1202
1205 1203 ps = set()
1206 1204 cl = repo.changelog
1207 1205 for r in getset(repo, spanset(repo), x):
1208 1206 ps.add(cl.parentrevs(r)[0])
1209 1207 ps -= set([node.nullrev])
1210 1208 return subset & ps
1211 1209
1212 1210 def p2(repo, subset, x):
1213 1211 """``p2([set])``
1214 1212 Second parent of changesets in set, or the working directory.
1215 1213 """
1216 1214 if x is None:
1217 1215 ps = repo[x].parents()
1218 1216 try:
1219 1217 p = ps[1].rev()
1220 1218 if p >= 0:
1221 1219 return subset & baseset([p])
1222 1220 return baseset()
1223 1221 except IndexError:
1224 1222 return baseset()
1225 1223
1226 1224 ps = set()
1227 1225 cl = repo.changelog
1228 1226 for r in getset(repo, spanset(repo), x):
1229 1227 ps.add(cl.parentrevs(r)[1])
1230 1228 ps -= set([node.nullrev])
1231 1229 return subset & ps
1232 1230
1233 1231 def parents(repo, subset, x):
1234 1232 """``parents([set])``
1235 1233 The set of all parents for all changesets in set, or the working directory.
1236 1234 """
1237 1235 if x is None:
1238 1236 ps = set(p.rev() for p in repo[x].parents())
1239 1237 else:
1240 1238 ps = set()
1241 1239 cl = repo.changelog
1242 1240 for r in getset(repo, spanset(repo), x):
1243 1241 ps.update(cl.parentrevs(r))
1244 1242 ps -= set([node.nullrev])
1245 1243 return subset & ps
1246 1244
1247 1245 def parentspec(repo, subset, x, n):
1248 1246 """``set^0``
1249 1247 The set.
1250 1248 ``set^1`` (or ``set^``), ``set^2``
1251 1249 First or second parent, respectively, of all changesets in set.
1252 1250 """
1253 1251 try:
1254 1252 n = int(n[1])
1255 1253 if n not in (0, 1, 2):
1256 1254 raise ValueError
1257 1255 except (TypeError, ValueError):
1258 1256 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1259 1257 ps = set()
1260 1258 cl = repo.changelog
1261 1259 for r in getset(repo, baseset(cl), x):
1262 1260 if n == 0:
1263 1261 ps.add(r)
1264 1262 elif n == 1:
1265 1263 ps.add(cl.parentrevs(r)[0])
1266 1264 elif n == 2:
1267 1265 parents = cl.parentrevs(r)
1268 1266 if len(parents) > 1:
1269 1267 ps.add(parents[1])
1270 1268 return subset & ps
1271 1269
1272 1270 def present(repo, subset, x):
1273 1271 """``present(set)``
1274 1272 An empty set, if any revision in set isn't found; otherwise,
1275 1273 all revisions in set.
1276 1274
1277 1275 If any of specified revisions is not present in the local repository,
1278 1276 the query is normally aborted. But this predicate allows the query
1279 1277 to continue even in such cases.
1280 1278 """
1281 1279 try:
1282 1280 return getset(repo, subset, x)
1283 1281 except error.RepoLookupError:
1284 1282 return baseset()
1285 1283
1286 1284 def public(repo, subset, x):
1287 1285 """``public()``
1288 1286 Changeset in public phase."""
1289 1287 # i18n: "public" is a keyword
1290 1288 getargs(x, 0, 0, _("public takes no arguments"))
1291 1289 pc = repo._phasecache
1292 1290 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1293 1291
1294 1292 def remote(repo, subset, x):
1295 1293 """``remote([id [,path]])``
1296 1294 Local revision that corresponds to the given identifier in a
1297 1295 remote repository, if present. Here, the '.' identifier is a
1298 1296 synonym for the current local branch.
1299 1297 """
1300 1298
1301 1299 import hg # avoid start-up nasties
1302 1300 # i18n: "remote" is a keyword
1303 1301 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1304 1302
1305 1303 q = '.'
1306 1304 if len(l) > 0:
1307 1305 # i18n: "remote" is a keyword
1308 1306 q = getstring(l[0], _("remote requires a string id"))
1309 1307 if q == '.':
1310 1308 q = repo['.'].branch()
1311 1309
1312 1310 dest = ''
1313 1311 if len(l) > 1:
1314 1312 # i18n: "remote" is a keyword
1315 1313 dest = getstring(l[1], _("remote requires a repository path"))
1316 1314 dest = repo.ui.expandpath(dest or 'default')
1317 1315 dest, branches = hg.parseurl(dest)
1318 1316 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1319 1317 if revs:
1320 1318 revs = [repo.lookup(rev) for rev in revs]
1321 1319 other = hg.peer(repo, {}, dest)
1322 1320 n = other.lookup(q)
1323 1321 if n in repo:
1324 1322 r = repo[n].rev()
1325 1323 if r in subset:
1326 1324 return baseset([r])
1327 1325 return baseset()
1328 1326
1329 1327 def removes(repo, subset, x):
1330 1328 """``removes(pattern)``
1331 1329 Changesets which remove files matching pattern.
1332 1330
1333 1331 The pattern without explicit kind like ``glob:`` is expected to be
1334 1332 relative to the current directory and match against a file or a
1335 1333 directory.
1336 1334 """
1337 1335 # i18n: "removes" is a keyword
1338 1336 pat = getstring(x, _("removes requires a pattern"))
1339 1337 return checkstatus(repo, subset, pat, 2)
1340 1338
1341 1339 def rev(repo, subset, x):
1342 1340 """``rev(number)``
1343 1341 Revision with the given numeric identifier.
1344 1342 """
1345 1343 # i18n: "rev" is a keyword
1346 1344 l = getargs(x, 1, 1, _("rev requires one argument"))
1347 1345 try:
1348 1346 # i18n: "rev" is a keyword
1349 1347 l = int(getstring(l[0], _("rev requires a number")))
1350 1348 except (TypeError, ValueError):
1351 1349 # i18n: "rev" is a keyword
1352 1350 raise error.ParseError(_("rev expects a number"))
1353 1351 return subset & baseset([l])
1354 1352
1355 1353 def matching(repo, subset, x):
1356 1354 """``matching(revision [, field])``
1357 1355 Changesets in which a given set of fields match the set of fields in the
1358 1356 selected revision or set.
1359 1357
1360 1358 To match more than one field pass the list of fields to match separated
1361 1359 by spaces (e.g. ``author description``).
1362 1360
1363 1361 Valid fields are most regular revision fields and some special fields.
1364 1362
1365 1363 Regular revision fields are ``description``, ``author``, ``branch``,
1366 1364 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1367 1365 and ``diff``.
1368 1366 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1369 1367 contents of the revision. Two revisions matching their ``diff`` will
1370 1368 also match their ``files``.
1371 1369
1372 1370 Special fields are ``summary`` and ``metadata``:
1373 1371 ``summary`` matches the first line of the description.
1374 1372 ``metadata`` is equivalent to matching ``description user date``
1375 1373 (i.e. it matches the main metadata fields).
1376 1374
1377 1375 ``metadata`` is the default field which is used when no fields are
1378 1376 specified. You can match more than one field at a time.
1379 1377 """
1380 1378 # i18n: "matching" is a keyword
1381 1379 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1382 1380
1383 1381 revs = getset(repo, baseset(repo.changelog), l[0])
1384 1382
1385 1383 fieldlist = ['metadata']
1386 1384 if len(l) > 1:
1387 1385 fieldlist = getstring(l[1],
1388 1386 # i18n: "matching" is a keyword
1389 1387 _("matching requires a string "
1390 1388 "as its second argument")).split()
1391 1389
1392 1390 # Make sure that there are no repeated fields,
1393 1391 # expand the 'special' 'metadata' field type
1394 1392 # and check the 'files' whenever we check the 'diff'
1395 1393 fields = []
1396 1394 for field in fieldlist:
1397 1395 if field == 'metadata':
1398 1396 fields += ['user', 'description', 'date']
1399 1397 elif field == 'diff':
1400 1398 # a revision matching the diff must also match the files
1401 1399 # since matching the diff is very costly, make sure to
1402 1400 # also match the files first
1403 1401 fields += ['files', 'diff']
1404 1402 else:
1405 1403 if field == 'author':
1406 1404 field = 'user'
1407 1405 fields.append(field)
1408 1406 fields = set(fields)
1409 1407 if 'summary' in fields and 'description' in fields:
1410 1408 # If a revision matches its description it also matches its summary
1411 1409 fields.discard('summary')
1412 1410
1413 1411 # We may want to match more than one field
1414 1412 # Not all fields take the same amount of time to be matched
1415 1413 # Sort the selected fields in order of increasing matching cost
1416 1414 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1417 1415 'files', 'description', 'substate', 'diff']
1418 1416 def fieldkeyfunc(f):
1419 1417 try:
1420 1418 return fieldorder.index(f)
1421 1419 except ValueError:
1422 1420 # assume an unknown field is very costly
1423 1421 return len(fieldorder)
1424 1422 fields = list(fields)
1425 1423 fields.sort(key=fieldkeyfunc)
1426 1424
1427 1425 # Each field will be matched with its own "getfield" function
1428 1426 # which will be added to the getfieldfuncs array of functions
1429 1427 getfieldfuncs = []
1430 1428 _funcs = {
1431 1429 'user': lambda r: repo[r].user(),
1432 1430 'branch': lambda r: repo[r].branch(),
1433 1431 'date': lambda r: repo[r].date(),
1434 1432 'description': lambda r: repo[r].description(),
1435 1433 'files': lambda r: repo[r].files(),
1436 1434 'parents': lambda r: repo[r].parents(),
1437 1435 'phase': lambda r: repo[r].phase(),
1438 1436 'substate': lambda r: repo[r].substate,
1439 1437 'summary': lambda r: repo[r].description().splitlines()[0],
1440 1438 'diff': lambda r: list(repo[r].diff(git=True),)
1441 1439 }
1442 1440 for info in fields:
1443 1441 getfield = _funcs.get(info, None)
1444 1442 if getfield is None:
1445 1443 raise error.ParseError(
1446 1444 # i18n: "matching" is a keyword
1447 1445 _("unexpected field name passed to matching: %s") % info)
1448 1446 getfieldfuncs.append(getfield)
1449 1447 # convert the getfield array of functions into a "getinfo" function
1450 1448 # which returns an array of field values (or a single value if there
1451 1449 # is only one field to match)
1452 1450 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1453 1451
1454 1452 def matches(x):
1455 1453 for rev in revs:
1456 1454 target = getinfo(rev)
1457 1455 match = True
1458 1456 for n, f in enumerate(getfieldfuncs):
1459 1457 if target[n] != f(x):
1460 1458 match = False
1461 1459 if match:
1462 1460 return True
1463 1461 return False
1464 1462
1465 1463 return subset.filter(matches)
1466 1464
1467 1465 def reverse(repo, subset, x):
1468 1466 """``reverse(set)``
1469 1467 Reverse order of set.
1470 1468 """
1471 1469 l = getset(repo, subset, x)
1472 1470 l.reverse()
1473 1471 return l
1474 1472
1475 1473 def roots(repo, subset, x):
1476 1474 """``roots(set)``
1477 1475 Changesets in set with no parent changeset in set.
1478 1476 """
1479 1477 s = getset(repo, spanset(repo), x).set()
1480 1478 subset = baseset([r for r in s if r in subset.set()])
1481 1479 cs = _children(repo, subset, s)
1482 1480 return subset - cs
1483 1481
1484 1482 def secret(repo, subset, x):
1485 1483 """``secret()``
1486 1484 Changeset in secret phase."""
1487 1485 # i18n: "secret" is a keyword
1488 1486 getargs(x, 0, 0, _("secret takes no arguments"))
1489 1487 pc = repo._phasecache
1490 1488 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1491 1489
1492 1490 def sort(repo, subset, x):
1493 1491 """``sort(set[, [-]key...])``
1494 1492 Sort set by keys. The default sort order is ascending, specify a key
1495 1493 as ``-key`` to sort in descending order.
1496 1494
1497 1495 The keys can be:
1498 1496
1499 1497 - ``rev`` for the revision number,
1500 1498 - ``branch`` for the branch name,
1501 1499 - ``desc`` for the commit message (description),
1502 1500 - ``user`` for user name (``author`` can be used as an alias),
1503 1501 - ``date`` for the commit date
1504 1502 """
1505 1503 # i18n: "sort" is a keyword
1506 1504 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1507 1505 keys = "rev"
1508 1506 if len(l) == 2:
1509 1507 # i18n: "sort" is a keyword
1510 1508 keys = getstring(l[1], _("sort spec must be a string"))
1511 1509
1512 1510 s = l[0]
1513 1511 keys = keys.split()
1514 1512 l = []
1515 1513 def invert(s):
1516 1514 return "".join(chr(255 - ord(c)) for c in s)
1517 1515 revs = getset(repo, subset, s)
1518 1516 if keys == ["rev"]:
1519 1517 revs.sort()
1520 1518 return revs
1521 1519 elif keys == ["-rev"]:
1522 1520 revs.sort(reverse=True)
1523 1521 return revs
1524 1522 for r in revs:
1525 1523 c = repo[r]
1526 1524 e = []
1527 1525 for k in keys:
1528 1526 if k == 'rev':
1529 1527 e.append(r)
1530 1528 elif k == '-rev':
1531 1529 e.append(-r)
1532 1530 elif k == 'branch':
1533 1531 e.append(c.branch())
1534 1532 elif k == '-branch':
1535 1533 e.append(invert(c.branch()))
1536 1534 elif k == 'desc':
1537 1535 e.append(c.description())
1538 1536 elif k == '-desc':
1539 1537 e.append(invert(c.description()))
1540 1538 elif k in 'user author':
1541 1539 e.append(c.user())
1542 1540 elif k in '-user -author':
1543 1541 e.append(invert(c.user()))
1544 1542 elif k == 'date':
1545 1543 e.append(c.date()[0])
1546 1544 elif k == '-date':
1547 1545 e.append(-c.date()[0])
1548 1546 else:
1549 1547 raise error.ParseError(_("unknown sort key %r") % k)
1550 1548 e.append(r)
1551 1549 l.append(e)
1552 1550 l.sort()
1553 1551 return baseset([e[-1] for e in l])
1554 1552
1555 1553 def _stringmatcher(pattern):
1556 1554 """
1557 1555 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1558 1556 returns the matcher name, pattern, and matcher function.
1559 1557 missing or unknown prefixes are treated as literal matches.
1560 1558
1561 1559 helper for tests:
1562 1560 >>> def test(pattern, *tests):
1563 1561 ... kind, pattern, matcher = _stringmatcher(pattern)
1564 1562 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1565 1563
1566 1564 exact matching (no prefix):
1567 1565 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1568 1566 ('literal', 'abcdefg', [False, False, True])
1569 1567
1570 1568 regex matching ('re:' prefix)
1571 1569 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1572 1570 ('re', 'a.+b', [False, False, True])
1573 1571
1574 1572 force exact matches ('literal:' prefix)
1575 1573 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1576 1574 ('literal', 're:foobar', [False, True])
1577 1575
1578 1576 unknown prefixes are ignored and treated as literals
1579 1577 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1580 1578 ('literal', 'foo:bar', [False, False, True])
1581 1579 """
1582 1580 if pattern.startswith('re:'):
1583 1581 pattern = pattern[3:]
1584 1582 try:
1585 1583 regex = re.compile(pattern)
1586 1584 except re.error, e:
1587 1585 raise error.ParseError(_('invalid regular expression: %s')
1588 1586 % e)
1589 1587 return 're', pattern, regex.search
1590 1588 elif pattern.startswith('literal:'):
1591 1589 pattern = pattern[8:]
1592 1590 return 'literal', pattern, pattern.__eq__
1593 1591
1594 1592 def _substringmatcher(pattern):
1595 1593 kind, pattern, matcher = _stringmatcher(pattern)
1596 1594 if kind == 'literal':
1597 1595 matcher = lambda s: pattern in s
1598 1596 return kind, pattern, matcher
1599 1597
1600 1598 def tag(repo, subset, x):
1601 1599 """``tag([name])``
1602 1600 The specified tag by name, or all tagged revisions if no name is given.
1603 1601
1604 1602 If `name` starts with `re:`, the remainder of the name is treated as
1605 1603 a regular expression. To match a tag that actually starts with `re:`,
1606 1604 use the prefix `literal:`.
1607 1605 """
1608 1606 # i18n: "tag" is a keyword
1609 1607 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1610 1608 cl = repo.changelog
1611 1609 if args:
1612 1610 pattern = getstring(args[0],
1613 1611 # i18n: "tag" is a keyword
1614 1612 _('the argument to tag must be a string'))
1615 1613 kind, pattern, matcher = _stringmatcher(pattern)
1616 1614 if kind == 'literal':
1617 1615 # avoid resolving all tags
1618 1616 tn = repo._tagscache.tags.get(pattern, None)
1619 1617 if tn is None:
1620 1618 raise util.Abort(_("tag '%s' does not exist") % pattern)
1621 1619 s = set([repo[tn].rev()])
1622 1620 else:
1623 1621 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1624 1622 else:
1625 1623 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1626 1624 return subset & s
1627 1625
1628 1626 def tagged(repo, subset, x):
1629 1627 return tag(repo, subset, x)
1630 1628
1631 1629 def unstable(repo, subset, x):
1632 1630 """``unstable()``
1633 1631 Non-obsolete changesets with obsolete ancestors.
1634 1632 """
1635 1633 # i18n: "unstable" is a keyword
1636 1634 getargs(x, 0, 0, _("unstable takes no arguments"))
1637 1635 unstables = obsmod.getrevs(repo, 'unstable')
1638 1636 return subset & unstables
1639 1637
1640 1638
1641 1639 def user(repo, subset, x):
1642 1640 """``user(string)``
1643 1641 User name contains string. The match is case-insensitive.
1644 1642
1645 1643 If `string` starts with `re:`, the remainder of the string is treated as
1646 1644 a regular expression. To match a user that actually contains `re:`, use
1647 1645 the prefix `literal:`.
1648 1646 """
1649 1647 return author(repo, subset, x)
1650 1648
1651 1649 # for internal use
1652 1650 def _list(repo, subset, x):
1653 1651 s = getstring(x, "internal error")
1654 1652 if not s:
1655 1653 return baseset()
1656 1654 ls = [repo[r].rev() for r in s.split('\0')]
1657 1655 s = subset.set()
1658 1656 return baseset([r for r in ls if r in s])
1659 1657
1660 1658 # for internal use
1661 1659 def _intlist(repo, subset, x):
1662 1660 s = getstring(x, "internal error")
1663 1661 if not s:
1664 1662 return baseset()
1665 1663 ls = [int(r) for r in s.split('\0')]
1666 1664 s = subset.set()
1667 1665 return baseset([r for r in ls if r in s])
1668 1666
1669 1667 # for internal use
1670 1668 def _hexlist(repo, subset, x):
1671 1669 s = getstring(x, "internal error")
1672 1670 if not s:
1673 1671 return baseset()
1674 1672 cl = repo.changelog
1675 1673 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1676 1674 s = subset.set()
1677 1675 return baseset([r for r in ls if r in s])
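
# These three helpers are the expansion targets of the list specifiers of
# formatspec() defined later in this file, e.g. (values illustrative):
#
#   formatspec('%ld', [10, 20])   -> "_intlist('10\x0020')"
#   formatspec('%ls', ['a', 'b']) -> "_list('a\x00b')"
#
# i.e. a list is serialized into a single NUL-separated string argument.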
1678 1676
1679 1677 symbols = {
1680 1678 "adds": adds,
1681 1679 "all": getall,
1682 1680 "ancestor": ancestor,
1683 1681 "ancestors": ancestors,
1684 1682 "_firstancestors": _firstancestors,
1685 1683 "author": author,
1686 1684 "only": only,
1687 1685 "bisect": bisect,
1688 1686 "bisected": bisected,
1689 1687 "bookmark": bookmark,
1690 1688 "branch": branch,
1691 1689 "branchpoint": branchpoint,
1692 1690 "bumped": bumped,
1693 1691 "bundle": bundle,
1694 1692 "children": children,
1695 1693 "closed": closed,
1696 1694 "contains": contains,
1697 1695 "converted": converted,
1698 1696 "date": date,
1699 1697 "desc": desc,
1700 1698 "descendants": descendants,
1701 1699 "_firstdescendants": _firstdescendants,
1702 1700 "destination": destination,
1703 1701 "divergent": divergent,
1704 1702 "draft": draft,
1705 1703 "extinct": extinct,
1706 1704 "extra": extra,
1707 1705 "file": hasfile,
1708 1706 "filelog": filelog,
1709 1707 "first": first,
1710 1708 "follow": follow,
1711 1709 "_followfirst": _followfirst,
1712 1710 "grep": grep,
1713 1711 "head": head,
1714 1712 "heads": heads,
1715 1713 "hidden": hidden,
1716 1714 "id": node_,
1717 1715 "keyword": keyword,
1718 1716 "last": last,
1719 1717 "limit": limit,
1720 1718 "_matchfiles": _matchfiles,
1721 1719 "max": maxrev,
1722 1720 "merge": merge,
1723 1721 "min": minrev,
1724 1722 "modifies": modifies,
1725 1723 "obsolete": obsolete,
1726 1724 "origin": origin,
1727 1725 "outgoing": outgoing,
1728 1726 "p1": p1,
1729 1727 "p2": p2,
1730 1728 "parents": parents,
1731 1729 "present": present,
1732 1730 "public": public,
1733 1731 "remote": remote,
1734 1732 "removes": removes,
1735 1733 "rev": rev,
1736 1734 "reverse": reverse,
1737 1735 "roots": roots,
1738 1736 "sort": sort,
1739 1737 "secret": secret,
1740 1738 "matching": matching,
1741 1739 "tag": tag,
1742 1740 "tagged": tagged,
1743 1741 "user": user,
1744 1742 "unstable": unstable,
1745 1743 "_list": _list,
1746 1744 "_intlist": _intlist,
1747 1745 "_hexlist": _hexlist,
1748 1746 }
1749 1747
1750 1748 # symbols which can't be used for a DoS attack for any given input
1751 1749 # (e.g. those which accept regexes as plain strings shouldn't be included)
1752 1750 # functions that just return a lot of changesets (like all) don't count here
1753 1751 safesymbols = set([
1754 1752 "adds",
1755 1753 "all",
1756 1754 "ancestor",
1757 1755 "ancestors",
1758 1756 "_firstancestors",
1759 1757 "author",
1760 1758 "bisect",
1761 1759 "bisected",
1762 1760 "bookmark",
1763 1761 "branch",
1764 1762 "branchpoint",
1765 1763 "bumped",
1766 1764 "bundle",
1767 1765 "children",
1768 1766 "closed",
1769 1767 "converted",
1770 1768 "date",
1771 1769 "desc",
1772 1770 "descendants",
1773 1771 "_firstdescendants",
1774 1772 "destination",
1775 1773 "divergent",
1776 1774 "draft",
1777 1775 "extinct",
1778 1776 "extra",
1779 1777 "file",
1780 1778 "filelog",
1781 1779 "first",
1782 1780 "follow",
1783 1781 "_followfirst",
1784 1782 "head",
1785 1783 "heads",
1786 1784 "hidden",
1787 1785 "id",
1788 1786 "keyword",
1789 1787 "last",
1790 1788 "limit",
1791 1789 "_matchfiles",
1792 1790 "max",
1793 1791 "merge",
1794 1792 "min",
1795 1793 "modifies",
1796 1794 "obsolete",
1797 1795 "origin",
1798 1796 "outgoing",
1799 1797 "p1",
1800 1798 "p2",
1801 1799 "parents",
1802 1800 "present",
1803 1801 "public",
1804 1802 "remote",
1805 1803 "removes",
1806 1804 "rev",
1807 1805 "reverse",
1808 1806 "roots",
1809 1807 "sort",
1810 1808 "secret",
1811 1809 "matching",
1812 1810 "tag",
1813 1811 "tagged",
1814 1812 "user",
1815 1813 "unstable",
1816 1814 "_list",
1817 1815 "_intlist",
1818 1816 "_hexlist",
1819 1817 ])
1820 1818
1821 1819 methods = {
1822 1820 "range": rangeset,
1823 1821 "dagrange": dagrange,
1824 1822 "string": stringset,
1825 1823 "symbol": symbolset,
1826 1824 "and": andset,
1827 1825 "or": orset,
1828 1826 "not": notset,
1829 1827 "list": listset,
1830 1828 "func": func,
1831 1829 "ancestor": ancestorspec,
1832 1830 "parent": parentspec,
1833 1831 "parentpost": p1,
1834 1832 }
1835 1833
1836 1834 def optimize(x, small):
1837 1835 if x is None:
1838 1836 return 0, x
1839 1837
1840 1838 smallbonus = 1
1841 1839 if small:
1842 1840 smallbonus = .5
1843 1841
1844 1842 op = x[0]
1845 1843 if op == 'minus':
1846 1844 return optimize(('and', x[1], ('not', x[2])), small)
1847 1845 elif op == 'dagrangepre':
1848 1846 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1849 1847 elif op == 'dagrangepost':
1850 1848 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1851 1849 elif op == 'rangepre':
1852 1850 return optimize(('range', ('string', '0'), x[1]), small)
1853 1851 elif op == 'rangepost':
1854 1852 return optimize(('range', x[1], ('string', 'tip')), small)
1855 1853 elif op == 'negate':
1856 1854 return optimize(('string',
1857 1855 '-' + getstring(x[1], _("can't negate that"))), small)
1858 1856 elif op in 'string symbol negate':
1859 1857 return smallbonus, x # single revisions are small
1860 1858 elif op == 'and':
1861 1859 wa, ta = optimize(x[1], True)
1862 1860 wb, tb = optimize(x[2], True)
1863 1861
1864 1862 # (::x and not ::y)/(not ::y and ::x) have a fast path
1865 1863 def isonly(revs, bases):
1866 1864 return (
1867 1865 revs[0] == 'func'
1868 1866 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1869 1867 and bases[0] == 'not'
1870 1868 and bases[1][0] == 'func'
1871 1869 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1872 1870
1873 1871 w = min(wa, wb)
1874 1872 if isonly(ta, tb):
1875 1873 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1876 1874 if isonly(tb, ta):
1877 1875 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1878 1876
1879 1877 if wa > wb:
1880 1878 return w, (op, tb, ta)
1881 1879 return w, (op, ta, tb)
1882 1880 elif op == 'or':
1883 1881 wa, ta = optimize(x[1], False)
1884 1882 wb, tb = optimize(x[2], False)
1885 1883 if wb < wa:
1886 1884 wb, wa = wa, wb
1887 1885 return max(wa, wb), (op, ta, tb)
1888 1886 elif op == 'not':
1889 1887 o = optimize(x[1], not small)
1890 1888 return o[0], (op, o[1])
1891 1889 elif op == 'parentpost':
1892 1890 o = optimize(x[1], small)
1893 1891 return o[0], (op, o[1])
1894 1892 elif op == 'group':
1895 1893 return optimize(x[1], small)
1896 1894 elif op in 'dagrange range list parent ancestorspec':
1897 1895 if op == 'parent':
1898 1896 # x^:y means (x^) : y, not x ^ (:y)
1899 1897 post = ('parentpost', x[1])
1900 1898 if x[2][0] == 'dagrangepre':
1901 1899 return optimize(('dagrange', post, x[2][1]), small)
1902 1900 elif x[2][0] == 'rangepre':
1903 1901 return optimize(('range', post, x[2][1]), small)
1904 1902
1905 1903 wa, ta = optimize(x[1], small)
1906 1904 wb, tb = optimize(x[2], small)
1907 1905 return wa + wb, (op, ta, tb)
1908 1906 elif op == 'func':
1909 1907 f = getstring(x[1], _("not a symbol"))
1910 1908 wa, ta = optimize(x[2], small)
1911 1909 if f in ("author branch closed date desc file grep keyword "
1912 1910 "outgoing user"):
1913 1911 w = 10 # slow
1914 1912 elif f in "modifies adds removes":
1915 1913 w = 30 # slower
1916 1914 elif f == "contains":
1917 1915 w = 100 # very slow
1918 1916 elif f == "ancestor":
1919 1917 w = 1 * smallbonus
1920 1918 elif f in "reverse limit first _intlist":
1921 1919 w = 0
1922 1920 elif f in "sort":
1923 1921 w = 10 # assume most sorts look at changelog
1924 1922 else:
1925 1923 w = 1
1926 1924 return w + wa, (op, x[1], ta)
1927 1925 return 1, x
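
# The weights assigned above let the 'and' branch evaluate the cheaper side
# first. For instance, in "contains('x') and branch(default)" the branch()
# term (weight 10) ends up before the much slower contains() term (weight
# 100), so the optimized tree becomes
# ('and', <branch(...) subtree>, <contains(...) subtree>).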
1928 1926
1929 1927 _aliasarg = ('func', ('symbol', '_aliasarg'))
1930 1928 def _getaliasarg(tree):
1931 1929 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1932 1930 return X; otherwise return None.
1933 1931 """
1934 1932 if (len(tree) == 3 and tree[:2] == _aliasarg
1935 1933 and tree[2][0] == 'string'):
1936 1934 return tree[2][1]
1937 1935 return None
1938 1936
1939 1937 def _checkaliasarg(tree, known=None):
1940 1938 """Check that tree contains no _aliasarg construct or only ones whose
1941 1939 value is in known. Used to avoid alias placeholder injection.
1942 1940 """
1943 1941 if isinstance(tree, tuple):
1944 1942 arg = _getaliasarg(tree)
1945 1943 if arg is not None and (not known or arg not in known):
1946 1944 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1947 1945 for t in tree:
1948 1946 _checkaliasarg(t, known)
1949 1947
1950 1948 class revsetalias(object):
1951 1949 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1952 1950 args = None
1953 1951
1954 1952 def __init__(self, name, value):
1955 1953 '''Aliases like:
1956 1954
1957 1955 h = heads(default)
1958 1956 b($1) = ancestors($1) - ancestors(default)
1959 1957 '''
1960 1958 m = self.funcre.search(name)
1961 1959 if m:
1962 1960 self.name = m.group(1)
1963 1961 self.tree = ('func', ('symbol', m.group(1)))
1964 1962 self.args = [x.strip() for x in m.group(2).split(',')]
1965 1963 for arg in self.args:
1966 1964 # _aliasarg() is an unknown symbol only used to separate
1967 1965 # alias argument placeholders from regular strings.
1968 1966 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1969 1967 else:
1970 1968 self.name = name
1971 1969 self.tree = ('symbol', name)
1972 1970
1973 1971 self.replacement, pos = parse(value)
1974 1972 if pos != len(value):
1975 1973 raise error.ParseError(_('invalid token'), pos)
1976 1974 # Check for placeholder injection
1977 1975 _checkaliasarg(self.replacement, self.args)
1978 1976
1979 1977 def _getalias(aliases, tree):
1980 1978 """If tree looks like an unexpanded alias, return it. Return None
1981 1979 otherwise.
1982 1980 """
1983 1981 if isinstance(tree, tuple) and tree:
1984 1982 if tree[0] == 'symbol' and len(tree) == 2:
1985 1983 name = tree[1]
1986 1984 alias = aliases.get(name)
1987 1985 if alias and alias.args is None and alias.tree == tree:
1988 1986 return alias
1989 1987 if tree[0] == 'func' and len(tree) > 1:
1990 1988 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1991 1989 name = tree[1][1]
1992 1990 alias = aliases.get(name)
1993 1991 if alias and alias.args is not None and alias.tree == tree[:2]:
1994 1992 return alias
1995 1993 return None
1996 1994
1997 1995 def _expandargs(tree, args):
1998 1996 """Replace _aliasarg instances with the substitution value of the
1999 1997 same name in args, recursively.
2000 1998 """
2001 1999 if not tree or not isinstance(tree, tuple):
2002 2000 return tree
2003 2001 arg = _getaliasarg(tree)
2004 2002 if arg is not None:
2005 2003 return args[arg]
2006 2004 return tuple(_expandargs(t, args) for t in tree)
2007 2005
2008 2006 def _expandaliases(aliases, tree, expanding, cache):
2009 2007 """Expand aliases in tree, recursively.
2010 2008
2011 2009 'aliases' is a dictionary mapping user defined aliases to
2012 2010 revsetalias objects.
2013 2011 """
2014 2012 if not isinstance(tree, tuple):
2015 2013 # Do not expand raw strings
2016 2014 return tree
2017 2015 alias = _getalias(aliases, tree)
2018 2016 if alias is not None:
2019 2017 if alias in expanding:
2020 2018 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2021 2019 'detected') % alias.name)
2022 2020 expanding.append(alias)
2023 2021 if alias.name not in cache:
2024 2022 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2025 2023 expanding, cache)
2026 2024 result = cache[alias.name]
2027 2025 expanding.pop()
2028 2026 if alias.args is not None:
2029 2027 l = getlist(tree[2])
2030 2028 if len(l) != len(alias.args):
2031 2029 raise error.ParseError(
2032 2030 _('invalid number of arguments: %s') % len(l))
2033 2031 l = [_expandaliases(aliases, a, [], cache) for a in l]
2034 2032 result = _expandargs(result, dict(zip(alias.args, l)))
2035 2033 else:
2036 2034 result = tuple(_expandaliases(aliases, t, expanding, cache)
2037 2035 for t in tree)
2038 2036 return result
2039 2037
2040 2038 def findaliases(ui, tree):
2041 2039 _checkaliasarg(tree)
2042 2040 aliases = {}
2043 2041 for k, v in ui.configitems('revsetalias'):
2044 2042 alias = revsetalias(k, v)
2045 2043 aliases[alias.name] = alias
2046 2044 return _expandaliases(aliases, tree, [], {})
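
# The aliases expanded here come from the [revsetalias] configuration section,
# e.g. (mirroring the revsetalias docstring above):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# after which the expression "b(1.5)" behaves like
# "ancestors(1.5) - ancestors(default)".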
2047 2045
2048 2046 def parse(spec, lookup=None):
2049 2047 p = parser.parser(tokenize, elements)
2050 2048 return p.parse(spec, lookup=lookup)
2051 2049
2052 2050 def match(ui, spec, repo=None):
2053 2051 if not spec:
2054 2052 raise error.ParseError(_("empty query"))
2055 2053 lookup = None
2056 2054 if repo:
2057 2055 lookup = repo.__contains__
2058 2056 tree, pos = parse(spec, lookup)
2059 2057 if (pos != len(spec)):
2060 2058 raise error.ParseError(_("invalid token"), pos)
2061 2059 if ui:
2062 2060 tree = findaliases(ui, tree)
2063 2061 weight, tree = optimize(tree, True)
2064 2062 def mfunc(repo, subset):
2065 2063 if util.safehasattr(subset, 'set'):
2066 2064 result = getset(repo, subset, tree)
2067 2065 else:
2068 2066 result = getset(repo, baseset(subset), tree)
2069 2067 return result
2070 2068 return mfunc
2071 2069
2072 2070 def formatspec(expr, *args):
2073 2071 '''
2074 2072 This is a convenience function for using revsets internally, and
2075 2073 escapes arguments appropriately. Aliases are intentionally ignored
2076 2074 so that intended expression behavior isn't accidentally subverted.
2077 2075
2078 2076 Supported arguments:
2079 2077
2080 2078 %r = revset expression, parenthesized
2081 2079 %d = int(arg), no quoting
2082 2080 %s = string(arg), escaped and single-quoted
2083 2081 %b = arg.branch(), escaped and single-quoted
2084 2082 %n = hex(arg), single-quoted
2085 2083 %% = a literal '%'
2086 2084
2087 2085 Prefixing the type with 'l' specifies a parenthesized list of that type.
2088 2086
2089 2087 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2090 2088 '(10 or 11):: and ((this()) or (that()))'
2091 2089 >>> formatspec('%d:: and not %d::', 10, 20)
2092 2090 '10:: and not 20::'
2093 2091 >>> formatspec('%ld or %ld', [], [1])
2094 2092 "_list('') or 1"
2095 2093 >>> formatspec('keyword(%s)', 'foo\\xe9')
2096 2094 "keyword('foo\\\\xe9')"
2097 2095 >>> b = lambda: 'default'
2098 2096 >>> b.branch = b
2099 2097 >>> formatspec('branch(%b)', b)
2100 2098 "branch('default')"
2101 2099 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2102 2100 "root(_list('a\\x00b\\x00c\\x00d'))"
2103 2101 '''
2104 2102
2105 2103 def quote(s):
2106 2104 return repr(str(s))
2107 2105
2108 2106 def argtype(c, arg):
2109 2107 if c == 'd':
2110 2108 return str(int(arg))
2111 2109 elif c == 's':
2112 2110 return quote(arg)
2113 2111 elif c == 'r':
2114 2112 parse(arg) # make sure syntax errors are confined
2115 2113 return '(%s)' % arg
2116 2114 elif c == 'n':
2117 2115 return quote(node.hex(arg))
2118 2116 elif c == 'b':
2119 2117 return quote(arg.branch())
2120 2118
2121 2119 def listexp(s, t):
2122 2120 l = len(s)
2123 2121 if l == 0:
2124 2122 return "_list('')"
2125 2123 elif l == 1:
2126 2124 return argtype(t, s[0])
2127 2125 elif t == 'd':
2128 2126 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2129 2127 elif t == 's':
2130 2128 return "_list('%s')" % "\0".join(s)
2131 2129 elif t == 'n':
2132 2130 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2133 2131 elif t == 'b':
2134 2132 return "_list('%s')" % "\0".join(a.branch() for a in s)
2135 2133
2136 2134 m = l // 2
2137 2135 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2138 2136
2139 2137 ret = ''
2140 2138 pos = 0
2141 2139 arg = 0
2142 2140 while pos < len(expr):
2143 2141 c = expr[pos]
2144 2142 if c == '%':
2145 2143 pos += 1
2146 2144 d = expr[pos]
2147 2145 if d == '%':
2148 2146 ret += d
2149 2147 elif d in 'dsnbr':
2150 2148 ret += argtype(d, args[arg])
2151 2149 arg += 1
2152 2150 elif d == 'l':
2153 2151 # a list of some type
2154 2152 pos += 1
2155 2153 d = expr[pos]
2156 2154 ret += listexp(list(args[arg]), d)
2157 2155 arg += 1
2158 2156 else:
2159 2157 raise util.Abort('unexpected revspec format character %s' % d)
2160 2158 else:
2161 2159 ret += c
2162 2160 pos += 1
2163 2161
2164 2162 return ret
2165 2163
2166 2164 def prettyformat(tree):
2167 2165 def _prettyformat(tree, level, lines):
2168 2166 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2169 2167 lines.append((level, str(tree)))
2170 2168 else:
2171 2169 lines.append((level, '(%s' % tree[0]))
2172 2170 for s in tree[1:]:
2173 2171 _prettyformat(s, level + 1, lines)
2174 2172 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2175 2173
2176 2174 lines = []
2177 2175 _prettyformat(tree, 0, lines)
2178 2176 output = '\n'.join((' '*l + s) for l, s in lines)
2179 2177 return output
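
# For example, prettyformat(('func', ('symbol', 'p1'), ('symbol', 'tip')))
# renders one node per line with two-space indentation:
#
#   (func
#     ('symbol', 'p1')
#     ('symbol', 'tip'))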
2180 2178
2181 2179 def depth(tree):
2182 2180 if isinstance(tree, tuple):
2183 2181 return max(map(depth, tree)) + 1
2184 2182 else:
2185 2183 return 0
2186 2184
2187 2185 def funcsused(tree):
2188 2186 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2189 2187 return set()
2190 2188 else:
2191 2189 funcs = set()
2192 2190 for s in tree[1:]:
2193 2191 funcs |= funcsused(s)
2194 2192 if tree[0] == 'func':
2195 2193 funcs.add(tree[1][1])
2196 2194 return funcs
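
# For example, for the parsed tree of "ancestors(x) - ancestors(y)",
# funcsused() returns set(['ancestors']).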
2197 2195
2198 2196 class abstractsmartset(object):
2199 2197
2200 2198 def __nonzero__(self):
2201 2199 """True if the smartset is not empty"""
2202 2200 raise NotImplementedError()
2203 2201
2204 2202 def __contains__(self, rev):
2205 2203 """provide fast membership testing"""
2206 2204 raise NotImplementedError()
2207 2205
2208 2206 def set(self):
2209 2207 """Returns a set or a smartset containing all the elements.
2210 2208
2211 2209 The returned structure should be the fastest option for membership
2212 2210 testing.
2213 2211
2214 2212 This is part of the mandatory API for smartset."""
2215 2213 raise NotImplementedError()
2216 2214
2217 2215 def __iter__(self):
2218 2216 """iterate the set in the order it is supposed to be iterated"""
2219 2217 raise NotImplementedError()
2220 2218
2221 2219 # Attributes containing a function to perform a fast iteration in a given
2222 2220 # direction. A smartset can have none, one, or both defined.
2223 2221 #
2224 2222 # Default value is None instead of a function returning None to avoid
2225 2223 # initializing an iterator just for testing if a fast method exists.
2226 2224 fastasc = None
2227 2225 fastdesc = None
2228 2226
2229 2227 def isascending(self):
2230 2228 """True if the set will iterate in ascending order"""
2231 2229 raise NotImplementedError()
2232 2230
2233 2231 def ascending(self):
2234 2232 """Sorts the set in ascending order (in place).
2235 2233
2236 2234 This is part of the mandatory API for smartset."""
2237 2235 self.sort()
2238 2236
2239 2237 def isdescending(self):
2240 2238 """True if the set will iterate in descending order"""
2241 2239 raise NotImplementedError()
2242 2240
2243 2241 def descending(self):
2244 2242 """Sorts the set in descending order (in place).
2245 2243
2246 2244 This is part of the mandatory API for smartset."""
2247 2245 self.sort(reverse=True)
2248 2246
2249 2247 def min(self):
2250 2248 """return the minimum element in the set"""
2251 2249 if self.fastasc is not None:
2252 2250 for r in self.fastasc():
2253 2251 return r
2254 2252 raise ValueError('arg is an empty sequence')
2255 2253 return min(self)
2256 2254
2257 2255 def max(self):
2258 2256 """return the maximum element in the set"""
2259 2257 if self.fastdesc is not None:
2260 2258 for r in self.fastdesc():
2261 2259 return r
2262 2260 raise ValueError('arg is an empty sequence')
2263 2261 return max(self)
2264 2262
2265 2263 def first(self):
2266 2264 """return the first element in the set (user iteration perspective)
2267 2265
2268 2266 Return None if the set is empty"""
2269 2267 raise NotImplementedError()
2270 2268
2271 2269 def last(self):
2272 2270 """return the last element in the set (user iteration perspective)
2273 2271
2274 2272 Return None if the set is empty"""
2275 2273 raise NotImplementedError()
2276 2274
2277 2275 def reverse(self):
2278 2276 """reverse the expected iteration order"""
2279 2277 raise NotImplementedError()
2280 2278
2281 2279 def sort(self, reverse=True):
2282 2280 """get the set to iterate in an ascending or descending order"""
2283 2281 raise NotImplementedError()
2284 2282
2285 2283 def __and__(self, other):
2286 2284 """Returns a new object with the intersection of the two collections.
2287 2285
2288 2286 This is part of the mandatory API for smartset."""
2289 2287 return self.filter(other.__contains__)
2290 2288
2291 2289 def __add__(self, other):
2292 2290 """Returns a new object with the union of the two collections.
2293 2291
2294 2292 This is part of the mandatory API for smartset."""
2295 2293 kwargs = {}
2296 2294 if self.isascending() and other.isascending():
2297 2295 kwargs['ascending'] = True
2298 2296 if self.isdescending() and other.isdescending():
2299 2297 kwargs['ascending'] = False
2300 2298 return addset(self, other, **kwargs)
2301 2299
2302 2300 def __sub__(self, other):
2303 2301 """Returns a new object with the subtraction of the two collections.
2304 2302
2305 2303 This is part of the mandatory API for smartset."""
2306 2304 c = other.__contains__
2307 2305 return self.filter(lambda r: not c(r))
2308 2306
2309 2307 def filter(self, condition):
2310 2308 """Returns this smartset filtered by condition as a new smartset.
2311 2309
2312 2310 `condition` is a callable which takes a revision number and returns a
2313 2311 boolean.
2314 2312
2315 2313 This is part of the mandatory API for smartset."""
2316 2314 kwargs = {}
2317 2315 if self.isascending():
2318 2316 kwargs['ascending'] = True
2319 2317 elif self.isdescending():
2320 2318 kwargs['ascending'] = False
2321 2319 return filteredset(self, condition, **kwargs)
2322 2320
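# How the default operators above compose, using the concrete classes defined
# below (subclasses may override these): for two ascending smartsets a and b,
#
#   a + b  ->  addset(a, b, ascending=True)               # lazy ordered union
#   a & b  ->  filteredset(a, b.__contains__, ascending=True)
#   a - b  ->  filteredset(a, <r not in b>, ascending=True)
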
2323 2321 class baseset(abstractsmartset):
2324 2322 """Basic data structure that represents a revset and contains the basic
2325 2323 operation that it should be able to perform.
2326 2324
2327 2325 Every method in this class should be implemented by any smartset class.
2328 2326 """
2329 2327 def __init__(self, data=()):
2330 2328 if not isinstance(data, list):
2331 2329 data = list(data)
2332 2330 self._list = data
2333 2331 self._set = None
2334 2332 self._ascending = None
2335 2333
2336 2334 @util.propertycache
2337 2335 def _asclist(self):
2338 2336 asclist = self._list[:]
2339 2337 asclist.sort()
2340 2338 return asclist
2341 2339
2342 2340 def __iter__(self):
2343 2341 if self._ascending is None:
2344 2342 return iter(self._list)
2345 2343 elif self._ascending:
2346 2344 return iter(self._asclist)
2347 2345 else:
2348 2346 return reversed(self._asclist)
2349 2347
2350 2348 def fastasc(self):
2351 2349 return iter(self._asclist)
2352 2350
2353 2351 def fastdesc(self):
2354 2352 return reversed(self._asclist)
2355 2353
2356 2354 def set(self):
2357 2355 """Returns a set or a smartset containing all the elements.
2358 2356
2359 2357 The returned structure should be the fastest option for membership
2360 2358 testing.
2361 2359
2362 2360 This is part of the mandatory API for smartset."""
2363 2361 if not self._set:
2364 2362 self._set = set(self)
2365 2363 return self._set
2366 2364
2367 2365 @util.propertycache
2368 2366 def __contains__(self):
2369 2367 return self.set().__contains__
2370 2368
2371 2369 def __nonzero__(self):
2372 2370 return bool(self._list)
2373 2371
2374 2372 def sort(self, reverse=False):
2375 2373 self._ascending = not bool(reverse)
2376 2374
2377 2375 def reverse(self):
2378 2376 if self._ascending is None:
2379 2377 self._list.reverse()
2380 2378 else:
2381 2379 self._ascending = not self._ascending
2382 2380
2383 2381 def __len__(self):
2384 2382 return len(self._list)
2385 2383
2386 2384 def __sub__(self, other):
2387 2385 """Returns a new object with the subtraction of the two collections.
2388 2386
2389 2387 This is part of the mandatory API for smartset."""
2390 2388 # If we are operating on 2 baseset, do the computation now since all
2391 2389 # data is available. The alternative is to involve a filteredset, which
2392 2390 # may be slow.
2393 2391 if isinstance(other, baseset):
2394 2392 other = other.set()
2395 2393 return baseset([x for x in self if x not in other])
2396 2394
2397 2395 return self.filter(lambda x: x not in other)
2398 2396
2399 2397 def __and__(self, other):
2400 2398 """Returns a new object with the intersection of the two collections.
2401 2399
2402 2400 This is part of the mandatory API for smartset."""
2403 2401 return baseset([y for y in self if y in other])
2404 2402
2405 2403 def __add__(self, other):
2406 2404 """Returns a new object with the union of the two collections.
2407 2405
2408 2406 This is part of the mandatory API for smartset."""
2409 2407 s = self.set()
2410 2408 l = [r for r in other if r not in s]
2411 2409 return baseset(list(self) + l)
2412 2410
2413 2411 def isascending(self):
2414 2412 """Returns True if the collection is ascending order, False if not.
2415 2413
2416 2414 This is part of the mandatory API for smartset."""
2417 2415 return self._ascending is not None and self._ascending
2418 2416
2419 2417 def isdescending(self):
2420 2418 """Returns True if the collection is descending order, False if not.
2421 2419
2422 2420 This is part of the mandatory API for smartset."""
2423 2421 return self._ascending is not None and not self._ascending
2424 2422
2425 2423 def first(self):
2426 2424 if self:
2427 2425 if self._ascending is None:
2428 2426 return self._list[0]
2429 2427 elif self._ascending:
2430 2428 return self._asclist[0]
2431 2429 else:
2432 2430 return self._asclist[-1]
2433 2431 return None
2434 2432
2435 2433 def last(self):
2436 2434 if self:
2437 2435 if self._ascending is None:
2438 2436 return self._list[-1]
2439 2437 elif self._ascending:
2440 2438 return self._asclist[-1]
2441 2439 else:
2442 2440 return self._asclist[0]
2443 2441 return None
2444 2442
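# For example, baseset([3, 1, 2]) iterates in insertion order (3, 1, 2) until
# sort() or reverse() is called; after sort() it iterates 1, 2, 3, after a
# further reverse() it iterates 3, 2, 1, and membership tests go through the
# cached set().
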
2445 2443 class filteredset(abstractsmartset):
2446 2444 """Duck type for baseset class which iterates lazily over the revisions in
2447 2445 the subset and contains a function which tests for membership in the
2448 2446 revset
2449 2447 """
2450 2448 def __init__(self, subset, condition=lambda x: True, ascending=None):
2451 2449 """
2452 2450 condition: a function that decides whether a revision in the subset
2453 2451 belongs to the revset or not.
2454 2452 """
2455 2453 self._subset = subset
2456 2454 self._condition = condition
2457 2455 self._cache = {}
2458 2456 if ascending is not None:
2459 2457 ascending = bool(ascending)
2460 2458 self._ascending = ascending
2461 2459
2462 2460 def __contains__(self, x):
2463 2461 c = self._cache
2464 2462 if x not in c:
2465 2463 v = c[x] = x in self._subset and self._condition(x)
2466 2464 return v
2467 2465 return c[x]
2468 2466
2469 2467 def __iter__(self):
2470 2468 return self._iterfilter(self._subset)
2471 2469
2472 2470 def _iterfilter(self, it):
2473 2471 cond = self._condition
2474 2472 for x in it:
2475 2473 if cond(x):
2476 2474 yield x
2477 2475
2478 2476 @property
2479 2477 def fastasc(self):
2480 2478 it = self._subset.fastasc
2481 2479 if it is None:
2482 2480 return None
2483 2481 return lambda: self._iterfilter(it())
2484 2482
2485 2483 @property
2486 2484 def fastdesc(self):
2487 2485 it = self._subset.fastdesc
2488 2486 if it is None:
2489 2487 return None
2490 2488 return lambda: self._iterfilter(it())
2491 2489
2492 2490 def __nonzero__(self):
2493 2491 for r in self:
2494 2492 return True
2495 2493 return False
2496 2494
2497 2495 def __len__(self):
2498 2496 # Basic implementation to be changed in future patches.
2499 2497 l = baseset([r for r in self])
2500 2498 return len(l)
2501 2499
2502 2500 def __getitem__(self, x):
2503 2501 # Basic implementation to be changed in future patches.
2504 2502 l = baseset([r for r in self])
2505 2503 return l[x]
2506 2504
2507 2505 def sort(self, reverse=False):
2508 2506 if self._ascending is None:
2509 2507 if not util.safehasattr(self._subset, 'sort'):
2510 2508 self._subset = baseset(self._subset)
2511 2509 self._subset.sort(reverse=reverse)
2512 2510 self._ascending = not reverse
2513 2511 elif bool(reverse) == self._ascending:
2514 2512 self.reverse()
2515 2513
2516 2514 def reverse(self):
2517 2515 self._subset.reverse()
2518 2516 if self._ascending is not None:
2519 2517 self._ascending = not self._ascending
2520 2518
2521 2519 def set(self):
2522 2520 return set([r for r in self])
2523 2521
2524 2522 def isascending(self):
2525 2523 return self._ascending is not None and self._ascending
2526 2524
2527 2525 def isdescending(self):
2528 2526 return self._ascending is not None and not self._ascending
2529 2527
2530 2528 def first(self):
2531 2529 for x in self:
2532 2530 return x
2533 2531 return None
2534 2532
2535 2533 def last(self):
2536 2534 it = None
2537 2535 if self._ascending is not None:
2538 2536 if self._ascending:
2539 2537 it = self.fastdesc
2540 2538 else:
2541 2539 it = self.fastasc
2542 2540 if it is None:
2543 2541 # slowly consume everything. This needs improvement
2544 2542 it = lambda: reversed(list(self))
2545 2543 for x in it():
2546 2544 return x
2547 2545 return None
2548 2546
2549 2547 class addset(abstractsmartset):
2550 2548 """Represent the addition of two sets
2551 2549
2552 2550 Wrapper structure for lazily adding two structures without losing much
2553 2551 performance on the __contains__ method
2554 2552
2555 2553 If the ascending attribute is set, that means the two structures are
2556 2554 ordered in either an ascending or descending way. Therefore, we can add
2557 2555 them maintaining the order by iterating over both at the same time
2558 2556 """
2559 2557 def __init__(self, revs1, revs2, ascending=None):
2560 2558 self._r1 = revs1
2561 2559 self._r2 = revs2
2562 2560 self._iter = None
2563 2561 self._ascending = ascending
2564 2562 self._genlist = None
2565 2563
2566 2564 def __len__(self):
2567 2565 return len(self._list)
2568 2566
2569 2567 def __nonzero__(self):
2570 2568 return bool(self._r1 or self._r2)
2571 2569
2572 2570 @util.propertycache
2573 2571 def _list(self):
2574 2572 if not self._genlist:
2575 2573 self._genlist = baseset(self._iterator())
2576 2574 return self._genlist
2577 2575
2578 2576 def _iterator(self):
2579 2577 """Iterate over both collections without repeating elements
2580 2578
2581 2579 If the ascending attribute is not set, iterate over the first one and
2582 2580 then over the second one checking for membership on the first one so we
2583 2581 don't yield any duplicates.
2584 2582
2585 2583 If the ascending attribute is set, iterate over both collections at the
2586 2584 same time, yielding only one value at a time in the given order.
2587 2585 """
2588 2586 if self._ascending is None:
2589 2587 def gen():
2590 2588 for r in self._r1:
2591 2589 yield r
2592 2590 s = self._r1.set()
2593 2591 for r in self._r2:
2594 2592 if r not in s:
2595 2593 yield r
2596 2594 gen = gen()
2597 2595 else:
2598 2596 iter1 = iter(self._r1)
2599 2597 iter2 = iter(self._r2)
2600 2598 gen = self._iterordered(self._ascending, iter1, iter2)
2601 2599 return gen
2602 2600
2603 2601 def __iter__(self):
2604 2602 if self._genlist:
2605 2603 return iter(self._genlist)
2606 2604 return iter(self._iterator())
2607 2605
2608 2606 @property
2609 2607 def fastasc(self):
2610 2608 iter1 = self._r1.fastasc
2611 2609 iter2 = self._r2.fastasc
2612 2610 if None in (iter1, iter2):
2613 2611 return None
2614 2612 return lambda: self._iterordered(True, iter1(), iter2())
2615 2613
2616 2614 @property
2617 2615 def fastdesc(self):
2618 2616 iter1 = self._r1.fastdesc
2619 2617 iter2 = self._r2.fastdesc
2620 2618 if None in (iter1, iter2):
2621 2619 return None
2622 2620 return lambda: self._iterordered(False, iter1(), iter2())
2623 2621
2624 2622 def _iterordered(self, ascending, iter1, iter2):
2625 2623 """produce an ordered iteration from two iterators with the same order
2626 2624
2627 2625 The ascending parameter is used to indicate the iteration direction.
2628 2626 """
2629 2627 choice = max
2630 2628 if ascending:
2631 2629 choice = min
2632 2630
2633 2631 val1 = None
2634 2632 val2 = None
2635 2633
2639 2637 try:
2640 2638 # Consume both iterators in an ordered way until one is
2641 2639 # empty
2642 2640 while True:
2643 2641 if val1 is None:
2644 2642 val1 = iter1.next()
2645 2643 if val2 is None:
2646 2644 val2 = iter2.next()
2647 2645 next = choice(val1, val2)
2648 2646 yield next
2649 2647 if val1 == next:
2650 2648 val1 = None
2651 2649 if val2 == next:
2652 2650 val2 = None
2653 2651 except StopIteration:
2654 2652 # Flush any remaining values and consume the other one
2655 2653 it = iter2
2656 2654 if val1 is not None:
2657 2655 yield val1
2658 2656 it = iter1
2659 2657 elif val2 is not None:
2660 2658 # might have been equality and both are empty
2661 2659 yield val2
2662 2660 for val in it:
2663 2661 yield val
2664 2662
2665 2663 def __contains__(self, x):
2666 2664 return x in self._r1 or x in self._r2
2667 2665
2668 2666 def set(self):
2669 2667 return self
2670 2668
2671 2669 def sort(self, reverse=False):
2672 2670 """Sort the added set
2673 2671
2674 2672 For this we use the cached list with all the generated values and if we
2675 2673 know they are ascending or descending we can sort them in a smart way.
2676 2674 """
2677 2675 if self._ascending is None:
2678 2676 self._list.sort(reverse=reverse)
2679 2677 self._ascending = not reverse
2680 2678 else:
2681 2679 if bool(self._ascending) == bool(reverse):
2682 2680 self.reverse()
2683 2681
2684 2682 def isascending(self):
2685 2683 return self._ascending is not None and self._ascending
2686 2684
2687 2685 def isdescending(self):
2688 2686 return self._ascending is not None and not self._ascending
2689 2687
2690 2688 def reverse(self):
2691 2689 self._list.reverse()
2692 2690 if self._ascending is not None:
2693 2691 self._ascending = not self._ascending
2694 2692
2695 2693 def first(self):
2696 2694 if self:
2697 2695 return self._list.first()
2698 2696 return None
2699 2697
2700 2698 def last(self):
2701 2699 if self:
2702 2700 return self._list.last()
2703 2701 return None
2704 2702
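# _iterordered above is essentially a deduplicating merge of two sorted
# streams. A rough standard-library equivalent for the ascending case (a
# sketch only, not used by this module):
def _dedupmerge_sketch(iter1, iter2):
    # both inputs must already be ascending; equal values are yielded once
    import heapq
    last = object()
    for value in heapq.merge(iter1, iter2):
        if value != last:
            yield value
            last = value
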
2705 2703 class generatorset(abstractsmartset):
2706 2704 """Wrap a generator for lazy iteration
2707 2705
2708 2706 Wrapper structure for generators that provides lazy membership and can
2709 2707 be iterated more than once.
2710 2708 When asked for membership it generates values until either it finds the
2711 2709 requested one or has gone through all the elements in the generator
2712 2710 """
2713 2711 def __init__(self, gen, iterasc=None):
2714 2712 """
2715 2713 gen: a generator producing the values for the generatorset.
2716 2714 """
2717 2715 self._gen = gen
2718 2716 self._asclist = None
2719 2717 self._cache = {}
2720 2718 self._genlist = []
2721 2719 self._finished = False
2722 2720 self._ascending = True
2723 2721 if iterasc is not None:
2724 2722 if iterasc:
2725 2723 self.fastasc = self._iterator
2726 2724 self.__contains__ = self._asccontains
2727 2725 else:
2728 2726 self.fastdesc = self._iterator
2729 2727 self.__contains__ = self._desccontains
2730 2728
2731 2729 def __nonzero__(self):
2732 2730 for r in self:
2733 2731 return True
2734 2732 return False
2735 2733
2736 2734 def __contains__(self, x):
2737 2735 if x in self._cache:
2738 2736 return self._cache[x]
2739 2737
2740 2738 # Use new values only, as existing values would be cached.
2741 2739 for l in self._consumegen():
2742 2740 if l == x:
2743 2741 return True
2744 2742
2745 2743 self._cache[x] = False
2746 2744 return False
2747 2745
2748 2746 def _asccontains(self, x):
2749 2747 """version of contains optimised for ascending generator"""
2750 2748 if x in self._cache:
2751 2749 return self._cache[x]
2752 2750
2753 2751 # Use new values only, as existing values would be cached.
2754 2752 for l in self._consumegen():
2755 2753 if l == x:
2756 2754 return True
2757 2755 if l > x:
2758 2756 break
2759 2757
2760 2758 self._cache[x] = False
2761 2759 return False
2762 2760
2763 2761 def _desccontains(self, x):
2764 2762 """version of contains optimised for descending generator"""
2765 2763 if x in self._cache:
2766 2764 return self._cache[x]
2767 2765
2768 2766 # Use new values only, as existing values would be cached.
2769 2767 for l in self._consumegen():
2770 2768 if l == x:
2771 2769 return True
2772 2770 if l < x:
2773 2771 break
2774 2772
2775 2773 self._cache[x] = False
2776 2774 return False
2777 2775
2778 2776 def __iter__(self):
2779 2777 if self._ascending:
2780 2778 it = self.fastasc
2781 2779 else:
2782 2780 it = self.fastdesc
2783 2781 if it is not None:
2784 2782 return it()
2785 2783 # we need to consume the iterator
2786 2784 for x in self._consumegen():
2787 2785 pass
2788 2786 # recall the same code
2789 2787 return iter(self)
2790 2788
2791 2789 def _iterator(self):
2792 2790 if self._finished:
2793 2791 return iter(self._genlist)
2794 2792
2795 2793 # We have to use this complex iteration strategy to allow multiple
2796 2794 # iterations at the same time. We need to be able to pick up revisions
2797 2795 # consumed from `_consumegen` and added to genlist by another iterator.
2798 2796 #
2799 2797 # Getting rid of it would provide about a 15% speed up on this
2800 2798 # iteration.
2801 2799 genlist = self._genlist
2802 2800 nextrev = self._consumegen().next
2803 2801 _len = len # cache global lookup
2804 2802 def gen():
2805 2803 i = 0
2806 2804 while True:
2807 2805 if i < _len(genlist):
2808 2806 yield genlist[i]
2809 2807 else:
2810 2808 yield nextrev()
2811 2809 i += 1
2812 2810 return gen()
2813 2811
2814 2812 def _consumegen(self):
2815 2813 cache = self._cache
2816 2814 genlist = self._genlist.append
2817 2815 for item in self._gen:
2818 2816 cache[item] = True
2819 2817 genlist(item)
2820 2818 yield item
2821 2819 if not self._finished:
2822 2820 self._finished = True
2823 2821 asc = self._genlist[:]
2824 2822 asc.sort()
2825 2823 self._asclist = asc
2826 2824 self.fastasc = asc.__iter__
2827 2825 self.fastdesc = asc.__reversed__
2828 2826
2829 2827 def set(self):
2830 2828 return self
2831 2829
2832 2830 def sort(self, reverse=False):
2833 2831 self._ascending = not reverse
2834 2832
2835 2833 def reverse(self):
2836 2834 self._ascending = not self._ascending
2837 2835
2838 2836 def isascending(self):
2839 2837 return self._ascending
2840 2838
2841 2839 def isdescending(self):
2842 2840 return not self._ascending
2843 2841
2844 2842 def first(self):
2845 2843 if self._ascending:
2846 2844 it = self.fastasc
2847 2845 else:
2848 2846 it = self.fastdesc
2849 2847 if it is None:
2850 2848 # we need to consume all and try again
2851 2849 for x in self._consumegen():
2852 2850 pass
2853 2851 return self.first()
2854 2852 if self:
2855 2853 return it().next()
2856 2854 return None
2857 2855
2858 2856 def last(self):
2859 2857 if self._ascending:
2860 2858 it = self.fastdesc
2861 2859 else:
2862 2860 it = self.fastasc
2863 2861 if it is None:
2864 2862 # we need to consume all and try again
2865 2863 for x in self._consumegen():
2866 2864 pass
2867 2865 return self.last()
2868 2866 if self:
2869 2867 return it().next()
2870 2868 return None
2871 2869
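# For example (a sketch): xs = generatorset(iter(xrange(10)), iterasc=True);
# asking "5 in xs" consumes the underlying generator only until 5 is produced
# and caches everything seen on the way, so a later "3 in xs" is answered from
# the cache, and iterating xs afterwards replays the cached prefix before
# pulling new values from the generator.
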
2872 2870 def spanset(repo, start=None, end=None):
2873 2871 """factory function to dispatch between fullreposet and actual spanset
2874 2872
2875 2873 Feel free to update all spanset call sites and kill this function at some
2876 2874 point.
2877 2875 """
2878 2876 if start is None and end is None:
2879 2877 return fullreposet(repo)
2880 2878 return _spanset(repo, start, end)
2881 2879
2882 2880
2883 2881 class _spanset(abstractsmartset):
2884 2882 """Duck type for baseset class which represents a range of revisions and
2885 2883 can work lazily and without having all the range in memory
2886 2884
2887 2885 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2888 2886 notable points:
2889 2887 - when x > y it will be automatically descending,
2890 2888 - revisions filtered by the current repoview will be skipped.
2891 2889
2892 2890 """
2893 2891 def __init__(self, repo, start=0, end=None):
2894 2892 """
2895 2893 start: first revision included in the set
2896 2894 (defaults to 0)
2897 2895 end: first revision excluded (last + 1)
2898 2896 (defaults to len(repo))
2899 2897
2900 2898 Spanset will be descending if `end` < `start`.
2901 2899 """
2902 2900 if end is None:
2903 2901 end = len(repo)
2904 2902 self._ascending = start <= end
2905 2903 if not self._ascending:
2906 2904 start, end = end + 1, start +1
2907 2905 self._start = start
2908 2906 self._end = end
2909 2907 self._hiddenrevs = repo.changelog.filteredrevs
2910 2908
2911 2909 def sort(self, reverse=False):
2912 2910 self._ascending = not reverse
2913 2911
2914 2912 def reverse(self):
2915 2913 self._ascending = not self._ascending
2916 2914
2917 2915 def _iterfilter(self, iterrange):
2918 2916 s = self._hiddenrevs
2919 2917 for r in iterrange:
2920 2918 if r not in s:
2921 2919 yield r
2922 2920
2923 2921 def __iter__(self):
2924 2922 if self._ascending:
2925 2923 return self.fastasc()
2926 2924 else:
2927 2925 return self.fastdesc()
2928 2926
2929 2927 def fastasc(self):
2930 2928 iterrange = xrange(self._start, self._end)
2931 2929 if self._hiddenrevs:
2932 2930 return self._iterfilter(iterrange)
2933 2931 return iter(iterrange)
2934 2932
2935 2933 def fastdesc(self):
2936 2934 iterrange = xrange(self._end - 1, self._start - 1, -1)
2937 2935 if self._hiddenrevs:
2938 2936 return self._iterfilter(iterrange)
2939 2937 return iter(iterrange)
2940 2938
2941 2939 def __contains__(self, rev):
2942 2940 hidden = self._hiddenrevs
2943 2941 return ((self._start <= rev < self._end)
2944 2942 and not (hidden and rev in hidden))
2945 2943
2946 2944 def __nonzero__(self):
2947 2945 for r in self:
2948 2946 return True
2949 2947 return False
2950 2948
2951 2949 def __len__(self):
2952 2950 if not self._hiddenrevs:
2953 2951 return abs(self._end - self._start)
2954 2952 else:
2955 2953 count = 0
2956 2954 start = self._start
2957 2955 end = self._end
2958 2956 for rev in self._hiddenrevs:
2959 2957 if (end < rev <= start) or (start <= rev < end):
2960 2958 count += 1
2961 2959 return abs(self._end - self._start) - count
2962 2960
2963 2961 def __getitem__(self, x):
2964 2962 # Basic implementation to be changed in future patches.
2965 2963 l = baseset([r for r in self])
2966 2964 return l[x]
2967 2965
2968 2966 def set(self):
2969 2967 return self
2970 2968
2971 2969 def isascending(self):
2972 2970 return self._start <= self._end
2973 2971
2974 2972 def isdescending(self):
2975 2973 return self._start >= self._end
2976 2974
2977 2975 def first(self):
2978 2976 if self._ascending:
2979 2977 it = self.fastasc
2980 2978 else:
2981 2979 it = self.fastdesc
2982 2980 for x in it():
2983 2981 return x
2984 2982 return None
2985 2983
2986 2984 def last(self):
2987 2985 if self._ascending:
2988 2986 it = self.fastdesc
2989 2987 else:
2990 2988 it = self.fastasc
2991 2989 for x in it():
2992 2990 return x
2993 2991 return None
2994 2992
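# For example (ignoring hidden revisions): _spanset(repo, 2, 5) iterates
# 2, 3, 4 while _spanset(repo, 5, 2) iterates 5, 4, 3, behaving like
# xrange(x, y) except that a descending range is produced automatically when
# start > end.
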
2995 2993 class fullreposet(_spanset):
2996 2994 """a set containing all revisions in the repo
2997 2995
2998 2996 This class exists to host special optimisation.
2999 2997 """
3000 2998
3001 2999 def __init__(self, repo):
3002 3000 super(fullreposet, self).__init__(repo)
3003 3001
3004 3002 def __and__(self, other):
3005 3003 """fullrepo & other -> other
3006 3004
3007 3005 As self contains the whole repo, all of the other set should also be in
3008 3006 self. Therefore `self & other = other`.
3009 3007
3010 3008 This boldly assumes the other contains valid revs only.
3011 3009 """
3012 3010 # other is not a smartset, make it so
3013 3011 if not util.safehasattr(other, 'set'):
3014 3012 # filter out hidden revisions
3015 3013 # (this boldly assumes all smartset are pure)
3016 3014 #
3017 3015 # `other` was used with "&", let's assume this is a set like
3018 3016 # object.
3019 3017 other = baseset(other - self._hiddenrevs)
3020 3018 elif not util.safehasattr(other, 'ascending'):
3021 3019 # "other" is a generatorset, not a real smartset;
3022 3020 # we fall back to the old way (sad kitten)
3023 3021 return super(fullreposet, self).__and__(other)
3024 3022
3025 3023 # preserve order:
3026 3024 #
3027 3025 # this is probably useless and harmful in multiple cases but matches
3028 3026 # the current behavior.
3029 3027 if self.isascending():
3030 3028 other.ascending()
3031 3029 else:
3032 3030 other.descending()
3033 3031 return other
3034 3032
3035 3033 # tell hggettext to extract docstrings from these functions:
3036 3034 i18nfunctions = symbols.values()