filteredset: drop explicit order management...
Pierre-Yves David - r22862:9e5576f8 default
@@ -1,3010 +1,2993 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.sort(reverse=True)
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return generatorset(iterate(), iterasc=False)
50 50
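# A minimal sketch of the ``cut`` trick above: slicing with ``[:None]`` keeps
# both parents while ``[:1]`` keeps only the first, with a plain tuple standing
# in for cl.parentrevs(rev):
#
#   >>> parents = (12, 7)
#   >>> parents[:False and 1 or None]   # followfirst=False -> cut=None
#   (12, 7)
#   >>> parents[:True and 1 or None]    # followfirst=True -> cut=1
#   (12,)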
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return generatorset(iterate(), iterasc=True)
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset()
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = list(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset()
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
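# Note: the leading number in each ``elements`` entry is the binding strength
# used when parsing, so ``not``/``!`` (10) bind tighter than ``and``/``&`` (5),
# which bind tighter than ``or``/``|``/``+`` (4); ``a or b and not c`` is
# therefore read as ``a or (b and (not c))``, and ``+`` is simply another
# spelling of ``or``.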
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
213 213
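# A couple of extra token streams in the style of the doctest above (sketches,
# assuming this module is imported as usual):
#
#   >>> list(tokenize("not 10"))
#   [('not', None, 0), ('symbol', '10', 4), ('end', None, 6)]
#
# Raw strings shield characters such as ``-`` that would otherwise be split
# into separate tokens:
#
#   >>> list(tokenize("r'a-b'"))
#   [('string', 'a-b', 2), ('end', None, 6)]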
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'set'):
239 239 return s
240 240 return baseset(s)
241 241
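# These predicates are normally reached through the ``symbols``/``methods``
# tables rather than called directly.  A hedged end-to-end sketch, assuming a
# local repository with at least three revisions exists at '.':
#
#   >>> from mercurial import ui as uimod, hg
#   >>> repo = hg.repository(uimod.ui(), '.')
#   >>> list(repo.revs('limit(all(), 3)'))
#   [0, 1, 2]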
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset()
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 cl = baseset(repo.changelog)
269 269 m = getset(repo, cl, x)
270 270 n = getset(repo, cl, y)
271 271
272 272 if not m or not n:
273 273 return baseset()
274 274 m, n = m.first(), n.last()
275 275
276 276 if m < n:
277 277 r = spanset(repo, m, n + 1)
278 278 else:
279 279 r = spanset(repo, m, n - 1)
280 280 return r & subset
281 281
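# Note: because the span runs from first() of the left operand to last() of the
# right one, a reversed range iterates downwards -- the revset ``3:1`` yields
# 3, 2, 1 while ``1:3`` yields 1, 2, 3 (both restricted to ``subset``).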
282 282 def dagrange(repo, subset, x, y):
283 283 r = spanset(repo)
284 284 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
285 285 s = subset.set()
286 286 return xs.filter(s.__contains__)
287 287
288 288 def andset(repo, subset, x, y):
289 289 return getset(repo, getset(repo, subset, x), y)
290 290
291 291 def orset(repo, subset, x, y):
292 292 xl = getset(repo, subset, x)
293 293 yl = getset(repo, subset - xl, y)
294 294 return xl + yl
295 295
296 296 def notset(repo, subset, x):
297 297 return subset - getset(repo, subset, x)
298 298
299 299 def listset(repo, subset, a, b):
300 300 raise error.ParseError(_("can't use a list in this context"))
301 301
302 302 def func(repo, subset, a, b):
303 303 if a[0] == 'symbol' and a[1] in symbols:
304 304 return symbols[a[1]](repo, subset, b)
305 305 raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
309 309 def adds(repo, subset, x):
310 310 """``adds(pattern)``
311 311 Changesets that add a file matching pattern.
312 312
313 313 The pattern without explicit kind like ``glob:`` is expected to be
314 314 relative to the current directory and match against a file or a
315 315 directory.
316 316 """
317 317 # i18n: "adds" is a keyword
318 318 pat = getstring(x, _("adds requires a pattern"))
319 319 return checkstatus(repo, subset, pat, 1)
320 320
321 321 def ancestor(repo, subset, x):
322 322 """``ancestor(*changeset)``
323 323 A greatest common ancestor of the changesets.
324 324
325 325 Accepts 0 or more changesets.
326 326 Will return empty list when passed no args.
327 327 Greatest common ancestor of a single changeset is that changeset.
328 328 """
329 329 # i18n: "ancestor" is a keyword
330 330 l = getlist(x)
331 331 rl = spanset(repo)
332 332 anc = None
333 333
334 334 # (getset(repo, rl, i) for i in l) generates a list of lists
335 335 for revs in (getset(repo, rl, i) for i in l):
336 336 for r in revs:
337 337 if anc is None:
338 338 anc = repo[r]
339 339 else:
340 340 anc = anc.ancestor(repo[r])
341 341
342 342 if anc is not None and anc.rev() in subset:
343 343 return baseset([anc.rev()])
344 344 return baseset()
345 345
346 346 def _ancestors(repo, subset, x, followfirst=False):
347 347 args = getset(repo, spanset(repo), x)
348 348 if not args:
349 349 return baseset()
350 350 s = _revancestors(repo, args, followfirst)
351 351 return subset.filter(s.__contains__)
352 352
353 353 def ancestors(repo, subset, x):
354 354 """``ancestors(set)``
355 355 Changesets that are ancestors of a changeset in set.
356 356 """
357 357 return _ancestors(repo, subset, x)
358 358
359 359 def _firstancestors(repo, subset, x):
360 360 # ``_firstancestors(set)``
361 361 # Like ``ancestors(set)`` but follows only the first parents.
362 362 return _ancestors(repo, subset, x, followfirst=True)
363 363
364 364 def ancestorspec(repo, subset, x, n):
365 365 """``set~n``
366 366 Changesets that are the Nth ancestor (first parents only) of a changeset
367 367 in set.
368 368 """
369 369 try:
370 370 n = int(n[1])
371 371 except (TypeError, ValueError):
372 372 raise error.ParseError(_("~ expects a number"))
373 373 ps = set()
374 374 cl = repo.changelog
375 375 for r in getset(repo, baseset(cl), x):
376 376 for i in range(n):
377 377 r = cl.parentrevs(r)[0]
378 378 ps.add(r)
379 379 return subset & ps
380 380
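# Note: ``x~n`` walks n steps back along first parents only, so ``tip~2`` is
# the first-parent grandparent of tip (equivalent to ``tip^1^1`` or ``tip^^``),
# and ``x~0`` is x itself.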
381 381 def author(repo, subset, x):
382 382 """``author(string)``
383 383 Alias for ``user(string)``.
384 384 """
385 385 # i18n: "author" is a keyword
386 386 n = encoding.lower(getstring(x, _("author requires a string")))
387 387 kind, pattern, matcher = _substringmatcher(n)
388 388 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
389 389
390 390 def only(repo, subset, x):
391 391 """``only(set, [set])``
392 392 Changesets that are ancestors of the first set that are not ancestors
393 393 of any other head in the repo. If a second set is specified, the result
394 394 is ancestors of the first set that are not ancestors of the second set
395 395 (i.e. ::<set1> - ::<set2>).
396 396 """
397 397 cl = repo.changelog
398 398 # i18n: "only" is a keyword
399 399 args = getargs(x, 1, 2, _('only takes one or two arguments'))
400 400 include = getset(repo, spanset(repo), args[0]).set()
401 401 if len(args) == 1:
402 402 if len(include) == 0:
403 403 return baseset()
404 404
405 405 descendants = set(_revdescendants(repo, include, False))
406 406 exclude = [rev for rev in cl.headrevs()
407 407 if not rev in descendants and not rev in include]
408 408 else:
409 409 exclude = getset(repo, spanset(repo), args[1])
410 410
411 411 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
412 412 return filteredset(subset, results.__contains__)
413 413
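# A usage sketch: with two arguments this is the ``::<set1> - ::<set2>``
# difference from the docstring, e.g.
#
#   hg log -r 'only(default, stable)'
#
# lists what the default branch has that stable does not; with one argument the
# excluded set is every repository head that is neither in, nor descended from,
# the first set.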
414 414 def bisect(repo, subset, x):
415 415 """``bisect(string)``
416 416 Changesets marked in the specified bisect status:
417 417
418 418 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
419 419 - ``goods``, ``bads`` : csets topologically good/bad
420 420 - ``range`` : csets taking part in the bisection
421 421 - ``pruned`` : csets that are goods, bads or skipped
422 422 - ``untested`` : csets whose fate is yet unknown
423 423 - ``ignored`` : csets ignored due to DAG topology
424 424 - ``current`` : the cset currently being bisected
425 425 """
426 426 # i18n: "bisect" is a keyword
427 427 status = getstring(x, _("bisect requires a string")).lower()
428 428 state = set(hbisect.get(repo, status))
429 429 return subset & state
430 430
431 431 # Backward-compatibility
432 432 # - no help entry so that we do not advertise it any more
433 433 def bisected(repo, subset, x):
434 434 return bisect(repo, subset, x)
435 435
436 436 def bookmark(repo, subset, x):
437 437 """``bookmark([name])``
438 438 The named bookmark or all bookmarks.
439 439
440 440 If `name` starts with `re:`, the remainder of the name is treated as
441 441 a regular expression. To match a bookmark that actually starts with `re:`,
442 442 use the prefix `literal:`.
443 443 """
444 444 # i18n: "bookmark" is a keyword
445 445 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
446 446 if args:
447 447 bm = getstring(args[0],
448 448 # i18n: "bookmark" is a keyword
449 449 _('the argument to bookmark must be a string'))
450 450 kind, pattern, matcher = _stringmatcher(bm)
451 451 bms = set()
452 452 if kind == 'literal':
453 453 bmrev = repo._bookmarks.get(pattern, None)
454 454 if not bmrev:
455 455 raise util.Abort(_("bookmark '%s' does not exist") % bm)
456 456 bms.add(repo[bmrev].rev())
457 457 else:
458 458 matchrevs = set()
459 459 for name, bmrev in repo._bookmarks.iteritems():
460 460 if matcher(name):
461 461 matchrevs.add(bmrev)
462 462 if not matchrevs:
463 463 raise util.Abort(_("no bookmarks exist that match '%s'")
464 464 % pattern)
465 465 for bmrev in matchrevs:
466 466 bms.add(repo[bmrev].rev())
467 467 else:
468 468 bms = set([repo[r].rev()
469 469 for r in repo._bookmarks.values()])
470 470 bms -= set([node.nullrev])
471 471 return subset & bms
472 472
473 473 def branch(repo, subset, x):
474 474 """``branch(string or set)``
475 475 All changesets belonging to the given branch or the branches of the given
476 476 changesets.
477 477
478 478 If `string` starts with `re:`, the remainder of the name is treated as
479 479 a regular expression. To match a branch that actually starts with `re:`,
480 480 use the prefix `literal:`.
481 481 """
482 482 try:
483 483 b = getstring(x, '')
484 484 except error.ParseError:
485 485 # not a string, but another revspec, e.g. tip()
486 486 pass
487 487 else:
488 488 kind, pattern, matcher = _stringmatcher(b)
489 489 if kind == 'literal':
490 490 # note: falls through to the revspec case if no branch with
491 491 # this name exists
492 492 if pattern in repo.branchmap():
493 493 return subset.filter(lambda r: matcher(repo[r].branch()))
494 494 else:
495 495 return subset.filter(lambda r: matcher(repo[r].branch()))
496 496
497 497 s = getset(repo, spanset(repo), x)
498 498 b = set()
499 499 for r in s:
500 500 b.add(repo[r].branch())
501 501 s = s.set()
502 502 return subset.filter(lambda r: r in s or repo[r].branch() in b)
503 503
504 504 def bumped(repo, subset, x):
505 505 """``bumped()``
506 506 Mutable changesets marked as successors of public changesets.
507 507
508 508 Only non-public and non-obsolete changesets can be `bumped`.
509 509 """
510 510 # i18n: "bumped" is a keyword
511 511 getargs(x, 0, 0, _("bumped takes no arguments"))
512 512 bumped = obsmod.getrevs(repo, 'bumped')
513 513 return subset & bumped
514 514
515 515 def bundle(repo, subset, x):
516 516 """``bundle()``
517 517 Changesets in the bundle.
518 518
519 519 Bundle must be specified by the -R option."""
520 520
521 521 try:
522 522 bundlerevs = repo.changelog.bundlerevs
523 523 except AttributeError:
524 524 raise util.Abort(_("no bundle provided - specify with -R"))
525 525 return subset & bundlerevs
526 526
527 527 def checkstatus(repo, subset, pat, field):
528 528 hasset = matchmod.patkind(pat) == 'set'
529 529
530 530 def matches(x):
531 531 m = None
532 532 fname = None
533 533 c = repo[x]
534 534 if not m or hasset:
535 535 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
536 536 if not m.anypats() and len(m.files()) == 1:
537 537 fname = m.files()[0]
538 538 if fname is not None:
539 539 if fname not in c.files():
540 540 return False
541 541 else:
542 542 for f in c.files():
543 543 if m(f):
544 544 break
545 545 else:
546 546 return False
547 547 files = repo.status(c.p1().node(), c.node())[field]
548 548 if fname is not None:
549 549 if fname in files:
550 550 return True
551 551 else:
552 552 for f in files:
553 553 if m(f):
554 554 return True
555 555
556 556 return subset.filter(matches)
557 557
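# Note: ``field`` indexes the tuple returned by repo.status(), whose leading
# entries are (modified, added, removed, ...); hence modifies() passes 0,
# adds() passes 1 and removes() passes 2.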
558 558 def _children(repo, narrow, parentset):
559 559 cs = set()
560 560 if not parentset:
561 561 return baseset(cs)
562 562 pr = repo.changelog.parentrevs
563 563 minrev = min(parentset)
564 564 for r in narrow:
565 565 if r <= minrev:
566 566 continue
567 567 for p in pr(r):
568 568 if p in parentset:
569 569 cs.add(r)
570 570 return baseset(cs)
571 571
572 572 def children(repo, subset, x):
573 573 """``children(set)``
574 574 Child changesets of changesets in set.
575 575 """
576 576 s = getset(repo, baseset(repo), x).set()
577 577 cs = _children(repo, subset, s)
578 578 return subset & cs
579 579
580 580 def closed(repo, subset, x):
581 581 """``closed()``
582 582 Changeset is closed.
583 583 """
584 584 # i18n: "closed" is a keyword
585 585 getargs(x, 0, 0, _("closed takes no arguments"))
586 586 return subset.filter(lambda r: repo[r].closesbranch())
587 587
588 588 def contains(repo, subset, x):
589 589 """``contains(pattern)``
590 590 The revision's manifest contains a file matching pattern (but might not
591 591 modify it). See :hg:`help patterns` for information about file patterns.
592 592
593 593 The pattern without explicit kind like ``glob:`` is expected to be
594 594 relative to the current directory and match against a file exactly
595 595 for efficiency.
596 596 """
597 597 # i18n: "contains" is a keyword
598 598 pat = getstring(x, _("contains requires a pattern"))
599 599
600 600 def matches(x):
601 601 if not matchmod.patkind(pat):
602 602 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
603 603 if pats in repo[x]:
604 604 return True
605 605 else:
606 606 c = repo[x]
607 607 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
608 608 for f in c.manifest():
609 609 if m(f):
610 610 return True
611 611 return False
612 612
613 613 return subset.filter(matches)
614 614
615 615 def converted(repo, subset, x):
616 616 """``converted([id])``
617 617 Changesets converted from the given identifier in the old repository if
618 618 present, or all converted changesets if no identifier is specified.
619 619 """
620 620
621 621 # There is exactly no chance of resolving the revision, so do a simple
622 622 # string compare and hope for the best
623 623
624 624 rev = None
625 625 # i18n: "converted" is a keyword
626 626 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
627 627 if l:
628 628 # i18n: "converted" is a keyword
629 629 rev = getstring(l[0], _('converted requires a revision'))
630 630
631 631 def _matchvalue(r):
632 632 source = repo[r].extra().get('convert_revision', None)
633 633 return source is not None and (rev is None or source.startswith(rev))
634 634
635 635 return subset.filter(lambda r: _matchvalue(r))
636 636
637 637 def date(repo, subset, x):
638 638 """``date(interval)``
639 639 Changesets within the interval, see :hg:`help dates`.
640 640 """
641 641 # i18n: "date" is a keyword
642 642 ds = getstring(x, _("date requires a string"))
643 643 dm = util.matchdate(ds)
644 644 return subset.filter(lambda x: dm(repo[x].date()[0]))
645 645
646 646 def desc(repo, subset, x):
647 647 """``desc(string)``
648 648 Search commit message for string. The match is case-insensitive.
649 649 """
650 650 # i18n: "desc" is a keyword
651 651 ds = encoding.lower(getstring(x, _("desc requires a string")))
652 652
653 653 def matches(x):
654 654 c = repo[x]
655 655 return ds in encoding.lower(c.description())
656 656
657 657 return subset.filter(matches)
658 658
659 659 def _descendants(repo, subset, x, followfirst=False):
660 660 args = getset(repo, spanset(repo), x)
661 661 if not args:
662 662 return baseset()
663 663 s = _revdescendants(repo, args, followfirst)
664 664
665 665 # Both sets need to be ascending in order to lazily return the union
666 666 # in the correct order.
667 667 base = subset & args
668 668 desc = subset & s
669 669 result = base + desc
670 670 if subset.isascending():
671 671 result.sort()
672 672 elif subset.isdescending():
673 673 result.sort(reverse=True)
674 674 else:
675 675 result = subset & result
676 676 return result
677 677
678 678 def descendants(repo, subset, x):
679 679 """``descendants(set)``
680 680 Changesets which are descendants of changesets in set.
681 681 """
682 682 return _descendants(repo, subset, x)
683 683
684 684 def _firstdescendants(repo, subset, x):
685 685 # ``_firstdescendants(set)``
686 686 # Like ``descendants(set)`` but follows only the first parents.
687 687 return _descendants(repo, subset, x, followfirst=True)
688 688
689 689 def destination(repo, subset, x):
690 690 """``destination([set])``
691 691 Changesets that were created by a graft, transplant or rebase operation,
692 692 with the given revisions specified as the source. Omitting the optional set
693 693 is the same as passing all().
694 694 """
695 695 if x is not None:
696 696 args = getset(repo, spanset(repo), x).set()
697 697 else:
698 698 args = getall(repo, spanset(repo), x).set()
699 699
700 700 dests = set()
701 701
702 702 # subset contains all of the possible destinations that can be returned, so
703 703 # iterate over them and see if their source(s) were provided in the args.
704 704 # Even if the immediate src of r is not in the args, src's source (or
705 705 # further back) may be. Scanning back further than the immediate src allows
706 706 # transitive transplants and rebases to yield the same results as transitive
707 707 # grafts.
708 708 for r in subset:
709 709 src = _getrevsource(repo, r)
710 710 lineage = None
711 711
712 712 while src is not None:
713 713 if lineage is None:
714 714 lineage = list()
715 715
716 716 lineage.append(r)
717 717
718 718 # The visited lineage is a match if the current source is in the arg
719 719 # set. Since every candidate dest is visited by way of iterating
720 720 # subset, any dests further back in the lineage will be tested by a
721 721 # different iteration over subset. Likewise, if the src was already
722 722 # selected, the current lineage can be selected without going back
723 723 # further.
724 724 if src in args or src in dests:
725 725 dests.update(lineage)
726 726 break
727 727
728 728 r = src
729 729 src = _getrevsource(repo, r)
730 730
731 731 return subset.filter(dests.__contains__)
732 732
733 733 def divergent(repo, subset, x):
734 734 """``divergent()``
735 735 Final successors of changesets with an alternative set of final successors.
736 736 """
737 737 # i18n: "divergent" is a keyword
738 738 getargs(x, 0, 0, _("divergent takes no arguments"))
739 739 divergent = obsmod.getrevs(repo, 'divergent')
740 740 return subset & divergent
741 741
742 742 def draft(repo, subset, x):
743 743 """``draft()``
744 744 Changeset in draft phase."""
745 745 # i18n: "draft" is a keyword
746 746 getargs(x, 0, 0, _("draft takes no arguments"))
747 747 pc = repo._phasecache
748 748 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
749 749
750 750 def extinct(repo, subset, x):
751 751 """``extinct()``
752 752 Obsolete changesets with obsolete descendants only.
753 753 """
754 754 # i18n: "extinct" is a keyword
755 755 getargs(x, 0, 0, _("extinct takes no arguments"))
756 756 extincts = obsmod.getrevs(repo, 'extinct')
757 757 return subset & extincts
758 758
759 759 def extra(repo, subset, x):
760 760 """``extra(label, [value])``
761 761 Changesets with the given label in the extra metadata, with the given
762 762 optional value.
763 763
764 764 If `value` starts with `re:`, the remainder of the value is treated as
765 765 a regular expression. To match a value that actually starts with `re:`,
766 766 use the prefix `literal:`.
767 767 """
768 768
769 769 # i18n: "extra" is a keyword
770 770 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
771 771 # i18n: "extra" is a keyword
772 772 label = getstring(l[0], _('first argument to extra must be a string'))
773 773 value = None
774 774
775 775 if len(l) > 1:
776 776 # i18n: "extra" is a keyword
777 777 value = getstring(l[1], _('second argument to extra must be a string'))
778 778 kind, value, matcher = _stringmatcher(value)
779 779
780 780 def _matchvalue(r):
781 781 extra = repo[r].extra()
782 782 return label in extra and (value is None or matcher(extra[label]))
783 783
784 784 return subset.filter(lambda r: _matchvalue(r))
785 785
786 786 def filelog(repo, subset, x):
787 787 """``filelog(pattern)``
788 788 Changesets connected to the specified filelog.
789 789
790 790 For performance reasons, visits only revisions mentioned in the file-level
791 791 filelog, rather than filtering through all changesets (much faster, but
792 792 doesn't include deletes or duplicate changes). For a slower, more accurate
793 793 result, use ``file()``.
794 794
795 795 The pattern without explicit kind like ``glob:`` is expected to be
796 796 relative to the current directory and match against a file exactly
797 797 for efficiency.
798 798 """
799 799
800 800 # i18n: "filelog" is a keyword
801 801 pat = getstring(x, _("filelog requires a pattern"))
802 802 s = set()
803 803
804 804 if not matchmod.patkind(pat):
805 805 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
806 806 fl = repo.file(f)
807 807 for fr in fl:
808 808 s.add(fl.linkrev(fr))
809 809 else:
810 810 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
811 811 for f in repo[None]:
812 812 if m(f):
813 813 fl = repo.file(f)
814 814 for fr in fl:
815 815 s.add(fl.linkrev(fr))
816 816
817 817 return subset & s
818 818
819 819 def first(repo, subset, x):
820 820 """``first(set, [n])``
821 821 An alias for limit().
822 822 """
823 823 return limit(repo, subset, x)
824 824
825 825 def _follow(repo, subset, x, name, followfirst=False):
826 826 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
827 827 c = repo['.']
828 828 if l:
829 829 x = getstring(l[0], _("%s expected a filename") % name)
830 830 if x in c:
831 831 cx = c[x]
832 832 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
833 833 # include the revision responsible for the most recent version
834 834 s.add(cx.linkrev())
835 835 else:
836 836 return baseset()
837 837 else:
838 838 s = _revancestors(repo, baseset([c.rev()]), followfirst)
839 839
840 840 return subset & s
841 841
842 842 def follow(repo, subset, x):
843 843 """``follow([file])``
844 844 An alias for ``::.`` (ancestors of the working copy's first parent).
845 845 If a filename is specified, the history of the given file is followed,
846 846 including copies.
847 847 """
848 848 return _follow(repo, subset, x, 'follow')
849 849
850 850 def _followfirst(repo, subset, x):
851 851 # ``followfirst([file])``
852 852 # Like ``follow([file])`` but follows only the first parent of
853 853 # every revision or file revision.
854 854 return _follow(repo, subset, x, '_followfirst', followfirst=True)
855 855
856 856 def getall(repo, subset, x):
857 857 """``all()``
858 858 All changesets, the same as ``0:tip``.
859 859 """
860 860 # i18n: "all" is a keyword
861 861 getargs(x, 0, 0, _("all takes no arguments"))
862 862 return subset
863 863
864 864 def grep(repo, subset, x):
865 865 """``grep(regex)``
866 866 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
867 867 to ensure special escape characters are handled correctly. Unlike
868 868 ``keyword(string)``, the match is case-sensitive.
869 869 """
870 870 try:
871 871 # i18n: "grep" is a keyword
872 872 gr = re.compile(getstring(x, _("grep requires a string")))
873 873 except re.error, e:
874 874 raise error.ParseError(_('invalid match pattern: %s') % e)
875 875
876 876 def matches(x):
877 877 c = repo[x]
878 878 for e in c.files() + [c.user(), c.description()]:
879 879 if gr.search(e):
880 880 return True
881 881 return False
882 882
883 883 return subset.filter(matches)
884 884
885 885 def _matchfiles(repo, subset, x):
886 886 # _matchfiles takes a revset list of prefixed arguments:
887 887 #
888 888 # [p:foo, i:bar, x:baz]
889 889 #
890 890 # builds a match object from them and filters subset. Allowed
891 891 # prefixes are 'p:' for regular patterns, 'i:' for include
892 892 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
893 893 # a revision identifier, or the empty string to reference the
894 894 # working directory, from which the match object is
895 895 # initialized. Use 'd:' to set the default matching mode, default
896 896 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
897 897
898 898 # i18n: "_matchfiles" is a keyword
899 899 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
900 900 pats, inc, exc = [], [], []
901 901 hasset = False
902 902 rev, default = None, None
903 903 for arg in l:
904 904 # i18n: "_matchfiles" is a keyword
905 905 s = getstring(arg, _("_matchfiles requires string arguments"))
906 906 prefix, value = s[:2], s[2:]
907 907 if prefix == 'p:':
908 908 pats.append(value)
909 909 elif prefix == 'i:':
910 910 inc.append(value)
911 911 elif prefix == 'x:':
912 912 exc.append(value)
913 913 elif prefix == 'r:':
914 914 if rev is not None:
915 915 # i18n: "_matchfiles" is a keyword
916 916 raise error.ParseError(_('_matchfiles expected at most one '
917 917 'revision'))
918 918 rev = value
919 919 elif prefix == 'd:':
920 920 if default is not None:
921 921 # i18n: "_matchfiles" is a keyword
922 922 raise error.ParseError(_('_matchfiles expected at most one '
923 923 'default mode'))
924 924 default = value
925 925 else:
926 926 # i18n: "_matchfiles" is a keyword
927 927 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
928 928 if not hasset and matchmod.patkind(value) == 'set':
929 929 hasset = True
930 930 if not default:
931 931 default = 'glob'
932 932
933 933 def matches(x):
934 934 m = None
935 935 c = repo[x]
936 936 if not m or (hasset and rev is None):
937 937 ctx = c
938 938 if rev is not None:
939 939 ctx = repo[rev or None]
940 940 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
941 941 exclude=exc, ctx=ctx, default=default)
942 942 for f in c.files():
943 943 if m(f):
944 944 return True
945 945 return False
946 946
947 947 return subset.filter(matches)
948 948
949 949 def hasfile(repo, subset, x):
950 950 """``file(pattern)``
951 951 Changesets affecting files matched by pattern.
952 952
953 953 For a faster but less accurate result, consider using ``filelog()``
954 954 instead.
955 955
956 956 This predicate uses ``glob:`` as the default kind of pattern.
957 957 """
958 958 # i18n: "file" is a keyword
959 959 pat = getstring(x, _("file requires a pattern"))
960 960 return _matchfiles(repo, subset, ('string', 'p:' + pat))
961 961
962 962 def head(repo, subset, x):
963 963 """``head()``
964 964 Changeset is a named branch head.
965 965 """
966 966 # i18n: "head" is a keyword
967 967 getargs(x, 0, 0, _("head takes no arguments"))
968 968 hs = set()
969 969 for b, ls in repo.branchmap().iteritems():
970 970 hs.update(repo[h].rev() for h in ls)
971 971 return baseset(hs).filter(subset.__contains__)
972 972
973 973 def heads(repo, subset, x):
974 974 """``heads(set)``
975 975 Members of set with no children in set.
976 976 """
977 977 s = getset(repo, subset, x)
978 978 ps = parents(repo, subset, x)
979 979 return s - ps
980 980
981 981 def hidden(repo, subset, x):
982 982 """``hidden()``
983 983 Hidden changesets.
984 984 """
985 985 # i18n: "hidden" is a keyword
986 986 getargs(x, 0, 0, _("hidden takes no arguments"))
987 987 hiddenrevs = repoview.filterrevs(repo, 'visible')
988 988 return subset & hiddenrevs
989 989
990 990 def keyword(repo, subset, x):
991 991 """``keyword(string)``
992 992 Search commit message, user name, and names of changed files for
993 993 string. The match is case-insensitive.
994 994 """
995 995 # i18n: "keyword" is a keyword
996 996 kw = encoding.lower(getstring(x, _("keyword requires a string")))
997 997
998 998 def matches(r):
999 999 c = repo[r]
1000 1000 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1001 1001 c.description()])
1002 1002
1003 1003 return subset.filter(matches)
1004 1004
1005 1005 def limit(repo, subset, x):
1006 1006 """``limit(set, [n])``
1007 1007 First n members of set, defaulting to 1.
1008 1008 """
1009 1009 # i18n: "limit" is a keyword
1010 1010 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1011 1011 try:
1012 1012 lim = 1
1013 1013 if len(l) == 2:
1014 1014 # i18n: "limit" is a keyword
1015 1015 lim = int(getstring(l[1], _("limit requires a number")))
1016 1016 except (TypeError, ValueError):
1017 1017 # i18n: "limit" is a keyword
1018 1018 raise error.ParseError(_("limit expects a number"))
1019 1019 ss = subset.set()
1020 1020 os = getset(repo, spanset(repo), l[0])
1021 1021 result = []
1022 1022 it = iter(os)
1023 1023 for x in xrange(lim):
1024 1024 try:
1025 1025 y = it.next()
1026 1026 if y in ss:
1027 1027 result.append(y)
1028 1028 except (StopIteration):
1029 1029 break
1030 1030 return baseset(result)
1031 1031
1032 1032 def last(repo, subset, x):
1033 1033 """``last(set, [n])``
1034 1034 Last n members of set, defaulting to 1.
1035 1035 """
1036 1036 # i18n: "last" is a keyword
1037 1037 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1038 1038 try:
1039 1039 lim = 1
1040 1040 if len(l) == 2:
1041 1041 # i18n: "last" is a keyword
1042 1042 lim = int(getstring(l[1], _("last requires a number")))
1043 1043 except (TypeError, ValueError):
1044 1044 # i18n: "last" is a keyword
1045 1045 raise error.ParseError(_("last expects a number"))
1046 1046 ss = subset.set()
1047 1047 os = getset(repo, spanset(repo), l[0])
1048 1048 os.reverse()
1049 1049 result = []
1050 1050 it = iter(os)
1051 1051 for x in xrange(lim):
1052 1052 try:
1053 1053 y = it.next()
1054 1054 if y in ss:
1055 1055 result.append(y)
1056 1056 except (StopIteration):
1057 1057 break
1058 1058 return baseset(result)
1059 1059
1060 1060 def maxrev(repo, subset, x):
1061 1061 """``max(set)``
1062 1062 Changeset with highest revision number in set.
1063 1063 """
1064 1064 os = getset(repo, spanset(repo), x)
1065 1065 if os:
1066 1066 m = os.max()
1067 1067 if m in subset:
1068 1068 return baseset([m])
1069 1069 return baseset()
1070 1070
1071 1071 def merge(repo, subset, x):
1072 1072 """``merge()``
1073 1073 Changeset is a merge changeset.
1074 1074 """
1075 1075 # i18n: "merge" is a keyword
1076 1076 getargs(x, 0, 0, _("merge takes no arguments"))
1077 1077 cl = repo.changelog
1078 1078 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1079 1079
1080 1080 def branchpoint(repo, subset, x):
1081 1081 """``branchpoint()``
1082 1082 Changesets with more than one child.
1083 1083 """
1084 1084 # i18n: "branchpoint" is a keyword
1085 1085 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1086 1086 cl = repo.changelog
1087 1087 if not subset:
1088 1088 return baseset()
1089 1089 baserev = min(subset)
1090 1090 parentscount = [0]*(len(repo) - baserev)
1091 1091 for r in cl.revs(start=baserev + 1):
1092 1092 for p in cl.parentrevs(r):
1093 1093 if p >= baserev:
1094 1094 parentscount[p - baserev] += 1
1095 1095 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1096 1096
1097 1097 def minrev(repo, subset, x):
1098 1098 """``min(set)``
1099 1099 Changeset with lowest revision number in set.
1100 1100 """
1101 1101 os = getset(repo, spanset(repo), x)
1102 1102 if os:
1103 1103 m = os.min()
1104 1104 if m in subset:
1105 1105 return baseset([m])
1106 1106 return baseset()
1107 1107
1108 1108 def modifies(repo, subset, x):
1109 1109 """``modifies(pattern)``
1110 1110 Changesets modifying files matched by pattern.
1111 1111
1112 1112 The pattern without explicit kind like ``glob:`` is expected to be
1113 1113 relative to the current directory and match against a file or a
1114 1114 directory.
1115 1115 """
1116 1116 # i18n: "modifies" is a keyword
1117 1117 pat = getstring(x, _("modifies requires a pattern"))
1118 1118 return checkstatus(repo, subset, pat, 0)
1119 1119
1120 1120 def node_(repo, subset, x):
1121 1121 """``id(string)``
1122 1122 Revision non-ambiguously specified by the given hex string prefix.
1123 1123 """
1124 1124 # i18n: "id" is a keyword
1125 1125 l = getargs(x, 1, 1, _("id requires one argument"))
1126 1126 # i18n: "id" is a keyword
1127 1127 n = getstring(l[0], _("id requires a string"))
1128 1128 if len(n) == 40:
1129 1129 rn = repo[n].rev()
1130 1130 else:
1131 1131 rn = None
1132 1132 pm = repo.changelog._partialmatch(n)
1133 1133 if pm is not None:
1134 1134 rn = repo.changelog.rev(pm)
1135 1135
1136 1136 return subset.filter(lambda r: r == rn)
1137 1137
1138 1138 def obsolete(repo, subset, x):
1139 1139 """``obsolete()``
1140 1140 Mutable changeset with a newer version."""
1141 1141 # i18n: "obsolete" is a keyword
1142 1142 getargs(x, 0, 0, _("obsolete takes no arguments"))
1143 1143 obsoletes = obsmod.getrevs(repo, 'obsolete')
1144 1144 return subset & obsoletes
1145 1145
1146 1146 def origin(repo, subset, x):
1147 1147 """``origin([set])``
1148 1148 Changesets that were specified as a source for the grafts, transplants or
1149 1149 rebases that created the given revisions. Omitting the optional set is the
1150 1150 same as passing all(). If a changeset created by these operations is itself
1151 1151 specified as a source for one of these operations, only the source changeset
1152 1152 for the first operation is selected.
1153 1153 """
1154 1154 if x is not None:
1155 1155 args = getset(repo, spanset(repo), x).set()
1156 1156 else:
1157 1157 args = getall(repo, spanset(repo), x).set()
1158 1158
1159 1159 def _firstsrc(rev):
1160 1160 src = _getrevsource(repo, rev)
1161 1161 if src is None:
1162 1162 return None
1163 1163
1164 1164 while True:
1165 1165 prev = _getrevsource(repo, src)
1166 1166
1167 1167 if prev is None:
1168 1168 return src
1169 1169 src = prev
1170 1170
1171 1171 o = set([_firstsrc(r) for r in args])
1172 1172 o -= set([None])
1173 1173 return subset & o
1174 1174
1175 1175 def outgoing(repo, subset, x):
1176 1176 """``outgoing([path])``
1177 1177 Changesets not found in the specified destination repository, or the
1178 1178 default push location.
1179 1179 """
1180 1180 import hg # avoid start-up nasties
1181 1181 # i18n: "outgoing" is a keyword
1182 1182 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1183 1183 # i18n: "outgoing" is a keyword
1184 1184 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1185 1185 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1186 1186 dest, branches = hg.parseurl(dest)
1187 1187 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1188 1188 if revs:
1189 1189 revs = [repo.lookup(rev) for rev in revs]
1190 1190 other = hg.peer(repo, {}, dest)
1191 1191 repo.ui.pushbuffer()
1192 1192 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1193 1193 repo.ui.popbuffer()
1194 1194 cl = repo.changelog
1195 1195 o = set([cl.rev(r) for r in outgoing.missing])
1196 1196 return subset & o
1197 1197
1198 1198 def p1(repo, subset, x):
1199 1199 """``p1([set])``
1200 1200 First parent of changesets in set, or the working directory.
1201 1201 """
1202 1202 if x is None:
1203 1203 p = repo[x].p1().rev()
1204 1204 if p >= 0:
1205 1205 return subset & baseset([p])
1206 1206 return baseset()
1207 1207
1208 1208 ps = set()
1209 1209 cl = repo.changelog
1210 1210 for r in getset(repo, spanset(repo), x):
1211 1211 ps.add(cl.parentrevs(r)[0])
1212 1212 ps -= set([node.nullrev])
1213 1213 return subset & ps
1214 1214
1215 1215 def p2(repo, subset, x):
1216 1216 """``p2([set])``
1217 1217 Second parent of changesets in set, or the working directory.
1218 1218 """
1219 1219 if x is None:
1220 1220 ps = repo[x].parents()
1221 1221 try:
1222 1222 p = ps[1].rev()
1223 1223 if p >= 0:
1224 1224 return subset & baseset([p])
1225 1225 return baseset()
1226 1226 except IndexError:
1227 1227 return baseset()
1228 1228
1229 1229 ps = set()
1230 1230 cl = repo.changelog
1231 1231 for r in getset(repo, spanset(repo), x):
1232 1232 ps.add(cl.parentrevs(r)[1])
1233 1233 ps -= set([node.nullrev])
1234 1234 return subset & ps
1235 1235
1236 1236 def parents(repo, subset, x):
1237 1237 """``parents([set])``
1238 1238 The set of all parents for all changesets in set, or the working directory.
1239 1239 """
1240 1240 if x is None:
1241 1241 ps = set(p.rev() for p in repo[x].parents())
1242 1242 else:
1243 1243 ps = set()
1244 1244 cl = repo.changelog
1245 1245 for r in getset(repo, spanset(repo), x):
1246 1246 ps.update(cl.parentrevs(r))
1247 1247 ps -= set([node.nullrev])
1248 1248 return subset & ps
1249 1249
1250 1250 def parentspec(repo, subset, x, n):
1251 1251 """``set^0``
1252 1252 The set.
1253 1253 ``set^1`` (or ``set^``), ``set^2``
1254 1254 First or second parent, respectively, of all changesets in set.
1255 1255 """
1256 1256 try:
1257 1257 n = int(n[1])
1258 1258 if n not in (0, 1, 2):
1259 1259 raise ValueError
1260 1260 except (TypeError, ValueError):
1261 1261 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1262 1262 ps = set()
1263 1263 cl = repo.changelog
1264 1264 for r in getset(repo, baseset(cl), x):
1265 1265 if n == 0:
1266 1266 ps.add(r)
1267 1267 elif n == 1:
1268 1268 ps.add(cl.parentrevs(r)[0])
1269 1269 elif n == 2:
1270 1270 parents = cl.parentrevs(r)
1271 1271 if len(parents) > 1:
1272 1272 ps.add(parents[1])
1273 1273 return subset & ps
1274 1274
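# Note: ``x^`` and ``x^1`` select the first parent, ``x^2`` the second parent
# (empty for non-merges), and ``x^0`` is x itself, e.g.
#
#   hg log -r 'tip^2'
#
# prints the second parent of tip only when tip is a merge.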
1275 1275 def present(repo, subset, x):
1276 1276 """``present(set)``
1277 1277 An empty set, if any revision in set isn't found; otherwise,
1278 1278 all revisions in set.
1279 1279
1280 1280 If any of specified revisions is not present in the local repository,
1281 1281 the query is normally aborted. But this predicate allows the query
1282 1282 to continue even in such cases.
1283 1283 """
1284 1284 try:
1285 1285 return getset(repo, subset, x)
1286 1286 except error.RepoLookupError:
1287 1287 return baseset()
1288 1288
1289 1289 def public(repo, subset, x):
1290 1290 """``public()``
1291 1291 Changeset in public phase."""
1292 1292 # i18n: "public" is a keyword
1293 1293 getargs(x, 0, 0, _("public takes no arguments"))
1294 1294 pc = repo._phasecache
1295 1295 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1296 1296
1297 1297 def remote(repo, subset, x):
1298 1298 """``remote([id [,path]])``
1299 1299 Local revision that corresponds to the given identifier in a
1300 1300 remote repository, if present. Here, the '.' identifier is a
1301 1301 synonym for the current local branch.
1302 1302 """
1303 1303
1304 1304 import hg # avoid start-up nasties
1305 1305 # i18n: "remote" is a keyword
1306 1306 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1307 1307
1308 1308 q = '.'
1309 1309 if len(l) > 0:
1310 1310 # i18n: "remote" is a keyword
1311 1311 q = getstring(l[0], _("remote requires a string id"))
1312 1312 if q == '.':
1313 1313 q = repo['.'].branch()
1314 1314
1315 1315 dest = ''
1316 1316 if len(l) > 1:
1317 1317 # i18n: "remote" is a keyword
1318 1318 dest = getstring(l[1], _("remote requires a repository path"))
1319 1319 dest = repo.ui.expandpath(dest or 'default')
1320 1320 dest, branches = hg.parseurl(dest)
1321 1321 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1322 1322 if revs:
1323 1323 revs = [repo.lookup(rev) for rev in revs]
1324 1324 other = hg.peer(repo, {}, dest)
1325 1325 n = other.lookup(q)
1326 1326 if n in repo:
1327 1327 r = repo[n].rev()
1328 1328 if r in subset:
1329 1329 return baseset([r])
1330 1330 return baseset()
1331 1331
1332 1332 def removes(repo, subset, x):
1333 1333 """``removes(pattern)``
1334 1334 Changesets which remove files matching pattern.
1335 1335
1336 1336 The pattern without explicit kind like ``glob:`` is expected to be
1337 1337 relative to the current directory and match against a file or a
1338 1338 directory.
1339 1339 """
1340 1340 # i18n: "removes" is a keyword
1341 1341 pat = getstring(x, _("removes requires a pattern"))
1342 1342 return checkstatus(repo, subset, pat, 2)
1343 1343
1344 1344 def rev(repo, subset, x):
1345 1345 """``rev(number)``
1346 1346 Revision with the given numeric identifier.
1347 1347 """
1348 1348 # i18n: "rev" is a keyword
1349 1349 l = getargs(x, 1, 1, _("rev requires one argument"))
1350 1350 try:
1351 1351 # i18n: "rev" is a keyword
1352 1352 l = int(getstring(l[0], _("rev requires a number")))
1353 1353 except (TypeError, ValueError):
1354 1354 # i18n: "rev" is a keyword
1355 1355 raise error.ParseError(_("rev expects a number"))
1356 1356 return subset & baseset([l])
1357 1357
1358 1358 def matching(repo, subset, x):
1359 1359 """``matching(revision [, field])``
1360 1360 Changesets in which a given set of fields match the set of fields in the
1361 1361 selected revision or set.
1362 1362
1363 1363 To match more than one field pass the list of fields to match separated
1364 1364 by spaces (e.g. ``author description``).
1365 1365
1366 1366 Valid fields are most regular revision fields and some special fields.
1367 1367
1368 1368 Regular revision fields are ``description``, ``author``, ``branch``,
1369 1369 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1370 1370 and ``diff``.
1371 1371 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1372 1372 contents of the revision. Two revisions matching their ``diff`` will
1373 1373 also match their ``files``.
1374 1374
1375 1375 Special fields are ``summary`` and ``metadata``:
1376 1376 ``summary`` matches the first line of the description.
1377 1377 ``metadata`` is equivalent to matching ``description user date``
1378 1378 (i.e. it matches the main metadata fields).
1379 1379
1380 1380 ``metadata`` is the default field which is used when no fields are
1381 1381 specified. You can match more than one field at a time.
1382 1382 """
1383 1383 # i18n: "matching" is a keyword
1384 1384 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1385 1385
1386 1386 revs = getset(repo, baseset(repo.changelog), l[0])
1387 1387
1388 1388 fieldlist = ['metadata']
1389 1389 if len(l) > 1:
1390 1390 fieldlist = getstring(l[1],
1391 1391 # i18n: "matching" is a keyword
1392 1392 _("matching requires a string "
1393 1393 "as its second argument")).split()
1394 1394
1395 1395 # Make sure that there are no repeated fields,
1396 1396 # expand the 'special' 'metadata' field type
1397 1397 # and check the 'files' whenever we check the 'diff'
1398 1398 fields = []
1399 1399 for field in fieldlist:
1400 1400 if field == 'metadata':
1401 1401 fields += ['user', 'description', 'date']
1402 1402 elif field == 'diff':
1403 1403 # a revision matching the diff must also match the files
1404 1404 # since matching the diff is very costly, make sure to
1405 1405 # also match the files first
1406 1406 fields += ['files', 'diff']
1407 1407 else:
1408 1408 if field == 'author':
1409 1409 field = 'user'
1410 1410 fields.append(field)
1411 1411 fields = set(fields)
1412 1412 if 'summary' in fields and 'description' in fields:
1413 1413 # If a revision matches its description it also matches its summary
1414 1414 fields.discard('summary')
1415 1415
1416 1416 # We may want to match more than one field
1417 1417 # Not all fields take the same amount of time to be matched
1418 1418 # Sort the selected fields in order of increasing matching cost
1419 1419 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1420 1420 'files', 'description', 'substate', 'diff']
1421 1421 def fieldkeyfunc(f):
1422 1422 try:
1423 1423 return fieldorder.index(f)
1424 1424 except ValueError:
1425 1425 # assume an unknown field is very costly
1426 1426 return len(fieldorder)
1427 1427 fields = list(fields)
1428 1428 fields.sort(key=fieldkeyfunc)
1429 1429
1430 1430 # Each field will be matched with its own "getfield" function
1431 1431 # which will be added to the getfieldfuncs array of functions
1432 1432 getfieldfuncs = []
1433 1433 _funcs = {
1434 1434 'user': lambda r: repo[r].user(),
1435 1435 'branch': lambda r: repo[r].branch(),
1436 1436 'date': lambda r: repo[r].date(),
1437 1437 'description': lambda r: repo[r].description(),
1438 1438 'files': lambda r: repo[r].files(),
1439 1439 'parents': lambda r: repo[r].parents(),
1440 1440 'phase': lambda r: repo[r].phase(),
1441 1441 'substate': lambda r: repo[r].substate,
1442 1442 'summary': lambda r: repo[r].description().splitlines()[0],
1443 1443 'diff': lambda r: list(repo[r].diff(git=True),)
1444 1444 }
1445 1445 for info in fields:
1446 1446 getfield = _funcs.get(info, None)
1447 1447 if getfield is None:
1448 1448 raise error.ParseError(
1449 1449 # i18n: "matching" is a keyword
1450 1450 _("unexpected field name passed to matching: %s") % info)
1451 1451 getfieldfuncs.append(getfield)
1452 1452 # convert the getfield array of functions into a "getinfo" function
1453 1453 # which returns an array of field values (or a single value if there
1454 1454 # is only one field to match)
1455 1455 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1456 1456
1457 1457 def matches(x):
1458 1458 for rev in revs:
1459 1459 target = getinfo(rev)
1460 1460 match = True
1461 1461 for n, f in enumerate(getfieldfuncs):
1462 1462 if target[n] != f(x):
1463 1463 match = False
1464 1464 if match:
1465 1465 return True
1466 1466 return False
1467 1467
1468 1468 return subset.filter(matches)
1469 1469
1470 1470 def reverse(repo, subset, x):
1471 1471 """``reverse(set)``
1472 1472 Reverse order of set.
1473 1473 """
1474 1474 l = getset(repo, subset, x)
1475 1475 l.reverse()
1476 1476 return l
1477 1477
1478 1478 def roots(repo, subset, x):
1479 1479 """``roots(set)``
1480 1480 Changesets in set with no parent changeset in set.
1481 1481 """
1482 1482 s = getset(repo, spanset(repo), x).set()
1483 1483 subset = baseset([r for r in s if r in subset.set()])
1484 1484 cs = _children(repo, subset, s)
1485 1485 return subset - cs
1486 1486
1487 1487 def secret(repo, subset, x):
1488 1488 """``secret()``
1489 1489 Changeset in secret phase."""
1490 1490 # i18n: "secret" is a keyword
1491 1491 getargs(x, 0, 0, _("secret takes no arguments"))
1492 1492 pc = repo._phasecache
1493 1493 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1494 1494
1495 1495 def sort(repo, subset, x):
1496 1496 """``sort(set[, [-]key...])``
1497 1497 Sort set by keys. The default sort order is ascending, specify a key
1498 1498 as ``-key`` to sort in descending order.
1499 1499
1500 1500 The keys can be:
1501 1501
1502 1502 - ``rev`` for the revision number,
1503 1503 - ``branch`` for the branch name,
1504 1504 - ``desc`` for the commit message (description),
1505 1505 - ``user`` for user name (``author`` can be used as an alias),
1506 1506 - ``date`` for the commit date
1507 1507 """
1508 1508 # i18n: "sort" is a keyword
1509 1509 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1510 1510 keys = "rev"
1511 1511 if len(l) == 2:
1512 1512 # i18n: "sort" is a keyword
1513 1513 keys = getstring(l[1], _("sort spec must be a string"))
1514 1514
1515 1515 s = l[0]
1516 1516 keys = keys.split()
1517 1517 l = []
1518 1518 def invert(s):
1519 1519 return "".join(chr(255 - ord(c)) for c in s)
1520 1520 revs = getset(repo, subset, s)
1521 1521 if keys == ["rev"]:
1522 1522 revs.sort()
1523 1523 return revs
1524 1524 elif keys == ["-rev"]:
1525 1525 revs.sort(reverse=True)
1526 1526 return revs
1527 1527 for r in revs:
1528 1528 c = repo[r]
1529 1529 e = []
1530 1530 for k in keys:
1531 1531 if k == 'rev':
1532 1532 e.append(r)
1533 1533 elif k == '-rev':
1534 1534 e.append(-r)
1535 1535 elif k == 'branch':
1536 1536 e.append(c.branch())
1537 1537 elif k == '-branch':
1538 1538 e.append(invert(c.branch()))
1539 1539 elif k == 'desc':
1540 1540 e.append(c.description())
1541 1541 elif k == '-desc':
1542 1542 e.append(invert(c.description()))
1543 1543 elif k in 'user author':
1544 1544 e.append(c.user())
1545 1545 elif k in '-user -author':
1546 1546 e.append(invert(c.user()))
1547 1547 elif k == 'date':
1548 1548 e.append(c.date()[0])
1549 1549 elif k == '-date':
1550 1550 e.append(-c.date()[0])
1551 1551 else:
1552 1552 raise error.ParseError(_("unknown sort key %r") % k)
1553 1553 e.append(r)
1554 1554 l.append(e)
1555 1555 l.sort()
1556 1556 return baseset([e[-1] for e in l])
1557 1557
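# A minimal sketch of the ``invert()`` helper above: complementing every byte
# makes an ascending sort of the inverted keys equivalent to a descending sort
# of the originals:
#
#   >>> invert = lambda s: "".join(chr(255 - ord(c)) for c in s)
#   >>> sorted(['alice', 'bob'], key=invert)
#   ['bob', 'alice']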
1558 1558 def _stringmatcher(pattern):
1559 1559 """
1560 1560 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1561 1561 returns the matcher name, pattern, and matcher function.
1562 1562 missing or unknown prefixes are treated as literal matches.
1563 1563
1564 1564 helper for tests:
1565 1565 >>> def test(pattern, *tests):
1566 1566 ... kind, pattern, matcher = _stringmatcher(pattern)
1567 1567 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1568 1568
1569 1569 exact matching (no prefix):
1570 1570 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1571 1571 ('literal', 'abcdefg', [False, False, True])
1572 1572
1573 1573 regex matching ('re:' prefix)
1574 1574 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1575 1575 ('re', 'a.+b', [False, False, True])
1576 1576
1577 1577 force exact matches ('literal:' prefix)
1578 1578 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1579 1579 ('literal', 're:foobar', [False, True])
1580 1580
1581 1581 unknown prefixes are ignored and treated as literals
1582 1582 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1583 1583 ('literal', 'foo:bar', [False, False, True])
1584 1584 """
1585 1585 if pattern.startswith('re:'):
1586 1586 pattern = pattern[3:]
1587 1587 try:
1588 1588 regex = re.compile(pattern)
1589 1589 except re.error, e:
1590 1590 raise error.ParseError(_('invalid regular expression: %s')
1591 1591 % e)
1592 1592 return 're', pattern, regex.search
1593 1593 elif pattern.startswith('literal:'):
1594 1594 pattern = pattern[8:]
1595 1595 return 'literal', pattern, pattern.__eq__
1596 1596
1597 1597 def _substringmatcher(pattern):
1598 1598 kind, pattern, matcher = _stringmatcher(pattern)
1599 1599 if kind == 'literal':
1600 1600 matcher = lambda s: pattern in s
1601 1601 return kind, pattern, matcher
1602 1602
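# A sketch of how _substringmatcher() differs from _stringmatcher(): literal
# patterns are downgraded to substring containment, which is what author() and
# user() rely on:
#
#   >>> kind, pattern, m = _substringmatcher('bob')
#   >>> (kind, m('bob@example.com'), m('alice@example.com'))
#   ('literal', True, False)
#   >>> bool(_substringmatcher('re:bob|alice')[2]('alice@example.com'))
#   True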
1603 1603 def tag(repo, subset, x):
1604 1604 """``tag([name])``
1605 1605 The specified tag by name, or all tagged revisions if no name is given.
1606 1606
1607 1607 If `name` starts with `re:`, the remainder of the name is treated as
1608 1608 a regular expression. To match a tag that actually starts with `re:`,
1609 1609 use the prefix `literal:`.
1610 1610 """
1611 1611 # i18n: "tag" is a keyword
1612 1612 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1613 1613 cl = repo.changelog
1614 1614 if args:
1615 1615 pattern = getstring(args[0],
1616 1616 # i18n: "tag" is a keyword
1617 1617 _('the argument to tag must be a string'))
1618 1618 kind, pattern, matcher = _stringmatcher(pattern)
1619 1619 if kind == 'literal':
1620 1620 # avoid resolving all tags
1621 1621 tn = repo._tagscache.tags.get(pattern, None)
1622 1622 if tn is None:
1623 1623 raise util.Abort(_("tag '%s' does not exist") % pattern)
1624 1624 s = set([repo[tn].rev()])
1625 1625 else:
1626 1626 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1627 1627 else:
1628 1628 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1629 1629 return subset & s
1630 1630
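# A hedged usage sketch of the tag() predicate above, written as revset
# expressions a user would type (tag names are hypothetical):
#
#   tag()                  - every tagged revision, excluding the implicit 'tip'
#   tag('1.0')             - the revision carrying tag '1.0' (aborts if missing)
#   tag('re:^v\d+$')       - revisions whose tag matches the regular expression
#   tag('literal:re:odd')  - a tag whose name really starts with 're:'
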
1631 1631 def tagged(repo, subset, x):
1632 1632 return tag(repo, subset, x)
1633 1633
1634 1634 def unstable(repo, subset, x):
1635 1635 """``unstable()``
1636 1636 Non-obsolete changesets with obsolete ancestors.
1637 1637 """
1638 1638 # i18n: "unstable" is a keyword
1639 1639 getargs(x, 0, 0, _("unstable takes no arguments"))
1640 1640 unstables = obsmod.getrevs(repo, 'unstable')
1641 1641 return subset & unstables
1642 1642
1643 1643
1644 1644 def user(repo, subset, x):
1645 1645 """``user(string)``
1646 1646 User name contains string. The match is case-insensitive.
1647 1647
1648 1648 If `string` starts with `re:`, the remainder of the string is treated as
1649 1649 a regular expression. To match a user that actually contains `re:`, use
1650 1650 the prefix `literal:`.
1651 1651 """
1652 1652 return author(repo, subset, x)
1653 1653
1654 1654 # for internal use
1655 1655 def _list(repo, subset, x):
1656 1656 s = getstring(x, "internal error")
1657 1657 if not s:
1658 1658 return baseset()
1659 1659 ls = [repo[r].rev() for r in s.split('\0')]
1660 1660 s = subset.set()
1661 1661 return baseset([r for r in ls if r in s])
1662 1662
1663 1663 # for internal use
1664 1664 def _intlist(repo, subset, x):
1665 1665 s = getstring(x, "internal error")
1666 1666 if not s:
1667 1667 return baseset()
1668 1668 ls = [int(r) for r in s.split('\0')]
1669 1669 s = subset.set()
1670 1670 return baseset([r for r in ls if r in s])
1671 1671
1672 1672 # for internal use
1673 1673 def _hexlist(repo, subset, x):
1674 1674 s = getstring(x, "internal error")
1675 1675 if not s:
1676 1676 return baseset()
1677 1677 cl = repo.changelog
1678 1678 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1679 1679 s = subset.set()
1680 1680 return baseset([r for r in ls if r in s])
1681 1681
1682 1682 symbols = {
1683 1683 "adds": adds,
1684 1684 "all": getall,
1685 1685 "ancestor": ancestor,
1686 1686 "ancestors": ancestors,
1687 1687 "_firstancestors": _firstancestors,
1688 1688 "author": author,
1689 1689 "only": only,
1690 1690 "bisect": bisect,
1691 1691 "bisected": bisected,
1692 1692 "bookmark": bookmark,
1693 1693 "branch": branch,
1694 1694 "branchpoint": branchpoint,
1695 1695 "bumped": bumped,
1696 1696 "bundle": bundle,
1697 1697 "children": children,
1698 1698 "closed": closed,
1699 1699 "contains": contains,
1700 1700 "converted": converted,
1701 1701 "date": date,
1702 1702 "desc": desc,
1703 1703 "descendants": descendants,
1704 1704 "_firstdescendants": _firstdescendants,
1705 1705 "destination": destination,
1706 1706 "divergent": divergent,
1707 1707 "draft": draft,
1708 1708 "extinct": extinct,
1709 1709 "extra": extra,
1710 1710 "file": hasfile,
1711 1711 "filelog": filelog,
1712 1712 "first": first,
1713 1713 "follow": follow,
1714 1714 "_followfirst": _followfirst,
1715 1715 "grep": grep,
1716 1716 "head": head,
1717 1717 "heads": heads,
1718 1718 "hidden": hidden,
1719 1719 "id": node_,
1720 1720 "keyword": keyword,
1721 1721 "last": last,
1722 1722 "limit": limit,
1723 1723 "_matchfiles": _matchfiles,
1724 1724 "max": maxrev,
1725 1725 "merge": merge,
1726 1726 "min": minrev,
1727 1727 "modifies": modifies,
1728 1728 "obsolete": obsolete,
1729 1729 "origin": origin,
1730 1730 "outgoing": outgoing,
1731 1731 "p1": p1,
1732 1732 "p2": p2,
1733 1733 "parents": parents,
1734 1734 "present": present,
1735 1735 "public": public,
1736 1736 "remote": remote,
1737 1737 "removes": removes,
1738 1738 "rev": rev,
1739 1739 "reverse": reverse,
1740 1740 "roots": roots,
1741 1741 "sort": sort,
1742 1742 "secret": secret,
1743 1743 "matching": matching,
1744 1744 "tag": tag,
1745 1745 "tagged": tagged,
1746 1746 "user": user,
1747 1747 "unstable": unstable,
1748 1748 "_list": _list,
1749 1749 "_intlist": _intlist,
1750 1750 "_hexlist": _hexlist,
1751 1751 }
1752 1752
1753 1753 # symbols which can't be used for a DoS attack for any given input
1754 1754 # (e.g. those which accept regexes as plain strings shouldn't be included)
1755 1755 # functions that just return a lot of changesets (like all) don't count here
1756 1756 safesymbols = set([
1757 1757 "adds",
1758 1758 "all",
1759 1759 "ancestor",
1760 1760 "ancestors",
1761 1761 "_firstancestors",
1762 1762 "author",
1763 1763 "bisect",
1764 1764 "bisected",
1765 1765 "bookmark",
1766 1766 "branch",
1767 1767 "branchpoint",
1768 1768 "bumped",
1769 1769 "bundle",
1770 1770 "children",
1771 1771 "closed",
1772 1772 "converted",
1773 1773 "date",
1774 1774 "desc",
1775 1775 "descendants",
1776 1776 "_firstdescendants",
1777 1777 "destination",
1778 1778 "divergent",
1779 1779 "draft",
1780 1780 "extinct",
1781 1781 "extra",
1782 1782 "file",
1783 1783 "filelog",
1784 1784 "first",
1785 1785 "follow",
1786 1786 "_followfirst",
1787 1787 "head",
1788 1788 "heads",
1789 1789 "hidden",
1790 1790 "id",
1791 1791 "keyword",
1792 1792 "last",
1793 1793 "limit",
1794 1794 "_matchfiles",
1795 1795 "max",
1796 1796 "merge",
1797 1797 "min",
1798 1798 "modifies",
1799 1799 "obsolete",
1800 1800 "origin",
1801 1801 "outgoing",
1802 1802 "p1",
1803 1803 "p2",
1804 1804 "parents",
1805 1805 "present",
1806 1806 "public",
1807 1807 "remote",
1808 1808 "removes",
1809 1809 "rev",
1810 1810 "reverse",
1811 1811 "roots",
1812 1812 "sort",
1813 1813 "secret",
1814 1814 "matching",
1815 1815 "tag",
1816 1816 "tagged",
1817 1817 "user",
1818 1818 "unstable",
1819 1819 "_list",
1820 1820 "_intlist",
1821 1821 "_hexlist",
1822 1822 ])
1823 1823
1824 1824 methods = {
1825 1825 "range": rangeset,
1826 1826 "dagrange": dagrange,
1827 1827 "string": stringset,
1828 1828 "symbol": symbolset,
1829 1829 "and": andset,
1830 1830 "or": orset,
1831 1831 "not": notset,
1832 1832 "list": listset,
1833 1833 "func": func,
1834 1834 "ancestor": ancestorspec,
1835 1835 "parent": parentspec,
1836 1836 "parentpost": p1,
1837 1837 }
1838 1838
1839 1839 def optimize(x, small):
1840 1840 if x is None:
1841 1841 return 0, x
1842 1842
1843 1843 smallbonus = 1
1844 1844 if small:
1845 1845 smallbonus = .5
1846 1846
1847 1847 op = x[0]
1848 1848 if op == 'minus':
1849 1849 return optimize(('and', x[1], ('not', x[2])), small)
1850 1850 elif op == 'dagrangepre':
1851 1851 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1852 1852 elif op == 'dagrangepost':
1853 1853 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1854 1854 elif op == 'rangepre':
1855 1855 return optimize(('range', ('string', '0'), x[1]), small)
1856 1856 elif op == 'rangepost':
1857 1857 return optimize(('range', x[1], ('string', 'tip')), small)
1858 1858 elif op == 'negate':
1859 1859 return optimize(('string',
1860 1860 '-' + getstring(x[1], _("can't negate that"))), small)
1861 1861 elif op in 'string symbol negate':
1862 1862 return smallbonus, x # single revisions are small
1863 1863 elif op == 'and':
1864 1864 wa, ta = optimize(x[1], True)
1865 1865 wb, tb = optimize(x[2], True)
1866 1866
1867 1867 # (::x and not ::y)/(not ::y and ::x) have a fast path
1868 1868 def isonly(revs, bases):
1869 1869 return (
1870 1870 revs[0] == 'func'
1871 1871 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1872 1872 and bases[0] == 'not'
1873 1873 and bases[1][0] == 'func'
1874 1874 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1875 1875
1876 1876 w = min(wa, wb)
1877 1877 if isonly(ta, tb):
1878 1878 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1879 1879 if isonly(tb, ta):
1880 1880 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1881 1881
1882 1882 if wa > wb:
1883 1883 return w, (op, tb, ta)
1884 1884 return w, (op, ta, tb)
1885 1885 elif op == 'or':
1886 1886 wa, ta = optimize(x[1], False)
1887 1887 wb, tb = optimize(x[2], False)
1888 1888 if wb < wa:
1889 1889 wb, wa = wa, wb
1890 1890 return max(wa, wb), (op, ta, tb)
1891 1891 elif op == 'not':
1892 1892 o = optimize(x[1], not small)
1893 1893 return o[0], (op, o[1])
1894 1894 elif op == 'parentpost':
1895 1895 o = optimize(x[1], small)
1896 1896 return o[0], (op, o[1])
1897 1897 elif op == 'group':
1898 1898 return optimize(x[1], small)
1899 1899 elif op in 'dagrange range list parent ancestorspec':
1900 1900 if op == 'parent':
1901 1901 # x^:y means (x^) : y, not x ^ (:y)
1902 1902 post = ('parentpost', x[1])
1903 1903 if x[2][0] == 'dagrangepre':
1904 1904 return optimize(('dagrange', post, x[2][1]), small)
1905 1905 elif x[2][0] == 'rangepre':
1906 1906 return optimize(('range', post, x[2][1]), small)
1907 1907
1908 1908 wa, ta = optimize(x[1], small)
1909 1909 wb, tb = optimize(x[2], small)
1910 1910 return wa + wb, (op, ta, tb)
1911 1911 elif op == 'func':
1912 1912 f = getstring(x[1], _("not a symbol"))
1913 1913 wa, ta = optimize(x[2], small)
1914 1914 if f in ("author branch closed date desc file grep keyword "
1915 1915 "outgoing user"):
1916 1916 w = 10 # slow
1917 1917 elif f in "modifies adds removes":
1918 1918 w = 30 # slower
1919 1919 elif f == "contains":
1920 1920 w = 100 # very slow
1921 1921 elif f == "ancestor":
1922 1922 w = 1 * smallbonus
1923 1923 elif f in "reverse limit first _intlist":
1924 1924 w = 0
1925 1925 elif f in "sort":
1926 1926 w = 10 # assume most sorts look at changelog
1927 1927 else:
1928 1928 w = 1
1929 1929 return w + wa, (op, x[1], ta)
1930 1930 return 1, x
1931 1931
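# An indicative sketch of the isonly() fast path in optimize() above:
# '::x and not ::y' is rewritten into the cheaper only(x, y) form.
#
#   >>> tree, pos = parse('::release and not ::stable')
#   >>> weight, tree = optimize(tree, True)
#   >>> tree[:2]
#   ('func', ('symbol', 'only'))
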
1932 1932 _aliasarg = ('func', ('symbol', '_aliasarg'))
1933 1933 def _getaliasarg(tree):
1934 1934 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1935 1935 return X, None otherwise.
1936 1936 """
1937 1937 if (len(tree) == 3 and tree[:2] == _aliasarg
1938 1938 and tree[2][0] == 'string'):
1939 1939 return tree[2][1]
1940 1940 return None
1941 1941
1942 1942 def _checkaliasarg(tree, known=None):
1943 1943 """Check tree contains no _aliasarg construct or only ones which
1944 1944 value is in known. Used to avoid alias placeholders injection.
1945 1945 """
1946 1946 if isinstance(tree, tuple):
1947 1947 arg = _getaliasarg(tree)
1948 1948 if arg is not None and (not known or arg not in known):
1949 1949 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1950 1950 for t in tree:
1951 1951 _checkaliasarg(t, known)
1952 1952
1953 1953 class revsetalias(object):
1954 1954 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1955 1955 args = None
1956 1956
1957 1957 def __init__(self, name, value):
1958 1958 '''Aliases like:
1959 1959
1960 1960 h = heads(default)
1961 1961 b($1) = ancestors($1) - ancestors(default)
1962 1962 '''
1963 1963 m = self.funcre.search(name)
1964 1964 if m:
1965 1965 self.name = m.group(1)
1966 1966 self.tree = ('func', ('symbol', m.group(1)))
1967 1967 self.args = [x.strip() for x in m.group(2).split(',')]
1968 1968 for arg in self.args:
1969 1969 # _aliasarg() is an unknown symbol only used to separate
1970 1970 # alias argument placeholders from regular strings.
1971 1971 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1972 1972 else:
1973 1973 self.name = name
1974 1974 self.tree = ('symbol', name)
1975 1975
1976 1976 self.replacement, pos = parse(value)
1977 1977 if pos != len(value):
1978 1978 raise error.ParseError(_('invalid token'), pos)
1979 1979 # Check for placeholder injection
1980 1980 _checkaliasarg(self.replacement, self.args)
1981 1981
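# A minimal sketch of the two alias shapes accepted by revsetalias above,
# mirroring its docstring (alias names and values are hypothetical):
#
#   >>> a = revsetalias('h', 'heads(default)')
#   >>> a.name, a.args, a.tree
#   ('h', None, ('symbol', 'h'))
#   >>> b = revsetalias('d($1)', 'sort(date($1))')
#   >>> b.name, b.args
#   ('d', ['$1'])
#
# In the parameterized form, every occurrence of '$1' in the value is wrapped
# in an _aliasarg() placeholder before parsing so that _expandargs() can later
# substitute the caller-supplied subtree.
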
1982 1982 def _getalias(aliases, tree):
1983 1983 """If tree looks like an unexpanded alias, return it. Return None
1984 1984 otherwise.
1985 1985 """
1986 1986 if isinstance(tree, tuple) and tree:
1987 1987 if tree[0] == 'symbol' and len(tree) == 2:
1988 1988 name = tree[1]
1989 1989 alias = aliases.get(name)
1990 1990 if alias and alias.args is None and alias.tree == tree:
1991 1991 return alias
1992 1992 if tree[0] == 'func' and len(tree) > 1:
1993 1993 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1994 1994 name = tree[1][1]
1995 1995 alias = aliases.get(name)
1996 1996 if alias and alias.args is not None and alias.tree == tree[:2]:
1997 1997 return alias
1998 1998 return None
1999 1999
2000 2000 def _expandargs(tree, args):
2001 2001 """Replace _aliasarg instances with the substitution value of the
2002 2002 same name in args, recursively.
2003 2003 """
2004 2004 if not tree or not isinstance(tree, tuple):
2005 2005 return tree
2006 2006 arg = _getaliasarg(tree)
2007 2007 if arg is not None:
2008 2008 return args[arg]
2009 2009 return tuple(_expandargs(t, args) for t in tree)
2010 2010
2011 2011 def _expandaliases(aliases, tree, expanding, cache):
2012 2012 """Expand aliases in tree, recursively.
2013 2013
2014 2014 'aliases' is a dictionary mapping user defined aliases to
2015 2015 revsetalias objects.
2016 2016 """
2017 2017 if not isinstance(tree, tuple):
2018 2018 # Do not expand raw strings
2019 2019 return tree
2020 2020 alias = _getalias(aliases, tree)
2021 2021 if alias is not None:
2022 2022 if alias in expanding:
2023 2023 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2024 2024 'detected') % alias.name)
2025 2025 expanding.append(alias)
2026 2026 if alias.name not in cache:
2027 2027 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2028 2028 expanding, cache)
2029 2029 result = cache[alias.name]
2030 2030 expanding.pop()
2031 2031 if alias.args is not None:
2032 2032 l = getlist(tree[2])
2033 2033 if len(l) != len(alias.args):
2034 2034 raise error.ParseError(
2035 2035 _('invalid number of arguments: %s') % len(l))
2036 2036 l = [_expandaliases(aliases, a, [], cache) for a in l]
2037 2037 result = _expandargs(result, dict(zip(alias.args, l)))
2038 2038 else:
2039 2039 result = tuple(_expandaliases(aliases, t, expanding, cache)
2040 2040 for t in tree)
2041 2041 return result
2042 2042
2043 2043 def findaliases(ui, tree):
2044 2044 _checkaliasarg(tree)
2045 2045 aliases = {}
2046 2046 for k, v in ui.configitems('revsetalias'):
2047 2047 alias = revsetalias(k, v)
2048 2048 aliases[alias.name] = alias
2049 2049 return _expandaliases(aliases, tree, [], {})
2050 2050
2051 2051 def parse(spec, lookup=None):
2052 2052 p = parser.parser(tokenize, elements)
2053 2053 return p.parse(spec, lookup=lookup)
2054 2054
2055 2055 def match(ui, spec, repo=None):
2056 2056 if not spec:
2057 2057 raise error.ParseError(_("empty query"))
2058 2058 lookup = None
2059 2059 if repo:
2060 2060 lookup = repo.__contains__
2061 2061 tree, pos = parse(spec, lookup)
2062 2062 if (pos != len(spec)):
2063 2063 raise error.ParseError(_("invalid token"), pos)
2064 2064 if ui:
2065 2065 tree = findaliases(ui, tree)
2066 2066 weight, tree = optimize(tree, True)
2067 2067 def mfunc(repo, subset):
2068 2068 if util.safehasattr(subset, 'set'):
2069 2069 result = getset(repo, subset, tree)
2070 2070 else:
2071 2071 result = getset(repo, baseset(subset), tree)
2072 2072 return result
2073 2073 return mfunc
2074 2074
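# A hedged sketch of how match() is meant to be consumed; `ui` and `repo` are
# assumed to come from calling code (a ui object and a localrepository):
#
#   m = match(ui, 'head() and not closed()', repo)
#   revs = m(repo, spanset(repo))    # a smartset of matching revisions
#
# The spec is parsed once, aliases are expanded, the tree is optimized, and
# the returned mfunc can then be evaluated against any subset.
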
2075 2075 def formatspec(expr, *args):
2076 2076 '''
2077 2077 This is a convenience function for using revsets internally, and
2078 2078 escapes arguments appropriately. Aliases are intentionally ignored
2079 2079 so that intended expression behavior isn't accidentally subverted.
2080 2080
2081 2081 Supported arguments:
2082 2082
2083 2083 %r = revset expression, parenthesized
2084 2084 %d = int(arg), no quoting
2085 2085 %s = string(arg), escaped and single-quoted
2086 2086 %b = arg.branch(), escaped and single-quoted
2087 2087 %n = hex(arg), single-quoted
2088 2088 %% = a literal '%'
2089 2089
2090 2090 Prefixing the type with 'l' specifies a parenthesized list of that type.
2091 2091
2092 2092 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2093 2093 '(10 or 11):: and ((this()) or (that()))'
2094 2094 >>> formatspec('%d:: and not %d::', 10, 20)
2095 2095 '10:: and not 20::'
2096 2096 >>> formatspec('%ld or %ld', [], [1])
2097 2097 "_list('') or 1"
2098 2098 >>> formatspec('keyword(%s)', 'foo\\xe9')
2099 2099 "keyword('foo\\\\xe9')"
2100 2100 >>> b = lambda: 'default'
2101 2101 >>> b.branch = b
2102 2102 >>> formatspec('branch(%b)', b)
2103 2103 "branch('default')"
2104 2104 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2105 2105 "root(_list('a\\x00b\\x00c\\x00d'))"
2106 2106 '''
2107 2107
2108 2108 def quote(s):
2109 2109 return repr(str(s))
2110 2110
2111 2111 def argtype(c, arg):
2112 2112 if c == 'd':
2113 2113 return str(int(arg))
2114 2114 elif c == 's':
2115 2115 return quote(arg)
2116 2116 elif c == 'r':
2117 2117 parse(arg) # make sure syntax errors are confined
2118 2118 return '(%s)' % arg
2119 2119 elif c == 'n':
2120 2120 return quote(node.hex(arg))
2121 2121 elif c == 'b':
2122 2122 return quote(arg.branch())
2123 2123
2124 2124 def listexp(s, t):
2125 2125 l = len(s)
2126 2126 if l == 0:
2127 2127 return "_list('')"
2128 2128 elif l == 1:
2129 2129 return argtype(t, s[0])
2130 2130 elif t == 'd':
2131 2131 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2132 2132 elif t == 's':
2133 2133 return "_list('%s')" % "\0".join(s)
2134 2134 elif t == 'n':
2135 2135 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2136 2136 elif t == 'b':
2137 2137 return "_list('%s')" % "\0".join(a.branch() for a in s)
2138 2138
2139 2139 m = l // 2
2140 2140 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2141 2141
2142 2142 ret = ''
2143 2143 pos = 0
2144 2144 arg = 0
2145 2145 while pos < len(expr):
2146 2146 c = expr[pos]
2147 2147 if c == '%':
2148 2148 pos += 1
2149 2149 d = expr[pos]
2150 2150 if d == '%':
2151 2151 ret += d
2152 2152 elif d in 'dsnbr':
2153 2153 ret += argtype(d, args[arg])
2154 2154 arg += 1
2155 2155 elif d == 'l':
2156 2156 # a list of some type
2157 2157 pos += 1
2158 2158 d = expr[pos]
2159 2159 ret += listexp(list(args[arg]), d)
2160 2160 arg += 1
2161 2161 else:
2162 2162 raise util.Abort('unexpected revspec format character %s' % d)
2163 2163 else:
2164 2164 ret += c
2165 2165 pos += 1
2166 2166
2167 2167 return ret
2168 2168
2169 2169 def prettyformat(tree):
2170 2170 def _prettyformat(tree, level, lines):
2171 2171 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2172 2172 lines.append((level, str(tree)))
2173 2173 else:
2174 2174 lines.append((level, '(%s' % tree[0]))
2175 2175 for s in tree[1:]:
2176 2176 _prettyformat(s, level + 1, lines)
2177 2177 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2178 2178
2179 2179 lines = []
2180 2180 _prettyformat(tree, 0, lines)
2181 2181 output = '\n'.join((' '*l + s) for l, s in lines)
2182 2182 return output
2183 2183
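# An indicative sketch of prettyformat() on a tiny tree (exact indentation
# depends on the nesting level):
#
#   >>> print prettyformat(parse('p1(foo)')[0])
#   (func
#     ('symbol', 'p1')
#     ('symbol', 'foo'))
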
2184 2184 def depth(tree):
2185 2185 if isinstance(tree, tuple):
2186 2186 return max(map(depth, tree)) + 1
2187 2187 else:
2188 2188 return 0
2189 2189
2190 2190 def funcsused(tree):
2191 2191 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2192 2192 return set()
2193 2193 else:
2194 2194 funcs = set()
2195 2195 for s in tree[1:]:
2196 2196 funcs |= funcsused(s)
2197 2197 if tree[0] == 'func':
2198 2198 funcs.add(tree[1][1])
2199 2199 return funcs
2200 2200
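# A small sketch of the two tree-inspection helpers above (values indicative):
#
#   >>> sorted(funcsused(parse('keyword(bug) or p1(tip)')[0]))
#   ['keyword', 'p1']
#   >>> depth(parse('p1(tip)')[0])
#   2
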
2201 2201 class abstractsmartset(object):
2202 2202
2203 2203 def __nonzero__(self):
2204 2204 """True if the smartset is not empty"""
2205 2205 raise NotImplementedError()
2206 2206
2207 2207 def __contains__(self, rev):
2208 2208 """provide fast membership testing"""
2209 2209 raise NotImplementedError()
2210 2210
2211 2211 def __set__(self):
2212 2212 """Returns a set or a smartset containing all the elements.
2213 2213
2214 2214 The returned structure should be the fastest option for membership
2215 2215 testing.
2216 2216
2217 2217 This is part of the mandatory API for smartset."""
2218 2218 raise NotImplementedError()
2219 2219
2220 2220 def __iter__(self):
2221 2221 """iterate the set in the order it is supposed to be iterated"""
2222 2222 raise NotImplementedError()
2223 2223
2224 2224 # Attributes containing a function to perform a fast iteration in a given
2225 2225 # direction. A smartset can have none, one, or both defined.
2226 2226 #
2227 2227 # Default value is None instead of a function returning None to avoid
2228 2228 # initializing an iterator just for testing if a fast method exists.
2229 2229 fastasc = None
2230 2230 fastdesc = None
2231 2231
2232 2232 def isascending(self):
2233 2233 """True if the set will iterate in ascending order"""
2234 2234 raise NotImplementedError()
2235 2235
2236 2236 def isdescending(self):
2237 2237 """True if the set will iterate in descending order"""
2238 2238 raise NotImplementedError()
2239 2239
2240 2240 def min(self):
2241 2241 """return the minimum element in the set"""
2242 2242 if self.fastasc is not None:
2243 2243 for r in self.fastasc():
2244 2244 return r
2245 2245 raise ValueError('arg is an empty sequence')
2246 2246 return min(self)
2247 2247
2248 2248 def max(self):
2249 2249 """return the maximum element in the set"""
2250 2250 if self.fastdesc is not None:
2251 2251 for r in self.fastdesc():
2252 2252 return r
2253 2253 raise ValueError('arg is an empty sequence')
2254 2254 return max(self)
2255 2255
2256 2256 def first(self):
2257 2257 """return the first element in the set (user iteration perspective)
2258 2258
2259 2259 Return None if the set is empty"""
2260 2260 raise NotImplementedError()
2261 2261
2262 2262 def last(self):
2263 2263 """return the last element in the set (user iteration perspective)
2264 2264
2265 2265 Return None if the set is empty"""
2266 2266 raise NotImplementedError()
2267 2267
2268 2268 def reverse(self):
2269 2269 """reverse the expected iteration order"""
2270 2270 raise NotImplementedError()
2271 2271
2272 2272 def sort(self, reverse=True):
2273 2273 """get the set to iterate in an ascending or descending order"""
2274 2274 raise NotImplementedError()
2275 2275
2276 2276 def __and__(self, other):
2277 2277 """Returns a new object with the intersection of the two collections.
2278 2278
2279 2279 This is part of the mandatory API for smartset."""
2280 2280 return self.filter(other.__contains__)
2281 2281
2282 2282 def __add__(self, other):
2283 2283 """Returns a new object with the union of the two collections.
2284 2284
2285 2285 This is part of the mandatory API for smartset."""
2286 2286 return addset(self, other)
2287 2287
2288 2288 def __sub__(self, other):
2289 2289 """Returns a new object with the substraction of the two collections.
2290 2290
2291 2291 This is part of the mandatory API for smartset."""
2292 2292 c = other.__contains__
2293 2293 return self.filter(lambda r: not c(r))
2294 2294
2295 2295 def filter(self, condition):
2296 2296 """Returns this smartset filtered by condition as a new smartset.
2297 2297
2298 2298 `condition` is a callable which takes a revision number and returns a
2299 2299 boolean.
2300 2300
2301 2301 This is part of the mandatory API for smartset."""
2302 kwargs = {}
2303 if self.isascending():
2304 kwargs['ascending'] = True
2305 elif self.isdescending():
2306 kwargs['ascending'] = False
2307 return filteredset(self, condition, **kwargs)
2302 return filteredset(self, condition)
2308 2303
2309 2304 class baseset(abstractsmartset):
2310 2305 """Basic data structure that represents a revset and contains the basic
2311 2306 operation that it should be able to perform.
2312 2307
2313 2308 Every method in this class should be implemented by any smartset class.
2314 2309 """
2315 2310 def __init__(self, data=()):
2316 2311 if not isinstance(data, list):
2317 2312 data = list(data)
2318 2313 self._list = data
2319 2314 self._set = None
2320 2315 self._ascending = None
2321 2316
2322 2317 @util.propertycache
2323 2318 def _asclist(self):
2324 2319 asclist = self._list[:]
2325 2320 asclist.sort()
2326 2321 return asclist
2327 2322
2328 2323 def __iter__(self):
2329 2324 if self._ascending is None:
2330 2325 return iter(self._list)
2331 2326 elif self._ascending:
2332 2327 return iter(self._asclist)
2333 2328 else:
2334 2329 return reversed(self._asclist)
2335 2330
2336 2331 def fastasc(self):
2337 2332 return iter(self._asclist)
2338 2333
2339 2334 def fastdesc(self):
2340 2335 return reversed(self._asclist)
2341 2336
2342 2337 def set(self):
2343 2338 """Returns a set or a smartset containing all the elements.
2344 2339
2345 2340 The returned structure should be the fastest option for membership
2346 2341 testing.
2347 2342
2348 2343 This is part of the mandatory API for smartset."""
2349 2344 if not self._set:
2350 2345 self._set = set(self)
2351 2346 return self._set
2352 2347
2353 2348 @util.propertycache
2354 2349 def __contains__(self):
2355 2350 return self.set().__contains__
2356 2351
2357 2352 def __nonzero__(self):
2358 2353 return bool(self._list)
2359 2354
2360 2355 def sort(self, reverse=False):
2361 2356 self._ascending = not bool(reverse)
2362 2357
2363 2358 def reverse(self):
2364 2359 if self._ascending is None:
2365 2360 self._list.reverse()
2366 2361 else:
2367 2362 self._ascending = not self._ascending
2368 2363
2369 2364 def __len__(self):
2370 2365 return len(self._list)
2371 2366
2372 2367 def isascending(self):
2373 2368 """Returns True if the collection is ascending order, False if not.
2374 2369
2375 2370 This is part of the mandatory API for smartset."""
2376 2371 return self._ascending is not None and self._ascending
2377 2372
2378 2373 def isdescending(self):
2379 2374 """Returns True if the collection is descending order, False if not.
2380 2375
2381 2376 This is part of the mandatory API for smartset."""
2382 2377 return self._ascending is not None and not self._ascending
2383 2378
2384 2379 def first(self):
2385 2380 if self:
2386 2381 if self._ascending is None:
2387 2382 return self._list[0]
2388 2383 elif self._ascending:
2389 2384 return self._asclist[0]
2390 2385 else:
2391 2386 return self._asclist[-1]
2392 2387 return None
2393 2388
2394 2389 def last(self):
2395 2390 if self:
2396 2391 if self._ascending is None:
2397 2392 return self._list[-1]
2398 2393 elif self._ascending:
2399 2394 return self._asclist[-1]
2400 2395 else:
2401 2396 return self._asclist[0]
2402 2397 return None
2403 2398
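# An illustrative sketch of the abstractsmartset operators using baseset:
# '&' and '-' return filteredsets, '+' returns an addset (values indicative).
#
#   >>> xs, ys = baseset([0, 2, 4, 6]), baseset([4, 6, 8])
#   >>> list(xs & ys), list(xs - ys), list(xs + ys)
#   ([4, 6], [0, 2], [0, 2, 4, 6, 8])
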
2404 2399 class filteredset(abstractsmartset):
2405 2400 """Duck type for baseset class which iterates lazily over the revisions in
2406 2401 the subset and contains a function which tests for membership in the
2407 2402 revset
2408 2403 """
2409 def __init__(self, subset, condition=lambda x: True, ascending=None):
2404 def __init__(self, subset, condition=lambda x: True):
2410 2405 """
2411 2406 condition: a function that decides whether a revision in the subset
2412 2407 belongs to the revset or not.
2413 2408 """
2414 2409 self._subset = subset
2415 2410 self._condition = condition
2416 2411 self._cache = {}
2417 if ascending is not None:
2418 ascending = bool(ascending)
2419 self._ascending = ascending
2420 2412
2421 2413 def __contains__(self, x):
2422 2414 c = self._cache
2423 2415 if x not in c:
2424 2416 v = c[x] = x in self._subset and self._condition(x)
2425 2417 return v
2426 2418 return c[x]
2427 2419
2428 2420 def __iter__(self):
2429 2421 return self._iterfilter(self._subset)
2430 2422
2431 2423 def _iterfilter(self, it):
2432 2424 cond = self._condition
2433 2425 for x in it:
2434 2426 if cond(x):
2435 2427 yield x
2436 2428
2437 2429 @property
2438 2430 def fastasc(self):
2439 2431 it = self._subset.fastasc
2440 2432 if it is None:
2441 2433 return None
2442 2434 return lambda: self._iterfilter(it())
2443 2435
2444 2436 @property
2445 2437 def fastdesc(self):
2446 2438 it = self._subset.fastdesc
2447 2439 if it is None:
2448 2440 return None
2449 2441 return lambda: self._iterfilter(it())
2450 2442
2451 2443 def __nonzero__(self):
2452 2444 for r in self:
2453 2445 return True
2454 2446 return False
2455 2447
2456 2448 def __len__(self):
2457 2449 # Basic implementation to be changed in future patches.
2458 2450 l = baseset([r for r in self])
2459 2451 return len(l)
2460 2452
2461 2453 def __getitem__(self, x):
2462 2454 # Basic implementation to be changed in future patches.
2463 2455 l = baseset([r for r in self])
2464 2456 return l[x]
2465 2457
2466 2458 def sort(self, reverse=False):
2467 if self._ascending is None:
2468 if not util.safehasattr(self._subset, 'sort'):
2469 self._subset = baseset(self._subset)
2470 self._subset.sort(reverse=reverse)
2471 self._ascending = not reverse
2472 elif bool(reverse) == self._ascending:
2473 self.reverse()
2459 self._subset.sort(reverse=reverse)
2474 2460
2475 2461 def reverse(self):
2476 2462 self._subset.reverse()
2477 if self._ascending is not None:
2478 self._ascending = not self._ascending
2479 2463
2480 2464 def set(self):
2481 2465 return set([r for r in self])
2482 2466
2483 2467 def isascending(self):
2484 return self._ascending is not None and self._ascending
2468 return self._subset.isascending()
2485 2469
2486 2470 def isdescending(self):
2487 return self._ascending is not None and not self._ascending
2471 return self._subset.isdescending()
2488 2472
2489 2473 def first(self):
2490 2474 for x in self:
2491 2475 return x
2492 2476 return None
2493 2477
2494 2478 def last(self):
2495 2479 it = None
2496 if self._ascending is not None:
2497 if self._ascending:
2498 it = self.fastdesc
2499 else:
2500 it = self.fastasc
2480 if self._subset.isascending():
2481 it = self.fastdesc
2482 elif self._subset.isdescending():
2483 it = self.fastasc
2501 2484 if it is None:
2502 2485 # slowly consume everything. This needs improvement
2503 2486 it = lambda: reversed(list(self))
2504 2487 for x in it():
2505 2488 return x
2506 2489 return None
2507 2490
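# A small sketch of filteredset in isolation: it filters its subset lazily,
# caches membership answers, and after this change simply delegates ordering
# questions to the underlying subset.
#
#   >>> evens = filteredset(baseset(range(6)), lambda r: r % 2 == 0)
#   >>> list(evens), 3 in evens, 4 in evens
#   ([0, 2, 4], False, True)
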
2508 2491 class addset(abstractsmartset):
2509 2492 """Represent the addition of two sets
2510 2493
2511 2494 Wrapper structure for lazily adding two structures without losing much
2512 2495 performance on the __contains__ method
2513 2496
2514 2497 If the ascending attribute is set, that means the two structures are
2515 2498 ordered in either an ascending or descending way. Therefore, we can add
2516 2499 them maintaining the order by iterating over both at the same time
2517 2500 """
2518 2501 def __init__(self, revs1, revs2, ascending=None):
2519 2502 self._r1 = revs1
2520 2503 self._r2 = revs2
2521 2504 self._iter = None
2522 2505 self._ascending = ascending
2523 2506 self._genlist = None
2524 2507 self._asclist = None
2525 2508
2526 2509 def __len__(self):
2527 2510 return len(self._list)
2528 2511
2529 2512 def __nonzero__(self):
2530 2513 return bool(self._r1 or self._r2)
2531 2514
2532 2515 @util.propertycache
2533 2516 def _list(self):
2534 2517 if not self._genlist:
2535 2518 self._genlist = baseset(self._iterator())
2536 2519 return self._genlist
2537 2520
2538 2521 def _iterator(self):
2539 2522 """Iterate over both collections without repeating elements
2540 2523
2541 2524 If the ascending attribute is not set, iterate over the first one and
2542 2525 then over the second one checking for membership on the first one so we
2543 2526 don't yield any duplicates.
2544 2527
2545 2528 If the ascending attribute is set, iterate over both collections at the
2546 2529 same time, yielding only one value at a time in the given order.
2547 2530 """
2548 2531 if self._ascending is None:
2549 2532 def gen():
2550 2533 for r in self._r1:
2551 2534 yield r
2552 2535 s = self._r1.set()
2553 2536 for r in self._r2:
2554 2537 if r not in s:
2555 2538 yield r
2556 2539 gen = gen()
2557 2540 else:
2558 2541 iter1 = iter(self._r1)
2559 2542 iter2 = iter(self._r2)
2560 2543 gen = self._iterordered(self._ascending, iter1, iter2)
2561 2544 return gen
2562 2545
2563 2546 def __iter__(self):
2564 2547 if self._ascending is None:
2565 2548 if self._genlist:
2566 2549 return iter(self._genlist)
2567 2550 return iter(self._iterator())
2568 2551 self._trysetasclist()
2569 2552 if self._ascending:
2570 2553 it = self.fastasc
2571 2554 else:
2572 2555 it = self.fastdesc
2573 2556 if it is None:
2574 2557 # consume the gen and try again
2575 2558 self._list
2576 2559 return iter(self)
2577 2560 return it()
2578 2561
2579 2562 def _trysetasclist(self):
2580 2563 """populate the _asclist attribut if possible and necessary"""
2581 2564 if self._genlist is not None and self._asclist is None:
2582 2565 self._asclist = sorted(self._genlist)
2583 2566
2584 2567 @property
2585 2568 def fastasc(self):
2586 2569 self._trysetasclist()
2587 2570 if self._asclist is not None:
2588 2571 return self._asclist.__iter__
2589 2572 iter1 = self._r1.fastasc
2590 2573 iter2 = self._r2.fastasc
2591 2574 if None in (iter1, iter2):
2592 2575 return None
2593 2576 return lambda: self._iterordered(True, iter1(), iter2())
2594 2577
2595 2578 @property
2596 2579 def fastdesc(self):
2597 2580 self._trysetasclist()
2598 2581 if self._asclist is not None:
2599 2582 return self._asclist.__reversed__
2600 2583 iter1 = self._r1.fastdesc
2601 2584 iter2 = self._r2.fastdesc
2602 2585 if None in (iter1, iter2):
2603 2586 return None
2604 2587 return lambda: self._iterordered(False, iter1(), iter2())
2605 2588
2606 2589 def _iterordered(self, ascending, iter1, iter2):
2607 2590 """produce an ordered iteration from two iterators with the same order
2608 2591
2609 2592 The ascending parameter is used to indicate the iteration direction.
2610 2593 """
2611 2594 choice = max
2612 2595 if ascending:
2613 2596 choice = min
2614 2597
2615 2598 val1 = None
2616 2599 val2 = None
2617 2600
2621 2604 try:
2622 2605 # Consume both iterators in an ordered way until one is
2623 2606 # empty
2624 2607 while True:
2625 2608 if val1 is None:
2626 2609 val1 = iter1.next()
2627 2610 if val2 is None:
2628 2611 val2 = iter2.next()
2629 2612 next = choice(val1, val2)
2630 2613 yield next
2631 2614 if val1 == next:
2632 2615 val1 = None
2633 2616 if val2 == next:
2634 2617 val2 = None
2635 2618 except StopIteration:
2636 2619 # Flush any remaining values and consume the other one
2637 2620 it = iter2
2638 2621 if val1 is not None:
2639 2622 yield val1
2640 2623 it = iter1
2641 2624 elif val2 is not None:
2642 2625 # might have been equality and both are empty
2643 2626 yield val2
2644 2627 for val in it:
2645 2628 yield val
2646 2629
2647 2630 def __contains__(self, x):
2648 2631 return x in self._r1 or x in self._r2
2649 2632
2650 2633 def set(self):
2651 2634 return self
2652 2635
2653 2636 def sort(self, reverse=False):
2654 2637 """Sort the added set
2655 2638
2656 2639 For this we use the cached list with all the generated values and if we
2657 2640 know they are ascending or descending we can sort them in a smart way.
2658 2641 """
2659 2642 self._ascending = not reverse
2660 2643
2661 2644 def isascending(self):
2662 2645 return self._ascending is not None and self._ascending
2663 2646
2664 2647 def isdescending(self):
2665 2648 return self._ascending is not None and not self._ascending
2666 2649
2667 2650 def reverse(self):
2668 2651 if self._ascending is None:
2669 2652 self._list.reverse()
2670 2653 else:
2671 2654 self._ascending = not self._ascending
2672 2655
2673 2656 def first(self):
2674 2657 if self:
2675 2658 return self._list.first()
2676 2659 return None
2677 2660
2678 2661 def last(self):
2679 2662 if self:
2680 2663 return self._list.last()
2681 2664 return None
2682 2665
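# A sketch of the ordered merge performed by addset when both inputs are known
# to iterate in the same direction (values indicative):
#
#   >>> a = baseset([1, 3, 5]); a.sort()
#   >>> b = baseset([2, 3, 4]); b.sort()
#   >>> list(addset(a, b, ascending=True))
#   [1, 2, 3, 4, 5]
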
2683 2666 class generatorset(abstractsmartset):
2684 2667 """Wrap a generator for lazy iteration
2685 2668
2686 2669 Wrapper structure for generators that provides lazy membership and can
2687 2670 be iterated more than once.
2688 2671 When asked for membership it generates values until either it finds the
2689 2672 requested one or has gone through all the elements in the generator
2690 2673 """
2691 2674 def __init__(self, gen, iterasc=None):
2692 2675 """
2693 2676 gen: a generator producing the values for the generatorset.
2694 2677 """
2695 2678 self._gen = gen
2696 2679 self._asclist = None
2697 2680 self._cache = {}
2698 2681 self._genlist = []
2699 2682 self._finished = False
2700 2683 self._ascending = True
2701 2684 if iterasc is not None:
2702 2685 if iterasc:
2703 2686 self.fastasc = self._iterator
2704 2687 self.__contains__ = self._asccontains
2705 2688 else:
2706 2689 self.fastdesc = self._iterator
2707 2690 self.__contains__ = self._desccontains
2708 2691
2709 2692 def __nonzero__(self):
2710 2693 for r in self:
2711 2694 return True
2712 2695 return False
2713 2696
2714 2697 def __contains__(self, x):
2715 2698 if x in self._cache:
2716 2699 return self._cache[x]
2717 2700
2718 2701 # Use new values only, as existing values would be cached.
2719 2702 for l in self._consumegen():
2720 2703 if l == x:
2721 2704 return True
2722 2705
2723 2706 self._cache[x] = False
2724 2707 return False
2725 2708
2726 2709 def _asccontains(self, x):
2727 2710 """version of contains optimised for ascending generator"""
2728 2711 if x in self._cache:
2729 2712 return self._cache[x]
2730 2713
2731 2714 # Use new values only, as existing values would be cached.
2732 2715 for l in self._consumegen():
2733 2716 if l == x:
2734 2717 return True
2735 2718 if l > x:
2736 2719 break
2737 2720
2738 2721 self._cache[x] = False
2739 2722 return False
2740 2723
2741 2724 def _desccontains(self, x):
2742 2725 """version of contains optimised for descending generator"""
2743 2726 if x in self._cache:
2744 2727 return self._cache[x]
2745 2728
2746 2729 # Use new values only, as existing values would be cached.
2747 2730 for l in self._consumegen():
2748 2731 if l == x:
2749 2732 return True
2750 2733 if l < x:
2751 2734 break
2752 2735
2753 2736 self._cache[x] = False
2754 2737 return False
2755 2738
2756 2739 def __iter__(self):
2757 2740 if self._ascending:
2758 2741 it = self.fastasc
2759 2742 else:
2760 2743 it = self.fastdesc
2761 2744 if it is not None:
2762 2745 return it()
2763 2746 # we need to consume the iterator
2764 2747 for x in self._consumegen():
2765 2748 pass
2766 2749 # recall the same code
2767 2750 return iter(self)
2768 2751
2769 2752 def _iterator(self):
2770 2753 if self._finished:
2771 2754 return iter(self._genlist)
2772 2755
2773 2756 # We have to use this complex iteration strategy to allow multiple
2774 2757 # iterations at the same time. We need to be able to catch revisions
2775 2758 # removed from `consumegen` and added to genlist by another instance.
2776 2759 #
2777 2760 # Getting rid of it would provide about a 15% speedup on this
2778 2761 # iteration.
2779 2762 genlist = self._genlist
2780 2763 nextrev = self._consumegen().next
2781 2764 _len = len # cache global lookup
2782 2765 def gen():
2783 2766 i = 0
2784 2767 while True:
2785 2768 if i < _len(genlist):
2786 2769 yield genlist[i]
2787 2770 else:
2788 2771 yield nextrev()
2789 2772 i += 1
2790 2773 return gen()
2791 2774
2792 2775 def _consumegen(self):
2793 2776 cache = self._cache
2794 2777 genlist = self._genlist.append
2795 2778 for item in self._gen:
2796 2779 cache[item] = True
2797 2780 genlist(item)
2798 2781 yield item
2799 2782 if not self._finished:
2800 2783 self._finished = True
2801 2784 asc = self._genlist[:]
2802 2785 asc.sort()
2803 2786 self._asclist = asc
2804 2787 self.fastasc = asc.__iter__
2805 2788 self.fastdesc = asc.__reversed__
2806 2789
2807 2790 def set(self):
2808 2791 return self
2809 2792
2810 2793 def sort(self, reverse=False):
2811 2794 self._ascending = not reverse
2812 2795
2813 2796 def reverse(self):
2814 2797 self._ascending = not self._ascending
2815 2798
2816 2799 def isascending(self):
2817 2800 return self._ascending
2818 2801
2819 2802 def isdescending(self):
2820 2803 return not self._ascending
2821 2804
2822 2805 def first(self):
2823 2806 if self._ascending:
2824 2807 it = self.fastasc
2825 2808 else:
2826 2809 it = self.fastdesc
2827 2810 if it is None:
2828 2811 # we need to consume all and try again
2829 2812 for x in self._consumegen():
2830 2813 pass
2831 2814 return self.first()
2832 2815 if self:
2833 2816 return it.next()
2834 2817 return None
2835 2818
2836 2819 def last(self):
2837 2820 if self._ascending:
2838 2821 it = self.fastdesc
2839 2822 else:
2840 2823 it = self.fastasc
2841 2824 if it is None:
2842 2825 # we need to consume all and try again
2843 2826 for x in self._consumegen():
2844 2827 pass
2845 2828 return self.first()
2846 2829 if self:
2847 2830 return it.next()
2848 2831 return None
2849 2832
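# A sketch of generatorset: values are cached as they are produced, so the
# wrapped generator can be iterated several times and queried for membership.
#
#   >>> gs = generatorset(iter([0, 1, 2, 5, 8]), iterasc=True)
#   >>> 7 in gs, 8 in gs
#   (False, True)
#   >>> list(gs), list(gs)
#   ([0, 1, 2, 5, 8], [0, 1, 2, 5, 8])
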
2850 2833 def spanset(repo, start=None, end=None):
2851 2834 """factory function to dispatch between fullreposet and actual spanset
2852 2835
2853 2836 Feel free to update all spanset call sites and kill this function at some
2854 2837 point.
2855 2838 """
2856 2839 if start is None and end is None:
2857 2840 return fullreposet(repo)
2858 2841 return _spanset(repo, start, end)
2859 2842
2860 2843
2861 2844 class _spanset(abstractsmartset):
2862 2845 """Duck type for baseset class which represents a range of revisions and
2863 2846 can work lazily and without having all the range in memory
2864 2847
2865 2848 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2866 2849 notable points:
2867 2850 - when x > y the span is automatically descending,
2868 2851 - revisions filtered with this repoview will be skipped.
2869 2852
2870 2853 """
2871 2854 def __init__(self, repo, start=0, end=None):
2872 2855 """
2873 2856 start: first revision included in the set
2874 2857 (defaults to 0)
2875 2858 end: first revision excluded (last + 1)
2876 2859 (defaults to len(repo))
2877 2860
2878 2861 Spanset will be descending if `end` < `start`.
2879 2862 """
2880 2863 if end is None:
2881 2864 end = len(repo)
2882 2865 self._ascending = start <= end
2883 2866 if not self._ascending:
2884 2867 start, end = end + 1, start +1
2885 2868 self._start = start
2886 2869 self._end = end
2887 2870 self._hiddenrevs = repo.changelog.filteredrevs
2888 2871
2889 2872 def sort(self, reverse=False):
2890 2873 self._ascending = not reverse
2891 2874
2892 2875 def reverse(self):
2893 2876 self._ascending = not self._ascending
2894 2877
2895 2878 def _iterfilter(self, iterrange):
2896 2879 s = self._hiddenrevs
2897 2880 for r in iterrange:
2898 2881 if r not in s:
2899 2882 yield r
2900 2883
2901 2884 def __iter__(self):
2902 2885 if self._ascending:
2903 2886 return self.fastasc()
2904 2887 else:
2905 2888 return self.fastdesc()
2906 2889
2907 2890 def fastasc(self):
2908 2891 iterrange = xrange(self._start, self._end)
2909 2892 if self._hiddenrevs:
2910 2893 return self._iterfilter(iterrange)
2911 2894 return iter(iterrange)
2912 2895
2913 2896 def fastdesc(self):
2914 2897 iterrange = xrange(self._end - 1, self._start - 1, -1)
2915 2898 if self._hiddenrevs:
2916 2899 return self._iterfilter(iterrange)
2917 2900 return iter(iterrange)
2918 2901
2919 2902 def __contains__(self, rev):
2920 2903 hidden = self._hiddenrevs
2921 2904 return ((self._start <= rev < self._end)
2922 2905 and not (hidden and rev in hidden))
2923 2906
2924 2907 def __nonzero__(self):
2925 2908 for r in self:
2926 2909 return True
2927 2910 return False
2928 2911
2929 2912 def __len__(self):
2930 2913 if not self._hiddenrevs:
2931 2914 return abs(self._end - self._start)
2932 2915 else:
2933 2916 count = 0
2934 2917 start = self._start
2935 2918 end = self._end
2936 2919 for rev in self._hiddenrevs:
2937 2920 if (end < rev <= start) or (start <= rev < end):
2938 2921 count += 1
2939 2922 return abs(self._end - self._start) - count
2940 2923
2941 2924 def __getitem__(self, x):
2942 2925 # Basic implementation to be changed in future patches.
2943 2926 l = baseset([r for r in self])
2944 2927 return l[x]
2945 2928
2946 2929 def set(self):
2947 2930 return self
2948 2931
2949 2932 def isascending(self):
2950 2933 return self._start <= self._end
2951 2934
2952 2935 def isdescending(self):
2953 2936 return self._start >= self._end
2954 2937
2955 2938 def first(self):
2956 2939 if self._ascending:
2957 2940 it = self.fastasc
2958 2941 else:
2959 2942 it = self.fastdesc
2960 2943 for x in it():
2961 2944 return x
2962 2945 return None
2963 2946
2964 2947 def last(self):
2965 2948 if self._ascending:
2966 2949 it = self.fastdesc
2967 2950 else:
2968 2951 it = self.fastasc
2969 2952 for x in it():
2970 2953 return x
2971 2954 return None
2972 2955
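# A hedged sketch of _spanset direction handling, assuming `repo` is a
# repository object with at least five unfiltered revisions:
#
#   list(_spanset(repo, 0, 4))   ->  [0, 1, 2, 3]    (ascending)
#   list(_spanset(repo, 4, 0))   ->  [4, 3, 2, 1]    (end < start: descending)
#   list(spanset(repo))          ->  every visible revision, ascending
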
2973 2956 class fullreposet(_spanset):
2974 2957 """a set containing all revisions in the repo
2975 2958
2976 2959 This class exists to host special optimisation.
2977 2960 """
2978 2961
2979 2962 def __init__(self, repo):
2980 2963 super(fullreposet, self).__init__(repo)
2981 2964
2982 2965 def __and__(self, other):
2983 2966 """fullrepo & other -> other
2984 2967
2985 2968 As self contains the whole repo, all of the other set should also be in
2986 2969 self. Therefore `self & other = other`.
2987 2970
2988 2971 This boldly assumes the other contains valid revs only.
2989 2972 """
2990 2973 # other is not a smartset, make it so
2991 2974 if not util.safehasattr(other, 'set'):
2992 2975 # filter out hidden revisions
2993 2976 # (this boldly assumes all smartsets are pure)
2994 2977 #
2995 2978 # `other` was used with "&", let's assume this is a set like
2996 2979 # object.
2997 2980 other = baseset(other - self._hiddenrevs)
2998 2981 elif not util.safehasattr(other, 'isascending'):
2999 2982 # "other" is generatorset not a real smart set
3000 2983 # we fallback to the old way (sad kitten)
3001 2984 return super(fullreposet, self).__and__(other)
3002 2985
3003 2986 if self.isascending():
3004 2987 other.sort()
3005 2988 else:
3006 2989 other.sort(reverse=True)
3007 2990 return other
3008 2991
3009 2992 # tell hggettext to extract docstrings from these functions:
3010 2993 i18nfunctions = symbols.values()