revset.only: use cl.findmissingrevs...
Siddharth Agarwal
r23321:d716b1ce default
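The change itself is small: revset.only() switches from calling the ancestor module's missingancestors() helper to asking the changelog directly via findmissingrevs(), which also lets the `import ancestor as ancestormod` line go away. A minimal sketch of what the new call computes, assuming an already-loaded `repo`; the helper name below is illustrative and not part of the change:

    def _only_revs(repo, include, exclude):
        # ancestors of (and including) `include` that are not ancestors of
        # `exclude`, i.e. ::include - ::exclude, as revision numbers
        cl = repo.changelog
        return set(cl.findmissingrevs(common=exclude, heads=include))
        # the removed form produced the same set via the ancestor module:
        #   set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
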
@@ -1,2969 +1,2968 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 import ancestor as ancestormod
14 13 from i18n import _
15 14 import encoding
16 15 import obsolete as obsmod
17 16 import pathutil
18 17 import repoview
19 18
20 19 def _revancestors(repo, revs, followfirst):
21 20 """Like revlog.ancestors(), but supports followfirst."""
22 21 cut = followfirst and 1 or None
23 22 cl = repo.changelog
24 23
25 24 def iterate():
26 25 revqueue, revsnode = None, None
27 26 h = []
28 27
29 28 revs.sort(reverse=True)
30 29 revqueue = util.deque(revs)
31 30 if revqueue:
32 31 revsnode = revqueue.popleft()
33 32 heapq.heappush(h, -revsnode)
34 33
35 34 seen = set([node.nullrev])
36 35 while h:
37 36 current = -heapq.heappop(h)
38 37 if current not in seen:
39 38 if revsnode and current == revsnode:
40 39 if revqueue:
41 40 revsnode = revqueue.popleft()
42 41 heapq.heappush(h, -revsnode)
43 42 seen.add(current)
44 43 yield current
45 44 for parent in cl.parentrevs(current)[:cut]:
46 45 if parent != node.nullrev:
47 46 heapq.heappush(h, -parent)
48 47
49 48 return generatorset(iterate(), iterasc=False)
50 49
51 50 def _revdescendants(repo, revs, followfirst):
52 51 """Like revlog.descendants() but supports followfirst."""
53 52 cut = followfirst and 1 or None
54 53
55 54 def iterate():
56 55 cl = repo.changelog
57 56 first = min(revs)
58 57 nullrev = node.nullrev
59 58 if first == nullrev:
60 59 # Are there nodes with a null first parent and a non-null
61 60 # second one? Maybe. Do we care? Probably not.
62 61 for i in cl:
63 62 yield i
64 63 else:
65 64 seen = set(revs)
66 65 for i in cl.revs(first + 1):
67 66 for x in cl.parentrevs(i)[:cut]:
68 67 if x != nullrev and x in seen:
69 68 seen.add(i)
70 69 yield i
71 70 break
72 71
73 72 return generatorset(iterate(), iterasc=True)
74 73
75 74 def _revsbetween(repo, roots, heads):
76 75 """Return all paths between roots and heads, inclusive of both endpoint
77 76 sets."""
78 77 if not roots:
79 78 return baseset()
80 79 parentrevs = repo.changelog.parentrevs
81 80 visit = list(heads)
82 81 reachable = set()
83 82 seen = {}
84 83 minroot = min(roots)
85 84 roots = set(roots)
86 85 # open-code the post-order traversal due to the tiny size of
87 86 # sys.getrecursionlimit()
88 87 while visit:
89 88 rev = visit.pop()
90 89 if rev in roots:
91 90 reachable.add(rev)
92 91 parents = parentrevs(rev)
93 92 seen[rev] = parents
94 93 for parent in parents:
95 94 if parent >= minroot and parent not in seen:
96 95 visit.append(parent)
97 96 if not reachable:
98 97 return baseset()
99 98 for rev in sorted(seen):
100 99 for parent in seen[rev]:
101 100 if parent in reachable:
102 101 reachable.add(rev)
103 102 return baseset(sorted(reachable))
104 103
105 104 elements = {
106 105 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 106 "~": (18, None, ("ancestor", 18)),
108 107 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 108 "-": (5, ("negate", 19), ("minus", 5)),
110 109 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 110 ("dagrangepost", 17)),
112 111 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 112 ("dagrangepost", 17)),
114 113 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 114 "not": (10, ("not", 10)),
116 115 "!": (10, ("not", 10)),
117 116 "and": (5, None, ("and", 5)),
118 117 "&": (5, None, ("and", 5)),
119 118 "or": (4, None, ("or", 4)),
120 119 "|": (4, None, ("or", 4)),
121 120 "+": (4, None, ("or", 4)),
122 121 ",": (2, None, ("list", 2)),
123 122 ")": (0, None, None),
124 123 "symbol": (0, ("symbol",), None),
125 124 "string": (0, ("string",), None),
126 125 "end": (0, None, None),
127 126 }
128 127
129 128 keywords = set(['and', 'or', 'not'])
130 129
131 130 def tokenize(program, lookup=None):
132 131 '''
133 132 Parse a revset statement into a stream of tokens
134 133
135 134 Check that @ is a valid unquoted token character (issue3686):
136 135 >>> list(tokenize("@::"))
137 136 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 137
139 138 '''
140 139
141 140 pos, l = 0, len(program)
142 141 while pos < l:
143 142 c = program[pos]
144 143 if c.isspace(): # skip inter-token whitespace
145 144 pass
146 145 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 146 yield ('::', None, pos)
148 147 pos += 1 # skip ahead
149 148 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 149 yield ('..', None, pos)
151 150 pos += 1 # skip ahead
152 151 elif c in "():,-|&+!~^": # handle simple operators
153 152 yield (c, None, pos)
154 153 elif (c in '"\'' or c == 'r' and
155 154 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 155 if c == 'r':
157 156 pos += 1
158 157 c = program[pos]
159 158 decode = lambda x: x
160 159 else:
161 160 decode = lambda x: x.decode('string-escape')
162 161 pos += 1
163 162 s = pos
164 163 while pos < l: # find closing quote
165 164 d = program[pos]
166 165 if d == '\\': # skip over escaped characters
167 166 pos += 2
168 167 continue
169 168 if d == c:
170 169 yield ('string', decode(program[s:pos]), s)
171 170 break
172 171 pos += 1
173 172 else:
174 173 raise error.ParseError(_("unterminated string"), s)
175 174 # gather up a symbol/keyword
176 175 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 176 s = pos
178 177 pos += 1
179 178 while pos < l: # find end of symbol
180 179 d = program[pos]
181 180 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 181 break
183 182 if d == '.' and program[pos - 1] == '.': # special case for ..
184 183 pos -= 1
185 184 break
186 185 pos += 1
187 186 sym = program[s:pos]
188 187 if sym in keywords: # operator keywords
189 188 yield (sym, None, s)
190 189 elif '-' in sym:
191 190 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 191 if lookup and lookup(sym):
193 192 # looks like a real symbol
194 193 yield ('symbol', sym, s)
195 194 else:
196 195 # looks like an expression
197 196 parts = sym.split('-')
198 197 for p in parts[:-1]:
199 198 if p: # possible consecutive -
200 199 yield ('symbol', p, s)
201 200 s += len(p)
202 201 yield ('-', None, pos)
203 202 s += 1
204 203 if parts[-1]: # possible trailing -
205 204 yield ('symbol', parts[-1], s)
206 205 else:
207 206 yield ('symbol', sym, s)
208 207 pos -= 1
209 208 else:
210 209 raise error.ParseError(_("syntax error"), pos)
211 210 pos += 1
212 211 yield ('end', None, pos)
213 212
214 213 # helpers
215 214
216 215 def getstring(x, err):
217 216 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 217 return x[1]
219 218 raise error.ParseError(err)
220 219
221 220 def getlist(x):
222 221 if not x:
223 222 return []
224 223 if x[0] == 'list':
225 224 return getlist(x[1]) + [x[2]]
226 225 return [x]
227 226
228 227 def getargs(x, min, max, err):
229 228 l = getlist(x)
230 229 if len(l) < min or (max >= 0 and len(l) > max):
231 230 raise error.ParseError(err)
232 231 return l
233 232
234 233 def getset(repo, subset, x):
235 234 if not x:
236 235 raise error.ParseError(_("missing argument"))
237 236 s = methods[x[0]](repo, subset, *x[1:])
238 237 if util.safehasattr(s, 'isascending'):
239 238 return s
240 239 return baseset(s)
241 240
242 241 def _getrevsource(repo, r):
243 242 extra = repo[r].extra()
244 243 for label in ('source', 'transplant_source', 'rebase_source'):
245 244 if label in extra:
246 245 try:
247 246 return repo[extra[label]].rev()
248 247 except error.RepoLookupError:
249 248 pass
250 249 return None
251 250
252 251 # operator methods
253 252
254 253 def stringset(repo, subset, x):
255 254 x = repo[x].rev()
256 255 if x == -1 and len(subset) == len(repo):
257 256 return baseset([-1])
258 257 if len(subset) == len(repo) or x in subset:
259 258 return baseset([x])
260 259 return baseset()
261 260
262 261 def symbolset(repo, subset, x):
263 262 if x in symbols:
264 263 raise error.ParseError(_("can't use %s here") % x)
265 264 return stringset(repo, subset, x)
266 265
267 266 def rangeset(repo, subset, x, y):
268 267 m = getset(repo, fullreposet(repo), x)
269 268 n = getset(repo, fullreposet(repo), y)
270 269
271 270 if not m or not n:
272 271 return baseset()
273 272 m, n = m.first(), n.last()
274 273
275 274 if m < n:
276 275 r = spanset(repo, m, n + 1)
277 276 else:
278 277 r = spanset(repo, m, n - 1)
279 278 return r & subset
280 279
281 280 def dagrange(repo, subset, x, y):
282 281 r = spanset(repo)
283 282 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
284 283 return xs & subset
285 284
286 285 def andset(repo, subset, x, y):
287 286 return getset(repo, getset(repo, subset, x), y)
288 287
289 288 def orset(repo, subset, x, y):
290 289 xl = getset(repo, subset, x)
291 290 yl = getset(repo, subset - xl, y)
292 291 return xl + yl
293 292
294 293 def notset(repo, subset, x):
295 294 return subset - getset(repo, subset, x)
296 295
297 296 def listset(repo, subset, a, b):
298 297 raise error.ParseError(_("can't use a list in this context"))
299 298
300 299 def func(repo, subset, a, b):
301 300 if a[0] == 'symbol' and a[1] in symbols:
302 301 return symbols[a[1]](repo, subset, b)
303 302 raise error.ParseError(_("not a function: %s") % a[1])
304 303
305 304 # functions
306 305
307 306 def adds(repo, subset, x):
308 307 """``adds(pattern)``
309 308 Changesets that add a file matching pattern.
310 309
311 310 The pattern without explicit kind like ``glob:`` is expected to be
312 311 relative to the current directory and match against a file or a
313 312 directory.
314 313 """
315 314 # i18n: "adds" is a keyword
316 315 pat = getstring(x, _("adds requires a pattern"))
317 316 return checkstatus(repo, subset, pat, 1)
318 317
319 318 def ancestor(repo, subset, x):
320 319 """``ancestor(*changeset)``
321 320 A greatest common ancestor of the changesets.
322 321
323 322 Accepts 0 or more changesets.
324 323 Will return empty list when passed no args.
325 324 Greatest common ancestor of a single changeset is that changeset.
326 325 """
327 326 # i18n: "ancestor" is a keyword
328 327 l = getlist(x)
329 328 rl = spanset(repo)
330 329 anc = None
331 330
332 331 # (getset(repo, rl, i) for i in l) generates a list of lists
333 332 for revs in (getset(repo, rl, i) for i in l):
334 333 for r in revs:
335 334 if anc is None:
336 335 anc = repo[r]
337 336 else:
338 337 anc = anc.ancestor(repo[r])
339 338
340 339 if anc is not None and anc.rev() in subset:
341 340 return baseset([anc.rev()])
342 341 return baseset()
343 342
344 343 def _ancestors(repo, subset, x, followfirst=False):
345 344 heads = getset(repo, spanset(repo), x)
346 345 if not heads:
347 346 return baseset()
348 347 s = _revancestors(repo, heads, followfirst)
349 348 return subset & s
350 349
351 350 def ancestors(repo, subset, x):
352 351 """``ancestors(set)``
353 352 Changesets that are ancestors of a changeset in set.
354 353 """
355 354 return _ancestors(repo, subset, x)
356 355
357 356 def _firstancestors(repo, subset, x):
358 357 # ``_firstancestors(set)``
359 358 # Like ``ancestors(set)`` but follows only the first parents.
360 359 return _ancestors(repo, subset, x, followfirst=True)
361 360
362 361 def ancestorspec(repo, subset, x, n):
363 362 """``set~n``
364 363 Changesets that are the Nth ancestor (first parents only) of a changeset
365 364 in set.
366 365 """
367 366 try:
368 367 n = int(n[1])
369 368 except (TypeError, ValueError):
370 369 raise error.ParseError(_("~ expects a number"))
371 370 ps = set()
372 371 cl = repo.changelog
373 372 for r in getset(repo, fullreposet(repo), x):
374 373 for i in range(n):
375 374 r = cl.parentrevs(r)[0]
376 375 ps.add(r)
377 376 return subset & ps
378 377
379 378 def author(repo, subset, x):
380 379 """``author(string)``
381 380 Alias for ``user(string)``.
382 381 """
383 382 # i18n: "author" is a keyword
384 383 n = encoding.lower(getstring(x, _("author requires a string")))
385 384 kind, pattern, matcher = _substringmatcher(n)
386 385 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
387 386
388 387 def only(repo, subset, x):
389 388 """``only(set, [set])``
390 389 Changesets that are ancestors of the first set that are not ancestors
391 390 of any other head in the repo. If a second set is specified, the result
392 391 is ancestors of the first set that are not ancestors of the second set
393 392 (i.e. ::<set1> - ::<set2>).
394 393 """
395 394 cl = repo.changelog
396 395 # i18n: "only" is a keyword
397 396 args = getargs(x, 1, 2, _('only takes one or two arguments'))
398 397 include = getset(repo, spanset(repo), args[0])
399 398 if len(args) == 1:
400 399 if not include:
401 400 return baseset()
402 401
403 402 descendants = set(_revdescendants(repo, include, False))
404 403 exclude = [rev for rev in cl.headrevs()
405 404 if not rev in descendants and not rev in include]
406 405 else:
407 406 exclude = getset(repo, spanset(repo), args[1])
408 407
409 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
408 results = set(cl.findmissingrevs(common=exclude, heads=include))
410 409 return subset & results
411 410
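# Illustrative usage of the predicate above (not part of this file; the branch
# names are hypothetical). only(A, B) selects ::A - ::B, so for example:
#
#     hg log -r 'only(mybranch, default)'
#
# or, through the Python API, roughly:
#
#     revs = repo.revs('only(%s, %s)', 'mybranch', 'default')
#
# which this commit now computes with a single cl.findmissingrevs() call
# instead of going through the ancestor module.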
412 411 def bisect(repo, subset, x):
413 412 """``bisect(string)``
414 413 Changesets marked in the specified bisect status:
415 414
416 415 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
417 416 - ``goods``, ``bads`` : csets topologically good/bad
418 417 - ``range`` : csets taking part in the bisection
419 418 - ``pruned`` : csets that are goods, bads or skipped
420 419 - ``untested`` : csets whose fate is yet unknown
421 420 - ``ignored`` : csets ignored due to DAG topology
422 421 - ``current`` : the cset currently being bisected
423 422 """
424 423 # i18n: "bisect" is a keyword
425 424 status = getstring(x, _("bisect requires a string")).lower()
426 425 state = set(hbisect.get(repo, status))
427 426 return subset & state
428 427
429 428 # Backward-compatibility
430 429 # - no help entry so that we do not advertise it any more
431 430 def bisected(repo, subset, x):
432 431 return bisect(repo, subset, x)
433 432
434 433 def bookmark(repo, subset, x):
435 434 """``bookmark([name])``
436 435 The named bookmark or all bookmarks.
437 436
438 437 If `name` starts with `re:`, the remainder of the name is treated as
439 438 a regular expression. To match a bookmark that actually starts with `re:`,
440 439 use the prefix `literal:`.
441 440 """
442 441 # i18n: "bookmark" is a keyword
443 442 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
444 443 if args:
445 444 bm = getstring(args[0],
446 445 # i18n: "bookmark" is a keyword
447 446 _('the argument to bookmark must be a string'))
448 447 kind, pattern, matcher = _stringmatcher(bm)
449 448 bms = set()
450 449 if kind == 'literal':
451 450 bmrev = repo._bookmarks.get(pattern, None)
452 451 if not bmrev:
453 452 raise util.Abort(_("bookmark '%s' does not exist") % bm)
454 453 bms.add(repo[bmrev].rev())
455 454 else:
456 455 matchrevs = set()
457 456 for name, bmrev in repo._bookmarks.iteritems():
458 457 if matcher(name):
459 458 matchrevs.add(bmrev)
460 459 if not matchrevs:
461 460 raise util.Abort(_("no bookmarks exist that match '%s'")
462 461 % pattern)
463 462 for bmrev in matchrevs:
464 463 bms.add(repo[bmrev].rev())
465 464 else:
466 465 bms = set([repo[r].rev()
467 466 for r in repo._bookmarks.values()])
468 467 bms -= set([node.nullrev])
469 468 return subset & bms
470 469
471 470 def branch(repo, subset, x):
472 471 """``branch(string or set)``
473 472 All changesets belonging to the given branch or the branches of the given
474 473 changesets.
475 474
476 475 If `string` starts with `re:`, the remainder of the name is treated as
477 476 a regular expression. To match a branch that actually starts with `re:`,
478 477 use the prefix `literal:`.
479 478 """
480 479 try:
481 480 b = getstring(x, '')
482 481 except error.ParseError:
483 482 # not a string, but another revspec, e.g. tip()
484 483 pass
485 484 else:
486 485 kind, pattern, matcher = _stringmatcher(b)
487 486 if kind == 'literal':
488 487 # note: falls through to the revspec case if no branch with
489 488 # this name exists
490 489 if pattern in repo.branchmap():
491 490 return subset.filter(lambda r: matcher(repo[r].branch()))
492 491 else:
493 492 return subset.filter(lambda r: matcher(repo[r].branch()))
494 493
495 494 s = getset(repo, spanset(repo), x)
496 495 b = set()
497 496 for r in s:
498 497 b.add(repo[r].branch())
499 498 c = s.__contains__
500 499 return subset.filter(lambda r: c(r) or repo[r].branch() in b)
501 500
502 501 def bumped(repo, subset, x):
503 502 """``bumped()``
504 503 Mutable changesets marked as successors of public changesets.
505 504
506 505 Only non-public and non-obsolete changesets can be `bumped`.
507 506 """
508 507 # i18n: "bumped" is a keyword
509 508 getargs(x, 0, 0, _("bumped takes no arguments"))
510 509 bumped = obsmod.getrevs(repo, 'bumped')
511 510 return subset & bumped
512 511
513 512 def bundle(repo, subset, x):
514 513 """``bundle()``
515 514 Changesets in the bundle.
516 515
517 516 Bundle must be specified by the -R option."""
518 517
519 518 try:
520 519 bundlerevs = repo.changelog.bundlerevs
521 520 except AttributeError:
522 521 raise util.Abort(_("no bundle provided - specify with -R"))
523 522 return subset & bundlerevs
524 523
525 524 def checkstatus(repo, subset, pat, field):
526 525 hasset = matchmod.patkind(pat) == 'set'
527 526
528 527 mcache = [None]
529 528 def matches(x):
530 529 c = repo[x]
531 530 if not mcache[0] or hasset:
532 531 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
533 532 m = mcache[0]
534 533 fname = None
535 534 if not m.anypats() and len(m.files()) == 1:
536 535 fname = m.files()[0]
537 536 if fname is not None:
538 537 if fname not in c.files():
539 538 return False
540 539 else:
541 540 for f in c.files():
542 541 if m(f):
543 542 break
544 543 else:
545 544 return False
546 545 files = repo.status(c.p1().node(), c.node())[field]
547 546 if fname is not None:
548 547 if fname in files:
549 548 return True
550 549 else:
551 550 for f in files:
552 551 if m(f):
553 552 return True
554 553
555 554 return subset.filter(matches)
556 555
557 556 def _children(repo, narrow, parentset):
558 557 cs = set()
559 558 if not parentset:
560 559 return baseset(cs)
561 560 pr = repo.changelog.parentrevs
562 561 minrev = min(parentset)
563 562 for r in narrow:
564 563 if r <= minrev:
565 564 continue
566 565 for p in pr(r):
567 566 if p in parentset:
568 567 cs.add(r)
569 568 return baseset(cs)
570 569
571 570 def children(repo, subset, x):
572 571 """``children(set)``
573 572 Child changesets of changesets in set.
574 573 """
575 574 s = getset(repo, fullreposet(repo), x)
576 575 cs = _children(repo, subset, s)
577 576 return subset & cs
578 577
579 578 def closed(repo, subset, x):
580 579 """``closed()``
581 580 Changeset is closed.
582 581 """
583 582 # i18n: "closed" is a keyword
584 583 getargs(x, 0, 0, _("closed takes no arguments"))
585 584 return subset.filter(lambda r: repo[r].closesbranch())
586 585
587 586 def contains(repo, subset, x):
588 587 """``contains(pattern)``
589 588 The revision's manifest contains a file matching pattern (but might not
590 589 modify it). See :hg:`help patterns` for information about file patterns.
591 590
592 591 The pattern without explicit kind like ``glob:`` is expected to be
593 592 relative to the current directory and match against a file exactly
594 593 for efficiency.
595 594 """
596 595 # i18n: "contains" is a keyword
597 596 pat = getstring(x, _("contains requires a pattern"))
598 597
599 598 def matches(x):
600 599 if not matchmod.patkind(pat):
601 600 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
602 601 if pats in repo[x]:
603 602 return True
604 603 else:
605 604 c = repo[x]
606 605 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
607 606 for f in c.manifest():
608 607 if m(f):
609 608 return True
610 609 return False
611 610
612 611 return subset.filter(matches)
613 612
614 613 def converted(repo, subset, x):
615 614 """``converted([id])``
616 615 Changesets converted from the given identifier in the old repository if
617 616 present, or all converted changesets if no identifier is specified.
618 617 """
619 618
620 619 # There is exactly no chance of resolving the revision, so do a simple
621 620 # string compare and hope for the best
622 621
623 622 rev = None
624 623 # i18n: "converted" is a keyword
625 624 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
626 625 if l:
627 626 # i18n: "converted" is a keyword
628 627 rev = getstring(l[0], _('converted requires a revision'))
629 628
630 629 def _matchvalue(r):
631 630 source = repo[r].extra().get('convert_revision', None)
632 631 return source is not None and (rev is None or source.startswith(rev))
633 632
634 633 return subset.filter(lambda r: _matchvalue(r))
635 634
636 635 def date(repo, subset, x):
637 636 """``date(interval)``
638 637 Changesets within the interval, see :hg:`help dates`.
639 638 """
640 639 # i18n: "date" is a keyword
641 640 ds = getstring(x, _("date requires a string"))
642 641 dm = util.matchdate(ds)
643 642 return subset.filter(lambda x: dm(repo[x].date()[0]))
644 643
645 644 def desc(repo, subset, x):
646 645 """``desc(string)``
647 646 Search commit message for string. The match is case-insensitive.
648 647 """
649 648 # i18n: "desc" is a keyword
650 649 ds = encoding.lower(getstring(x, _("desc requires a string")))
651 650
652 651 def matches(x):
653 652 c = repo[x]
654 653 return ds in encoding.lower(c.description())
655 654
656 655 return subset.filter(matches)
657 656
658 657 def _descendants(repo, subset, x, followfirst=False):
659 658 roots = getset(repo, spanset(repo), x)
660 659 if not roots:
661 660 return baseset()
662 661 s = _revdescendants(repo, roots, followfirst)
663 662
664 663 # Both sets need to be ascending in order to lazily return the union
665 664 # in the correct order.
666 665 base = subset & roots
667 666 desc = subset & s
668 667 result = base + desc
669 668 if subset.isascending():
670 669 result.sort()
671 670 elif subset.isdescending():
672 671 result.sort(reverse=True)
673 672 else:
674 673 result = subset & result
675 674 return result
676 675
677 676 def descendants(repo, subset, x):
678 677 """``descendants(set)``
679 678 Changesets which are descendants of changesets in set.
680 679 """
681 680 return _descendants(repo, subset, x)
682 681
683 682 def _firstdescendants(repo, subset, x):
684 683 # ``_firstdescendants(set)``
685 684 # Like ``descendants(set)`` but follows only the first parents.
686 685 return _descendants(repo, subset, x, followfirst=True)
687 686
688 687 def destination(repo, subset, x):
689 688 """``destination([set])``
690 689 Changesets that were created by a graft, transplant or rebase operation,
691 690 with the given revisions specified as the source. Omitting the optional set
692 691 is the same as passing all().
693 692 """
694 693 if x is not None:
695 694 sources = getset(repo, spanset(repo), x)
696 695 else:
697 696 sources = getall(repo, spanset(repo), x)
698 697
699 698 dests = set()
700 699
701 700 # subset contains all of the possible destinations that can be returned, so
702 701 # iterate over them and see if their source(s) were provided in the arg set.
703 702 # Even if the immediate src of r is not in the arg set, src's source (or
704 703 # further back) may be. Scanning back further than the immediate src allows
705 704 # transitive transplants and rebases to yield the same results as transitive
706 705 # grafts.
707 706 for r in subset:
708 707 src = _getrevsource(repo, r)
709 708 lineage = None
710 709
711 710 while src is not None:
712 711 if lineage is None:
713 712 lineage = list()
714 713
715 714 lineage.append(r)
716 715
717 716 # The visited lineage is a match if the current source is in the arg
718 717 # set. Since every candidate dest is visited by way of iterating
719 718 # subset, any dests further back in the lineage will be tested by a
720 719 # different iteration over subset. Likewise, if the src was already
721 720 # selected, the current lineage can be selected without going back
722 721 # further.
723 722 if src in sources or src in dests:
724 723 dests.update(lineage)
725 724 break
726 725
727 726 r = src
728 727 src = _getrevsource(repo, r)
729 728
730 729 return subset.filter(dests.__contains__)
731 730
732 731 def divergent(repo, subset, x):
733 732 """``divergent()``
734 733 Final successors of changesets with an alternative set of final successors.
735 734 """
736 735 # i18n: "divergent" is a keyword
737 736 getargs(x, 0, 0, _("divergent takes no arguments"))
738 737 divergent = obsmod.getrevs(repo, 'divergent')
739 738 return subset & divergent
740 739
741 740 def draft(repo, subset, x):
742 741 """``draft()``
743 742 Changeset in draft phase."""
744 743 # i18n: "draft" is a keyword
745 744 getargs(x, 0, 0, _("draft takes no arguments"))
746 745 phase = repo._phasecache.phase
747 746 target = phases.draft
748 747 condition = lambda r: phase(repo, r) == target
749 748 return subset.filter(condition, cache=False)
750 749
751 750 def extinct(repo, subset, x):
752 751 """``extinct()``
753 752 Obsolete changesets with obsolete descendants only.
754 753 """
755 754 # i18n: "extinct" is a keyword
756 755 getargs(x, 0, 0, _("extinct takes no arguments"))
757 756 extincts = obsmod.getrevs(repo, 'extinct')
758 757 return subset & extincts
759 758
760 759 def extra(repo, subset, x):
761 760 """``extra(label, [value])``
762 761 Changesets with the given label in the extra metadata, with the given
763 762 optional value.
764 763
765 764 If `value` starts with `re:`, the remainder of the value is treated as
766 765 a regular expression. To match a value that actually starts with `re:`,
767 766 use the prefix `literal:`.
768 767 """
769 768
770 769 # i18n: "extra" is a keyword
771 770 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
772 771 # i18n: "extra" is a keyword
773 772 label = getstring(l[0], _('first argument to extra must be a string'))
774 773 value = None
775 774
776 775 if len(l) > 1:
777 776 # i18n: "extra" is a keyword
778 777 value = getstring(l[1], _('second argument to extra must be a string'))
779 778 kind, value, matcher = _stringmatcher(value)
780 779
781 780 def _matchvalue(r):
782 781 extra = repo[r].extra()
783 782 return label in extra and (value is None or matcher(extra[label]))
784 783
785 784 return subset.filter(lambda r: _matchvalue(r))
786 785
787 786 def filelog(repo, subset, x):
788 787 """``filelog(pattern)``
789 788 Changesets connected to the specified filelog.
790 789
791 790 For performance reasons, visits only revisions mentioned in the file-level
792 791 filelog, rather than filtering through all changesets (much faster, but
793 792 doesn't include deletes or duplicate changes). For a slower, more accurate
794 793 result, use ``file()``.
795 794
796 795 The pattern without explicit kind like ``glob:`` is expected to be
797 796 relative to the current directory and match against a file exactly
798 797 for efficiency.
799 798 """
800 799
801 800 # i18n: "filelog" is a keyword
802 801 pat = getstring(x, _("filelog requires a pattern"))
803 802 s = set()
804 803
805 804 if not matchmod.patkind(pat):
806 805 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
807 806 fl = repo.file(f)
808 807 for fr in fl:
809 808 s.add(fl.linkrev(fr))
810 809 else:
811 810 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
812 811 for f in repo[None]:
813 812 if m(f):
814 813 fl = repo.file(f)
815 814 for fr in fl:
816 815 s.add(fl.linkrev(fr))
817 816
818 817 return subset & s
819 818
820 819 def first(repo, subset, x):
821 820 """``first(set, [n])``
822 821 An alias for limit().
823 822 """
824 823 return limit(repo, subset, x)
825 824
826 825 def _follow(repo, subset, x, name, followfirst=False):
827 826 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
828 827 c = repo['.']
829 828 if l:
830 829 x = getstring(l[0], _("%s expected a filename") % name)
831 830 if x in c:
832 831 cx = c[x]
833 832 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
834 833 # include the revision responsible for the most recent version
835 834 s.add(cx.linkrev())
836 835 else:
837 836 return baseset()
838 837 else:
839 838 s = _revancestors(repo, baseset([c.rev()]), followfirst)
840 839
841 840 return subset & s
842 841
843 842 def follow(repo, subset, x):
844 843 """``follow([file])``
845 844 An alias for ``::.`` (ancestors of the working copy's first parent).
846 845 If a filename is specified, the history of the given file is followed,
847 846 including copies.
848 847 """
849 848 return _follow(repo, subset, x, 'follow')
850 849
851 850 def _followfirst(repo, subset, x):
852 851 # ``followfirst([file])``
853 852 # Like ``follow([file])`` but follows only the first parent of
854 853 # every revision or file revision.
855 854 return _follow(repo, subset, x, '_followfirst', followfirst=True)
856 855
857 856 def getall(repo, subset, x):
858 857 """``all()``
859 858 All changesets, the same as ``0:tip``.
860 859 """
861 860 # i18n: "all" is a keyword
862 861 getargs(x, 0, 0, _("all takes no arguments"))
863 862 return subset
864 863
865 864 def grep(repo, subset, x):
866 865 """``grep(regex)``
867 866 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
868 867 to ensure special escape characters are handled correctly. Unlike
869 868 ``keyword(string)``, the match is case-sensitive.
870 869 """
871 870 try:
872 871 # i18n: "grep" is a keyword
873 872 gr = re.compile(getstring(x, _("grep requires a string")))
874 873 except re.error, e:
875 874 raise error.ParseError(_('invalid match pattern: %s') % e)
876 875
877 876 def matches(x):
878 877 c = repo[x]
879 878 for e in c.files() + [c.user(), c.description()]:
880 879 if gr.search(e):
881 880 return True
882 881 return False
883 882
884 883 return subset.filter(matches)
885 884
886 885 def _matchfiles(repo, subset, x):
887 886 # _matchfiles takes a revset list of prefixed arguments:
888 887 #
889 888 # [p:foo, i:bar, x:baz]
890 889 #
891 890 # builds a match object from them and filters subset. Allowed
892 891 # prefixes are 'p:' for regular patterns, 'i:' for include
893 892 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
894 893 # a revision identifier, or the empty string to reference the
895 894 # working directory, from which the match object is
896 895 # initialized. Use 'd:' to set the default matching mode, default
897 896 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
898 897
899 898 # i18n: "_matchfiles" is a keyword
900 899 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
901 900 pats, inc, exc = [], [], []
902 901 rev, default = None, None
903 902 for arg in l:
904 903 # i18n: "_matchfiles" is a keyword
905 904 s = getstring(arg, _("_matchfiles requires string arguments"))
906 905 prefix, value = s[:2], s[2:]
907 906 if prefix == 'p:':
908 907 pats.append(value)
909 908 elif prefix == 'i:':
910 909 inc.append(value)
911 910 elif prefix == 'x:':
912 911 exc.append(value)
913 912 elif prefix == 'r:':
914 913 if rev is not None:
915 914 # i18n: "_matchfiles" is a keyword
916 915 raise error.ParseError(_('_matchfiles expected at most one '
917 916 'revision'))
918 917 rev = value
919 918 elif prefix == 'd:':
920 919 if default is not None:
921 920 # i18n: "_matchfiles" is a keyword
922 921 raise error.ParseError(_('_matchfiles expected at most one '
923 922 'default mode'))
924 923 default = value
925 924 else:
926 925 # i18n: "_matchfiles" is a keyword
927 926 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
928 927 if not default:
929 928 default = 'glob'
930 929
931 930 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
932 931 exclude=exc, ctx=repo[rev], default=default)
933 932
934 933 def matches(x):
935 934 for f in repo[x].files():
936 935 if m(f):
937 936 return True
938 937 return False
939 938
940 939 return subset.filter(matches)
941 940
942 941 def hasfile(repo, subset, x):
943 942 """``file(pattern)``
944 943 Changesets affecting files matched by pattern.
945 944
946 945 For a faster but less accurate result, consider using ``filelog()``
947 946 instead.
948 947
949 948 This predicate uses ``glob:`` as the default kind of pattern.
950 949 """
951 950 # i18n: "file" is a keyword
952 951 pat = getstring(x, _("file requires a pattern"))
953 952 return _matchfiles(repo, subset, ('string', 'p:' + pat))
954 953
955 954 def head(repo, subset, x):
956 955 """``head()``
957 956 Changeset is a named branch head.
958 957 """
959 958 # i18n: "head" is a keyword
960 959 getargs(x, 0, 0, _("head takes no arguments"))
961 960 hs = set()
962 961 for b, ls in repo.branchmap().iteritems():
963 962 hs.update(repo[h].rev() for h in ls)
964 963 return baseset(hs).filter(subset.__contains__)
965 964
966 965 def heads(repo, subset, x):
967 966 """``heads(set)``
968 967 Members of set with no children in set.
969 968 """
970 969 s = getset(repo, subset, x)
971 970 ps = parents(repo, subset, x)
972 971 return s - ps
973 972
974 973 def hidden(repo, subset, x):
975 974 """``hidden()``
976 975 Hidden changesets.
977 976 """
978 977 # i18n: "hidden" is a keyword
979 978 getargs(x, 0, 0, _("hidden takes no arguments"))
980 979 hiddenrevs = repoview.filterrevs(repo, 'visible')
981 980 return subset & hiddenrevs
982 981
983 982 def keyword(repo, subset, x):
984 983 """``keyword(string)``
985 984 Search commit message, user name, and names of changed files for
986 985 string. The match is case-insensitive.
987 986 """
988 987 # i18n: "keyword" is a keyword
989 988 kw = encoding.lower(getstring(x, _("keyword requires a string")))
990 989
991 990 def matches(r):
992 991 c = repo[r]
993 992 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
994 993 c.description()])
995 994
996 995 return subset.filter(matches)
997 996
998 997 def limit(repo, subset, x):
999 998 """``limit(set, [n])``
1000 999 First n members of set, defaulting to 1.
1001 1000 """
1002 1001 # i18n: "limit" is a keyword
1003 1002 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1004 1003 try:
1005 1004 lim = 1
1006 1005 if len(l) == 2:
1007 1006 # i18n: "limit" is a keyword
1008 1007 lim = int(getstring(l[1], _("limit requires a number")))
1009 1008 except (TypeError, ValueError):
1010 1009 # i18n: "limit" is a keyword
1011 1010 raise error.ParseError(_("limit expects a number"))
1012 1011 ss = subset
1013 1012 os = getset(repo, spanset(repo), l[0])
1014 1013 result = []
1015 1014 it = iter(os)
1016 1015 for x in xrange(lim):
1017 1016 try:
1018 1017 y = it.next()
1019 1018 if y in ss:
1020 1019 result.append(y)
1021 1020 except (StopIteration):
1022 1021 break
1023 1022 return baseset(result)
1024 1023
1025 1024 def last(repo, subset, x):
1026 1025 """``last(set, [n])``
1027 1026 Last n members of set, defaulting to 1.
1028 1027 """
1029 1028 # i18n: "last" is a keyword
1030 1029 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1031 1030 try:
1032 1031 lim = 1
1033 1032 if len(l) == 2:
1034 1033 # i18n: "last" is a keyword
1035 1034 lim = int(getstring(l[1], _("last requires a number")))
1036 1035 except (TypeError, ValueError):
1037 1036 # i18n: "last" is a keyword
1038 1037 raise error.ParseError(_("last expects a number"))
1039 1038 ss = subset
1040 1039 os = getset(repo, spanset(repo), l[0])
1041 1040 os.reverse()
1042 1041 result = []
1043 1042 it = iter(os)
1044 1043 for x in xrange(lim):
1045 1044 try:
1046 1045 y = it.next()
1047 1046 if y in ss:
1048 1047 result.append(y)
1049 1048 except (StopIteration):
1050 1049 break
1051 1050 return baseset(result)
1052 1051
1053 1052 def maxrev(repo, subset, x):
1054 1053 """``max(set)``
1055 1054 Changeset with highest revision number in set.
1056 1055 """
1057 1056 os = getset(repo, spanset(repo), x)
1058 1057 if os:
1059 1058 m = os.max()
1060 1059 if m in subset:
1061 1060 return baseset([m])
1062 1061 return baseset()
1063 1062
1064 1063 def merge(repo, subset, x):
1065 1064 """``merge()``
1066 1065 Changeset is a merge changeset.
1067 1066 """
1068 1067 # i18n: "merge" is a keyword
1069 1068 getargs(x, 0, 0, _("merge takes no arguments"))
1070 1069 cl = repo.changelog
1071 1070 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1072 1071
1073 1072 def branchpoint(repo, subset, x):
1074 1073 """``branchpoint()``
1075 1074 Changesets with more than one child.
1076 1075 """
1077 1076 # i18n: "branchpoint" is a keyword
1078 1077 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1079 1078 cl = repo.changelog
1080 1079 if not subset:
1081 1080 return baseset()
1082 1081 baserev = min(subset)
1083 1082 parentscount = [0]*(len(repo) - baserev)
1084 1083 for r in cl.revs(start=baserev + 1):
1085 1084 for p in cl.parentrevs(r):
1086 1085 if p >= baserev:
1087 1086 parentscount[p - baserev] += 1
1088 1087 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1089 1088
1090 1089 def minrev(repo, subset, x):
1091 1090 """``min(set)``
1092 1091 Changeset with lowest revision number in set.
1093 1092 """
1094 1093 os = getset(repo, spanset(repo), x)
1095 1094 if os:
1096 1095 m = os.min()
1097 1096 if m in subset:
1098 1097 return baseset([m])
1099 1098 return baseset()
1100 1099
1101 1100 def modifies(repo, subset, x):
1102 1101 """``modifies(pattern)``
1103 1102 Changesets modifying files matched by pattern.
1104 1103
1105 1104 The pattern without explicit kind like ``glob:`` is expected to be
1106 1105 relative to the current directory and match against a file or a
1107 1106 directory.
1108 1107 """
1109 1108 # i18n: "modifies" is a keyword
1110 1109 pat = getstring(x, _("modifies requires a pattern"))
1111 1110 return checkstatus(repo, subset, pat, 0)
1112 1111
1113 1112 def node_(repo, subset, x):
1114 1113 """``id(string)``
1115 1114 Revision non-ambiguously specified by the given hex string prefix.
1116 1115 """
1117 1116 # i18n: "id" is a keyword
1118 1117 l = getargs(x, 1, 1, _("id requires one argument"))
1119 1118 # i18n: "id" is a keyword
1120 1119 n = getstring(l[0], _("id requires a string"))
1121 1120 if len(n) == 40:
1122 1121 rn = repo[n].rev()
1123 1122 else:
1124 1123 rn = None
1125 1124 pm = repo.changelog._partialmatch(n)
1126 1125 if pm is not None:
1127 1126 rn = repo.changelog.rev(pm)
1128 1127
1129 1128 if rn is None:
1130 1129 return baseset()
1131 1130 result = baseset([rn])
1132 1131 return result & subset
1133 1132
1134 1133 def obsolete(repo, subset, x):
1135 1134 """``obsolete()``
1136 1135 Mutable changeset with a newer version."""
1137 1136 # i18n: "obsolete" is a keyword
1138 1137 getargs(x, 0, 0, _("obsolete takes no arguments"))
1139 1138 obsoletes = obsmod.getrevs(repo, 'obsolete')
1140 1139 return subset & obsoletes
1141 1140
1142 1141 def origin(repo, subset, x):
1143 1142 """``origin([set])``
1144 1143 Changesets that were specified as a source for the grafts, transplants or
1145 1144 rebases that created the given revisions. Omitting the optional set is the
1146 1145 same as passing all(). If a changeset created by these operations is itself
1147 1146 specified as a source for one of these operations, only the source changeset
1148 1147 for the first operation is selected.
1149 1148 """
1150 1149 if x is not None:
1151 1150 dests = getset(repo, spanset(repo), x)
1152 1151 else:
1153 1152 dests = getall(repo, spanset(repo), x)
1154 1153
1155 1154 def _firstsrc(rev):
1156 1155 src = _getrevsource(repo, rev)
1157 1156 if src is None:
1158 1157 return None
1159 1158
1160 1159 while True:
1161 1160 prev = _getrevsource(repo, src)
1162 1161
1163 1162 if prev is None:
1164 1163 return src
1165 1164 src = prev
1166 1165
1167 1166 o = set([_firstsrc(r) for r in dests])
1168 1167 o -= set([None])
1169 1168 return subset & o
1170 1169
1171 1170 def outgoing(repo, subset, x):
1172 1171 """``outgoing([path])``
1173 1172 Changesets not found in the specified destination repository, or the
1174 1173 default push location.
1175 1174 """
1176 1175 import hg # avoid start-up nasties
1177 1176 # i18n: "outgoing" is a keyword
1178 1177 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1179 1178 # i18n: "outgoing" is a keyword
1180 1179 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1181 1180 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1182 1181 dest, branches = hg.parseurl(dest)
1183 1182 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1184 1183 if revs:
1185 1184 revs = [repo.lookup(rev) for rev in revs]
1186 1185 other = hg.peer(repo, {}, dest)
1187 1186 repo.ui.pushbuffer()
1188 1187 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1189 1188 repo.ui.popbuffer()
1190 1189 cl = repo.changelog
1191 1190 o = set([cl.rev(r) for r in outgoing.missing])
1192 1191 return subset & o
1193 1192
1194 1193 def p1(repo, subset, x):
1195 1194 """``p1([set])``
1196 1195 First parent of changesets in set, or the working directory.
1197 1196 """
1198 1197 if x is None:
1199 1198 p = repo[x].p1().rev()
1200 1199 if p >= 0:
1201 1200 return subset & baseset([p])
1202 1201 return baseset()
1203 1202
1204 1203 ps = set()
1205 1204 cl = repo.changelog
1206 1205 for r in getset(repo, spanset(repo), x):
1207 1206 ps.add(cl.parentrevs(r)[0])
1208 1207 ps -= set([node.nullrev])
1209 1208 return subset & ps
1210 1209
1211 1210 def p2(repo, subset, x):
1212 1211 """``p2([set])``
1213 1212 Second parent of changesets in set, or the working directory.
1214 1213 """
1215 1214 if x is None:
1216 1215 ps = repo[x].parents()
1217 1216 try:
1218 1217 p = ps[1].rev()
1219 1218 if p >= 0:
1220 1219 return subset & baseset([p])
1221 1220 return baseset()
1222 1221 except IndexError:
1223 1222 return baseset()
1224 1223
1225 1224 ps = set()
1226 1225 cl = repo.changelog
1227 1226 for r in getset(repo, spanset(repo), x):
1228 1227 ps.add(cl.parentrevs(r)[1])
1229 1228 ps -= set([node.nullrev])
1230 1229 return subset & ps
1231 1230
1232 1231 def parents(repo, subset, x):
1233 1232 """``parents([set])``
1234 1233 The set of all parents for all changesets in set, or the working directory.
1235 1234 """
1236 1235 if x is None:
1237 1236 ps = set(p.rev() for p in repo[x].parents())
1238 1237 else:
1239 1238 ps = set()
1240 1239 cl = repo.changelog
1241 1240 for r in getset(repo, spanset(repo), x):
1242 1241 ps.update(cl.parentrevs(r))
1243 1242 ps -= set([node.nullrev])
1244 1243 return subset & ps
1245 1244
1246 1245 def parentspec(repo, subset, x, n):
1247 1246 """``set^0``
1248 1247 The set.
1249 1248 ``set^1`` (or ``set^``), ``set^2``
1250 1249 First or second parent, respectively, of all changesets in set.
1251 1250 """
1252 1251 try:
1253 1252 n = int(n[1])
1254 1253 if n not in (0, 1, 2):
1255 1254 raise ValueError
1256 1255 except (TypeError, ValueError):
1257 1256 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1258 1257 ps = set()
1259 1258 cl = repo.changelog
1260 1259 for r in getset(repo, fullreposet(repo), x):
1261 1260 if n == 0:
1262 1261 ps.add(r)
1263 1262 elif n == 1:
1264 1263 ps.add(cl.parentrevs(r)[0])
1265 1264 elif n == 2:
1266 1265 parents = cl.parentrevs(r)
1267 1266 if len(parents) > 1:
1268 1267 ps.add(parents[1])
1269 1268 return subset & ps
1270 1269
1271 1270 def present(repo, subset, x):
1272 1271 """``present(set)``
1273 1272 An empty set, if any revision in set isn't found; otherwise,
1274 1273 all revisions in set.
1275 1274
1276 1275 If any of specified revisions is not present in the local repository,
1277 1276 the query is normally aborted. But this predicate allows the query
1278 1277 to continue even in such cases.
1279 1278 """
1280 1279 try:
1281 1280 return getset(repo, subset, x)
1282 1281 except error.RepoLookupError:
1283 1282 return baseset()
1284 1283
1285 1284 def public(repo, subset, x):
1286 1285 """``public()``
1287 1286 Changeset in public phase."""
1288 1287 # i18n: "public" is a keyword
1289 1288 getargs(x, 0, 0, _("public takes no arguments"))
1290 1289 phase = repo._phasecache.phase
1291 1290 target = phases.public
1292 1291 condition = lambda r: phase(repo, r) == target
1293 1292 return subset.filter(condition, cache=False)
1294 1293
1295 1294 def remote(repo, subset, x):
1296 1295 """``remote([id [,path]])``
1297 1296 Local revision that corresponds to the given identifier in a
1298 1297 remote repository, if present. Here, the '.' identifier is a
1299 1298 synonym for the current local branch.
1300 1299 """
1301 1300
1302 1301 import hg # avoid start-up nasties
1303 1302 # i18n: "remote" is a keyword
1304 1303 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1305 1304
1306 1305 q = '.'
1307 1306 if len(l) > 0:
1308 1307 # i18n: "remote" is a keyword
1309 1308 q = getstring(l[0], _("remote requires a string id"))
1310 1309 if q == '.':
1311 1310 q = repo['.'].branch()
1312 1311
1313 1312 dest = ''
1314 1313 if len(l) > 1:
1315 1314 # i18n: "remote" is a keyword
1316 1315 dest = getstring(l[1], _("remote requires a repository path"))
1317 1316 dest = repo.ui.expandpath(dest or 'default')
1318 1317 dest, branches = hg.parseurl(dest)
1319 1318 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1320 1319 if revs:
1321 1320 revs = [repo.lookup(rev) for rev in revs]
1322 1321 other = hg.peer(repo, {}, dest)
1323 1322 n = other.lookup(q)
1324 1323 if n in repo:
1325 1324 r = repo[n].rev()
1326 1325 if r in subset:
1327 1326 return baseset([r])
1328 1327 return baseset()
1329 1328
1330 1329 def removes(repo, subset, x):
1331 1330 """``removes(pattern)``
1332 1331 Changesets which remove files matching pattern.
1333 1332
1334 1333 The pattern without explicit kind like ``glob:`` is expected to be
1335 1334 relative to the current directory and match against a file or a
1336 1335 directory.
1337 1336 """
1338 1337 # i18n: "removes" is a keyword
1339 1338 pat = getstring(x, _("removes requires a pattern"))
1340 1339 return checkstatus(repo, subset, pat, 2)
1341 1340
1342 1341 def rev(repo, subset, x):
1343 1342 """``rev(number)``
1344 1343 Revision with the given numeric identifier.
1345 1344 """
1346 1345 # i18n: "rev" is a keyword
1347 1346 l = getargs(x, 1, 1, _("rev requires one argument"))
1348 1347 try:
1349 1348 # i18n: "rev" is a keyword
1350 1349 l = int(getstring(l[0], _("rev requires a number")))
1351 1350 except (TypeError, ValueError):
1352 1351 # i18n: "rev" is a keyword
1353 1352 raise error.ParseError(_("rev expects a number"))
1354 1353 if l not in fullreposet(repo):
1355 1354 return baseset()
1356 1355 return subset & baseset([l])
1357 1356
1358 1357 def matching(repo, subset, x):
1359 1358 """``matching(revision [, field])``
1360 1359 Changesets in which a given set of fields match the set of fields in the
1361 1360 selected revision or set.
1362 1361
1363 1362 To match more than one field pass the list of fields to match separated
1364 1363 by spaces (e.g. ``author description``).
1365 1364
1366 1365 Valid fields are most regular revision fields and some special fields.
1367 1366
1368 1367 Regular revision fields are ``description``, ``author``, ``branch``,
1369 1368 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1370 1369 and ``diff``.
1371 1370 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1372 1371 contents of the revision. Two revisions matching their ``diff`` will
1373 1372 also match their ``files``.
1374 1373
1375 1374 Special fields are ``summary`` and ``metadata``:
1376 1375 ``summary`` matches the first line of the description.
1377 1376 ``metadata`` is equivalent to matching ``description user date``
1378 1377 (i.e. it matches the main metadata fields).
1379 1378
1380 1379 ``metadata`` is the default field which is used when no fields are
1381 1380 specified. You can match more than one field at a time.
1382 1381 """
1383 1382 # i18n: "matching" is a keyword
1384 1383 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1385 1384
1386 1385 revs = getset(repo, fullreposet(repo), l[0])
1387 1386
1388 1387 fieldlist = ['metadata']
1389 1388 if len(l) > 1:
1390 1389 fieldlist = getstring(l[1],
1391 1390 # i18n: "matching" is a keyword
1392 1391 _("matching requires a string "
1393 1392 "as its second argument")).split()
1394 1393
1395 1394 # Make sure that there are no repeated fields,
1396 1395 # expand the 'special' 'metadata' field type
1397 1396 # and check the 'files' whenever we check the 'diff'
1398 1397 fields = []
1399 1398 for field in fieldlist:
1400 1399 if field == 'metadata':
1401 1400 fields += ['user', 'description', 'date']
1402 1401 elif field == 'diff':
1403 1402 # a revision matching the diff must also match the files
1404 1403 # since matching the diff is very costly, make sure to
1405 1404 # also match the files first
1406 1405 fields += ['files', 'diff']
1407 1406 else:
1408 1407 if field == 'author':
1409 1408 field = 'user'
1410 1409 fields.append(field)
1411 1410 fields = set(fields)
1412 1411 if 'summary' in fields and 'description' in fields:
1413 1412 # If a revision matches its description it also matches its summary
1414 1413 fields.discard('summary')
1415 1414
1416 1415 # We may want to match more than one field
1417 1416 # Not all fields take the same amount of time to be matched
1418 1417 # Sort the selected fields in order of increasing matching cost
1419 1418 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1420 1419 'files', 'description', 'substate', 'diff']
1421 1420 def fieldkeyfunc(f):
1422 1421 try:
1423 1422 return fieldorder.index(f)
1424 1423 except ValueError:
1425 1424 # assume an unknown field is very costly
1426 1425 return len(fieldorder)
1427 1426 fields = list(fields)
1428 1427 fields.sort(key=fieldkeyfunc)
1429 1428
1430 1429 # Each field will be matched with its own "getfield" function
1431 1430 # which will be added to the getfieldfuncs array of functions
1432 1431 getfieldfuncs = []
1433 1432 _funcs = {
1434 1433 'user': lambda r: repo[r].user(),
1435 1434 'branch': lambda r: repo[r].branch(),
1436 1435 'date': lambda r: repo[r].date(),
1437 1436 'description': lambda r: repo[r].description(),
1438 1437 'files': lambda r: repo[r].files(),
1439 1438 'parents': lambda r: repo[r].parents(),
1440 1439 'phase': lambda r: repo[r].phase(),
1441 1440 'substate': lambda r: repo[r].substate,
1442 1441 'summary': lambda r: repo[r].description().splitlines()[0],
1443 1442 'diff': lambda r: list(repo[r].diff(git=True),)
1444 1443 }
1445 1444 for info in fields:
1446 1445 getfield = _funcs.get(info, None)
1447 1446 if getfield is None:
1448 1447 raise error.ParseError(
1449 1448 # i18n: "matching" is a keyword
1450 1449 _("unexpected field name passed to matching: %s") % info)
1451 1450 getfieldfuncs.append(getfield)
1452 1451 # convert the getfield array of functions into a "getinfo" function
1453 1452 # which returns an array of field values (or a single value if there
1454 1453 # is only one field to match)
1455 1454 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1456 1455
1457 1456 def matches(x):
1458 1457 for rev in revs:
1459 1458 target = getinfo(rev)
1460 1459 match = True
1461 1460 for n, f in enumerate(getfieldfuncs):
1462 1461 if target[n] != f(x):
1463 1462 match = False
1464 1463 if match:
1465 1464 return True
1466 1465 return False
1467 1466
1468 1467 return subset.filter(matches)
1469 1468
1470 1469 def reverse(repo, subset, x):
1471 1470 """``reverse(set)``
1472 1471 Reverse order of set.
1473 1472 """
1474 1473 l = getset(repo, subset, x)
1475 1474 l.reverse()
1476 1475 return l
1477 1476
1478 1477 def roots(repo, subset, x):
1479 1478 """``roots(set)``
1480 1479 Changesets in set with no parent changeset in set.
1481 1480 """
1482 1481 s = getset(repo, spanset(repo), x)
1483 1482 subset = baseset([r for r in s if r in subset])
1484 1483 cs = _children(repo, subset, s)
1485 1484 return subset - cs
1486 1485
1487 1486 def secret(repo, subset, x):
1488 1487 """``secret()``
1489 1488 Changeset in secret phase."""
1490 1489 # i18n: "secret" is a keyword
1491 1490 getargs(x, 0, 0, _("secret takes no arguments"))
1492 1491 phase = repo._phasecache.phase
1493 1492 target = phases.secret
1494 1493 condition = lambda r: phase(repo, r) == target
1495 1494 return subset.filter(condition, cache=False)
1496 1495
1497 1496 def sort(repo, subset, x):
1498 1497 """``sort(set[, [-]key...])``
1499 1498 Sort set by keys. The default sort order is ascending, specify a key
1500 1499 as ``-key`` to sort in descending order.
1501 1500
1502 1501 The keys can be:
1503 1502
1504 1503 - ``rev`` for the revision number,
1505 1504 - ``branch`` for the branch name,
1506 1505 - ``desc`` for the commit message (description),
1507 1506 - ``user`` for user name (``author`` can be used as an alias),
1508 1507 - ``date`` for the commit date
1509 1508 """
1510 1509 # i18n: "sort" is a keyword
1511 1510 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1512 1511 keys = "rev"
1513 1512 if len(l) == 2:
1514 1513 # i18n: "sort" is a keyword
1515 1514 keys = getstring(l[1], _("sort spec must be a string"))
1516 1515
1517 1516 s = l[0]
1518 1517 keys = keys.split()
1519 1518 l = []
1520 1519 def invert(s):
1521 1520 return "".join(chr(255 - ord(c)) for c in s)
1522 1521 revs = getset(repo, subset, s)
1523 1522 if keys == ["rev"]:
1524 1523 revs.sort()
1525 1524 return revs
1526 1525 elif keys == ["-rev"]:
1527 1526 revs.sort(reverse=True)
1528 1527 return revs
1529 1528 for r in revs:
1530 1529 c = repo[r]
1531 1530 e = []
1532 1531 for k in keys:
1533 1532 if k == 'rev':
1534 1533 e.append(r)
1535 1534 elif k == '-rev':
1536 1535 e.append(-r)
1537 1536 elif k == 'branch':
1538 1537 e.append(c.branch())
1539 1538 elif k == '-branch':
1540 1539 e.append(invert(c.branch()))
1541 1540 elif k == 'desc':
1542 1541 e.append(c.description())
1543 1542 elif k == '-desc':
1544 1543 e.append(invert(c.description()))
1545 1544 elif k in 'user author':
1546 1545 e.append(c.user())
1547 1546 elif k in '-user -author':
1548 1547 e.append(invert(c.user()))
1549 1548 elif k == 'date':
1550 1549 e.append(c.date()[0])
1551 1550 elif k == '-date':
1552 1551 e.append(-c.date()[0])
1553 1552 else:
1554 1553 raise error.ParseError(_("unknown sort key %r") % k)
1555 1554 e.append(r)
1556 1555 l.append(e)
1557 1556 l.sort()
1558 1557 return baseset([e[-1] for e in l])
1559 1558
1560 1559 def _stringmatcher(pattern):
1561 1560 """
1562 1561 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1563 1562 returns the matcher name, pattern, and matcher function.
1564 1563 missing or unknown prefixes are treated as literal matches.
1565 1564
1566 1565 helper for tests:
1567 1566 >>> def test(pattern, *tests):
1568 1567 ... kind, pattern, matcher = _stringmatcher(pattern)
1569 1568 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1570 1569
1571 1570 exact matching (no prefix):
1572 1571 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1573 1572 ('literal', 'abcdefg', [False, False, True])
1574 1573
1575 1574 regex matching ('re:' prefix)
1576 1575 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1577 1576 ('re', 'a.+b', [False, False, True])
1578 1577
1579 1578 force exact matches ('literal:' prefix)
1580 1579 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1581 1580 ('literal', 're:foobar', [False, True])
1582 1581
1583 1582 unknown prefixes are ignored and treated as literals
1584 1583 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1585 1584 ('literal', 'foo:bar', [False, False, True])
1586 1585 """
1587 1586 if pattern.startswith('re:'):
1588 1587 pattern = pattern[3:]
1589 1588 try:
1590 1589 regex = re.compile(pattern)
1591 1590 except re.error, e:
1592 1591 raise error.ParseError(_('invalid regular expression: %s')
1593 1592 % e)
1594 1593 return 're', pattern, regex.search
1595 1594 elif pattern.startswith('literal:'):
1596 1595 pattern = pattern[8:]
1597 1596 return 'literal', pattern, pattern.__eq__
1598 1597
1599 1598 def _substringmatcher(pattern):
1600 1599 kind, pattern, matcher = _stringmatcher(pattern)
1601 1600 if kind == 'literal':
1602 1601 matcher = lambda s: pattern in s
1603 1602 return kind, pattern, matcher
1604 1603
1605 1604 def tag(repo, subset, x):
1606 1605 """``tag([name])``
1607 1606 The specified tag by name, or all tagged revisions if no name is given.
1608 1607
1609 1608 If `name` starts with `re:`, the remainder of the name is treated as
1610 1609 a regular expression. To match a tag that actually starts with `re:`,
1611 1610 use the prefix `literal:`.
1612 1611 """
1613 1612 # i18n: "tag" is a keyword
1614 1613 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1615 1614 cl = repo.changelog
1616 1615 if args:
1617 1616 pattern = getstring(args[0],
1618 1617 # i18n: "tag" is a keyword
1619 1618 _('the argument to tag must be a string'))
1620 1619 kind, pattern, matcher = _stringmatcher(pattern)
1621 1620 if kind == 'literal':
1622 1621 # avoid resolving all tags
1623 1622 tn = repo._tagscache.tags.get(pattern, None)
1624 1623 if tn is None:
1625 1624 raise util.Abort(_("tag '%s' does not exist") % pattern)
1626 1625 s = set([repo[tn].rev()])
1627 1626 else:
1628 1627 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1629 1628 else:
1630 1629 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1631 1630 return subset & s
1632 1631
1633 1632 def tagged(repo, subset, x):
1634 1633 return tag(repo, subset, x)
1635 1634
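As a usage note for the predicate above, callers normally reach it through a revset query; a hedged sketch, assuming a repo object obtained in an extension (the tag names are hypothetical):

tagged = repo.revs("tag()")             # all tagged revisions
v1 = repo.revs("tag('re:^v1\\.')")      # tags matching a regular expression
odd = repo.revs("tag('literal:re:x')")  # a tag literally named "re:x"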
1636 1635 def unstable(repo, subset, x):
1637 1636 """``unstable()``
1638 1637 Non-obsolete changesets with obsolete ancestors.
1639 1638 """
1640 1639 # i18n: "unstable" is a keyword
1641 1640 getargs(x, 0, 0, _("unstable takes no arguments"))
1642 1641 unstables = obsmod.getrevs(repo, 'unstable')
1643 1642 return subset & unstables
1644 1643
1645 1644
1646 1645 def user(repo, subset, x):
1647 1646 """``user(string)``
1648 1647 User name contains string. The match is case-insensitive.
1649 1648
1650 1649 If `string` starts with `re:`, the remainder of the string is treated as
1651 1650 a regular expression. To match a user that actually contains `re:`, use
1652 1651 the prefix `literal:`.
1653 1652 """
1654 1653 return author(repo, subset, x)
1655 1654
1656 1655 # for internal use
1657 1656 def _list(repo, subset, x):
1658 1657 s = getstring(x, "internal error")
1659 1658 if not s:
1660 1659 return baseset()
1661 1660 ls = [repo[r].rev() for r in s.split('\0')]
1662 1661 s = subset
1663 1662 return baseset([r for r in ls if r in s])
1664 1663
1665 1664 # for internal use
1666 1665 def _intlist(repo, subset, x):
1667 1666 s = getstring(x, "internal error")
1668 1667 if not s:
1669 1668 return baseset()
1670 1669 ls = [int(r) for r in s.split('\0')]
1671 1670 s = subset
1672 1671 return baseset([r for r in ls if r in s])
1673 1672
1674 1673 # for internal use
1675 1674 def _hexlist(repo, subset, x):
1676 1675 s = getstring(x, "internal error")
1677 1676 if not s:
1678 1677 return baseset()
1679 1678 cl = repo.changelog
1680 1679 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1681 1680 s = subset
1682 1681 return baseset([r for r in ls if r in s])
1683 1682
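These three internal predicates are the receiving end of formatspec() (defined further down): its %ls/%ld/%ln list forms serialize multi-element lists NUL-separated into _list/_intlist/_hexlist calls, which are then resolved here against the subset. For instance, assuming this module's formatspec():

assert formatspec('%ld', [10, 11, 12]) == "_intlist('10\x0011\x0012')"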
1684 1683 symbols = {
1685 1684 "adds": adds,
1686 1685 "all": getall,
1687 1686 "ancestor": ancestor,
1688 1687 "ancestors": ancestors,
1689 1688 "_firstancestors": _firstancestors,
1690 1689 "author": author,
1691 1690 "only": only,
1692 1691 "bisect": bisect,
1693 1692 "bisected": bisected,
1694 1693 "bookmark": bookmark,
1695 1694 "branch": branch,
1696 1695 "branchpoint": branchpoint,
1697 1696 "bumped": bumped,
1698 1697 "bundle": bundle,
1699 1698 "children": children,
1700 1699 "closed": closed,
1701 1700 "contains": contains,
1702 1701 "converted": converted,
1703 1702 "date": date,
1704 1703 "desc": desc,
1705 1704 "descendants": descendants,
1706 1705 "_firstdescendants": _firstdescendants,
1707 1706 "destination": destination,
1708 1707 "divergent": divergent,
1709 1708 "draft": draft,
1710 1709 "extinct": extinct,
1711 1710 "extra": extra,
1712 1711 "file": hasfile,
1713 1712 "filelog": filelog,
1714 1713 "first": first,
1715 1714 "follow": follow,
1716 1715 "_followfirst": _followfirst,
1717 1716 "grep": grep,
1718 1717 "head": head,
1719 1718 "heads": heads,
1720 1719 "hidden": hidden,
1721 1720 "id": node_,
1722 1721 "keyword": keyword,
1723 1722 "last": last,
1724 1723 "limit": limit,
1725 1724 "_matchfiles": _matchfiles,
1726 1725 "max": maxrev,
1727 1726 "merge": merge,
1728 1727 "min": minrev,
1729 1728 "modifies": modifies,
1730 1729 "obsolete": obsolete,
1731 1730 "origin": origin,
1732 1731 "outgoing": outgoing,
1733 1732 "p1": p1,
1734 1733 "p2": p2,
1735 1734 "parents": parents,
1736 1735 "present": present,
1737 1736 "public": public,
1738 1737 "remote": remote,
1739 1738 "removes": removes,
1740 1739 "rev": rev,
1741 1740 "reverse": reverse,
1742 1741 "roots": roots,
1743 1742 "sort": sort,
1744 1743 "secret": secret,
1745 1744 "matching": matching,
1746 1745 "tag": tag,
1747 1746 "tagged": tagged,
1748 1747 "user": user,
1749 1748 "unstable": unstable,
1750 1749 "_list": _list,
1751 1750 "_intlist": _intlist,
1752 1751 "_hexlist": _hexlist,
1753 1752 }
1754 1753
1755 1754 # symbols which can't be used for a DoS attack for any given input
1756 1755 # (e.g. those which accept regexes as plain strings shouldn't be included)
1757 1756 # functions that just return a lot of changesets (like all) don't count here
1758 1757 safesymbols = set([
1759 1758 "adds",
1760 1759 "all",
1761 1760 "ancestor",
1762 1761 "ancestors",
1763 1762 "_firstancestors",
1764 1763 "author",
1765 1764 "bisect",
1766 1765 "bisected",
1767 1766 "bookmark",
1768 1767 "branch",
1769 1768 "branchpoint",
1770 1769 "bumped",
1771 1770 "bundle",
1772 1771 "children",
1773 1772 "closed",
1774 1773 "converted",
1775 1774 "date",
1776 1775 "desc",
1777 1776 "descendants",
1778 1777 "_firstdescendants",
1779 1778 "destination",
1780 1779 "divergent",
1781 1780 "draft",
1782 1781 "extinct",
1783 1782 "extra",
1784 1783 "file",
1785 1784 "filelog",
1786 1785 "first",
1787 1786 "follow",
1788 1787 "_followfirst",
1789 1788 "head",
1790 1789 "heads",
1791 1790 "hidden",
1792 1791 "id",
1793 1792 "keyword",
1794 1793 "last",
1795 1794 "limit",
1796 1795 "_matchfiles",
1797 1796 "max",
1798 1797 "merge",
1799 1798 "min",
1800 1799 "modifies",
1801 1800 "obsolete",
1802 1801 "origin",
1803 1802 "outgoing",
1804 1803 "p1",
1805 1804 "p2",
1806 1805 "parents",
1807 1806 "present",
1808 1807 "public",
1809 1808 "remote",
1810 1809 "removes",
1811 1810 "rev",
1812 1811 "reverse",
1813 1812 "roots",
1814 1813 "sort",
1815 1814 "secret",
1816 1815 "matching",
1817 1816 "tag",
1818 1817 "tagged",
1819 1818 "user",
1820 1819 "unstable",
1821 1820 "_list",
1822 1821 "_intlist",
1823 1822 "_hexlist",
1824 1823 ])
1825 1824
1826 1825 methods = {
1827 1826 "range": rangeset,
1828 1827 "dagrange": dagrange,
1829 1828 "string": stringset,
1830 1829 "symbol": symbolset,
1831 1830 "and": andset,
1832 1831 "or": orset,
1833 1832 "not": notset,
1834 1833 "list": listset,
1835 1834 "func": func,
1836 1835 "ancestor": ancestorspec,
1837 1836 "parent": parentspec,
1838 1837 "parentpost": p1,
1839 1838 }
1840 1839
1841 1840 def optimize(x, small):
1842 1841 if x is None:
1843 1842 return 0, x
1844 1843
1845 1844 smallbonus = 1
1846 1845 if small:
1847 1846 smallbonus = .5
1848 1847
1849 1848 op = x[0]
1850 1849 if op == 'minus':
1851 1850 return optimize(('and', x[1], ('not', x[2])), small)
1852 1851 elif op == 'dagrangepre':
1853 1852 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1854 1853 elif op == 'dagrangepost':
1855 1854 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1856 1855 elif op == 'rangepre':
1857 1856 return optimize(('range', ('string', '0'), x[1]), small)
1858 1857 elif op == 'rangepost':
1859 1858 return optimize(('range', x[1], ('string', 'tip')), small)
1860 1859 elif op == 'negate':
1861 1860 return optimize(('string',
1862 1861 '-' + getstring(x[1], _("can't negate that"))), small)
1863 1862 elif op in 'string symbol negate':
1864 1863 return smallbonus, x # single revisions are small
1865 1864 elif op == 'and':
1866 1865 wa, ta = optimize(x[1], True)
1867 1866 wb, tb = optimize(x[2], True)
1868 1867
1869 1868 # (::x and not ::y)/(not ::y and ::x) have a fast path
1870 1869 def isonly(revs, bases):
1871 1870 return (
1872 1871 revs[0] == 'func'
1873 1872 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1874 1873 and bases[0] == 'not'
1875 1874 and bases[1][0] == 'func'
1876 1875 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1877 1876
1878 1877 w = min(wa, wb)
1879 1878 if isonly(ta, tb):
1880 1879 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1881 1880 if isonly(tb, ta):
1882 1881 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1883 1882
1884 1883 if wa > wb:
1885 1884 return w, (op, tb, ta)
1886 1885 return w, (op, ta, tb)
1887 1886 elif op == 'or':
1888 1887 wa, ta = optimize(x[1], False)
1889 1888 wb, tb = optimize(x[2], False)
1890 1889 if wb < wa:
1891 1890 wb, wa = wa, wb
1892 1891 return max(wa, wb), (op, ta, tb)
1893 1892 elif op == 'not':
1894 1893 o = optimize(x[1], not small)
1895 1894 return o[0], (op, o[1])
1896 1895 elif op == 'parentpost':
1897 1896 o = optimize(x[1], small)
1898 1897 return o[0], (op, o[1])
1899 1898 elif op == 'group':
1900 1899 return optimize(x[1], small)
1901 1900 elif op in 'dagrange range list parent ancestorspec':
1902 1901 if op == 'parent':
1903 1902 # x^:y means (x^) : y, not x ^ (:y)
1904 1903 post = ('parentpost', x[1])
1905 1904 if x[2][0] == 'dagrangepre':
1906 1905 return optimize(('dagrange', post, x[2][1]), small)
1907 1906 elif x[2][0] == 'rangepre':
1908 1907 return optimize(('range', post, x[2][1]), small)
1909 1908
1910 1909 wa, ta = optimize(x[1], small)
1911 1910 wb, tb = optimize(x[2], small)
1912 1911 return wa + wb, (op, ta, tb)
1913 1912 elif op == 'func':
1914 1913 f = getstring(x[1], _("not a symbol"))
1915 1914 wa, ta = optimize(x[2], small)
1916 1915 if f in ("author branch closed date desc file grep keyword "
1917 1916 "outgoing user"):
1918 1917 w = 10 # slow
1919 1918 elif f in "modifies adds removes":
1920 1919 w = 30 # slower
1921 1920 elif f == "contains":
1922 1921 w = 100 # very slow
1923 1922 elif f == "ancestor":
1924 1923 w = 1 * smallbonus
1925 1924 elif f in "reverse limit first _intlist":
1926 1925 w = 0
1927 1926 elif f in "sort":
1928 1927 w = 10 # assume most sorts look at changelog
1929 1928 else:
1930 1929 w = 1
1931 1930 return w + wa, (op, x[1], ta)
1932 1931 return 1, x
1933 1932
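The isonly() fast path above rewrites "::x and not ::y" (in either operand order) into "only(x, y)", letting the only() predicate compute the difference directly instead of materializing and subtracting two full ancestor sets. A small sketch, assuming this module's parse() and optimize():

tree, pos = parse('::tip and not ::default')
weight, tree = optimize(tree, True)
# tree is now:
# ('func', ('symbol', 'only'),
#          ('list', ('symbol', 'tip'), ('symbol', 'default')))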
1934 1933 _aliasarg = ('func', ('symbol', '_aliasarg'))
1935 1934 def _getaliasarg(tree):
1936 1935 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1937 1936 return X, None otherwise.
1938 1937 """
1939 1938 if (len(tree) == 3 and tree[:2] == _aliasarg
1940 1939 and tree[2][0] == 'string'):
1941 1940 return tree[2][1]
1942 1941 return None
1943 1942
1944 1943 def _checkaliasarg(tree, known=None):
1945 1944 """Check tree contains no _aliasarg construct or only ones which
1946 1945 value is in known. Used to avoid alias placeholders injection.
1947 1946 """
1948 1947 if isinstance(tree, tuple):
1949 1948 arg = _getaliasarg(tree)
1950 1949 if arg is not None and (not known or arg not in known):
1951 1950 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1952 1951 for t in tree:
1953 1952 _checkaliasarg(t, known)
1954 1953
1955 1954 class revsetalias(object):
1956 1955 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1957 1956 args = None
1958 1957
1959 1958 def __init__(self, name, value):
1960 1959 '''Aliases like:
1961 1960
1962 1961 h = heads(default)
1963 1962 b($1) = ancestors($1) - ancestors(default)
1964 1963 '''
1965 1964 m = self.funcre.search(name)
1966 1965 if m:
1967 1966 self.name = m.group(1)
1968 1967 self.tree = ('func', ('symbol', m.group(1)))
1969 1968 self.args = [x.strip() for x in m.group(2).split(',')]
1970 1969 for arg in self.args:
1971 1970 # _aliasarg() is an unknown symbol only used to separate
1972 1971 # alias argument placeholders from regular strings.
1973 1972 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1974 1973 else:
1975 1974 self.name = name
1976 1975 self.tree = ('symbol', name)
1977 1976
1978 1977 self.replacement, pos = parse(value)
1979 1978 if pos != len(value):
1980 1979 raise error.ParseError(_('invalid token'), pos)
1981 1980 # Check for placeholder injection
1982 1981 _checkaliasarg(self.replacement, self.args)
1983 1982
1984 1983 def _getalias(aliases, tree):
1985 1984 """If tree looks like an unexpanded alias, return it. Return None
1986 1985 otherwise.
1987 1986 """
1988 1987 if isinstance(tree, tuple) and tree:
1989 1988 if tree[0] == 'symbol' and len(tree) == 2:
1990 1989 name = tree[1]
1991 1990 alias = aliases.get(name)
1992 1991 if alias and alias.args is None and alias.tree == tree:
1993 1992 return alias
1994 1993 if tree[0] == 'func' and len(tree) > 1:
1995 1994 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1996 1995 name = tree[1][1]
1997 1996 alias = aliases.get(name)
1998 1997 if alias and alias.args is not None and alias.tree == tree[:2]:
1999 1998 return alias
2000 1999 return None
2001 2000
2002 2001 def _expandargs(tree, args):
2003 2002 """Replace _aliasarg instances with the substitution value of the
2004 2003 same name in args, recursively.
2005 2004 """
2006 2005 if not tree or not isinstance(tree, tuple):
2007 2006 return tree
2008 2007 arg = _getaliasarg(tree)
2009 2008 if arg is not None:
2010 2009 return args[arg]
2011 2010 return tuple(_expandargs(t, args) for t in tree)
2012 2011
2013 2012 def _expandaliases(aliases, tree, expanding, cache):
2014 2013 """Expand aliases in tree, recursively.
2015 2014
2016 2015 'aliases' is a dictionary mapping user defined aliases to
2017 2016 revsetalias objects.
2018 2017 """
2019 2018 if not isinstance(tree, tuple):
2020 2019 # Do not expand raw strings
2021 2020 return tree
2022 2021 alias = _getalias(aliases, tree)
2023 2022 if alias is not None:
2024 2023 if alias in expanding:
2025 2024 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2026 2025 'detected') % alias.name)
2027 2026 expanding.append(alias)
2028 2027 if alias.name not in cache:
2029 2028 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2030 2029 expanding, cache)
2031 2030 result = cache[alias.name]
2032 2031 expanding.pop()
2033 2032 if alias.args is not None:
2034 2033 l = getlist(tree[2])
2035 2034 if len(l) != len(alias.args):
2036 2035 raise error.ParseError(
2037 2036 _('invalid number of arguments: %s') % len(l))
2038 2037 l = [_expandaliases(aliases, a, [], cache) for a in l]
2039 2038 result = _expandargs(result, dict(zip(alias.args, l)))
2040 2039 else:
2041 2040 result = tuple(_expandaliases(aliases, t, expanding, cache)
2042 2041 for t in tree)
2043 2042 return result
2044 2043
2045 2044 def findaliases(ui, tree):
2046 2045 _checkaliasarg(tree)
2047 2046 aliases = {}
2048 2047 for k, v in ui.configitems('revsetalias'):
2049 2048 alias = revsetalias(k, v)
2050 2049 aliases[alias.name] = alias
2051 2050 return _expandaliases(aliases, tree, [], {})
2052 2051
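To make the expansion above concrete, here is a sketch of what happens for a parameterized alias such as b($1) = ancestors($1) - ancestors(default) (the same example as in the revsetalias docstring), driving the helpers in this module directly:

alias = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
tree, pos = parse('b(stable)')
expanded = _expandaliases({alias.name: alias}, tree, [], {})
# expanded is the parse tree of "ancestors(stable) - ancestors(default)";
# the _aliasarg('$1') placeholder was replaced by ('symbol', 'stable')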
2053 2052 def parse(spec, lookup=None):
2054 2053 p = parser.parser(tokenize, elements)
2055 2054 return p.parse(spec, lookup=lookup)
2056 2055
2057 2056 def match(ui, spec, repo=None):
2058 2057 if not spec:
2059 2058 raise error.ParseError(_("empty query"))
2060 2059 lookup = None
2061 2060 if repo:
2062 2061 lookup = repo.__contains__
2063 2062 tree, pos = parse(spec, lookup)
2064 2063 if (pos != len(spec)):
2065 2064 raise error.ParseError(_("invalid token"), pos)
2066 2065 if ui:
2067 2066 tree = findaliases(ui, tree)
2068 2067 weight, tree = optimize(tree, True)
2069 2068 def mfunc(repo, subset):
2070 2069 if util.safehasattr(subset, 'isascending'):
2071 2070 result = getset(repo, subset, tree)
2072 2071 else:
2073 2072 result = getset(repo, baseset(subset), tree)
2074 2073 return result
2075 2074 return mfunc
2076 2075
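A short usage sketch for match(): it returns a function that evaluates the alias-expanded, optimized tree against a subset. Assuming ui and repo objects from an extension:

m = match(ui, 'head() and not closed()', repo)
revs = m(repo, spanset(repo))      # a smartset of matching revisions
open_heads = [repo[r].hex() for r in revs]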
2077 2076 def formatspec(expr, *args):
2078 2077 '''
2079 2078 This is a convenience function for using revsets internally, and
2080 2079 escapes arguments appropriately. Aliases are intentionally ignored
2081 2080 so that intended expression behavior isn't accidentally subverted.
2082 2081
2083 2082 Supported arguments:
2084 2083
2085 2084 %r = revset expression, parenthesized
2086 2085 %d = int(arg), no quoting
2087 2086 %s = string(arg), escaped and single-quoted
2088 2087 %b = arg.branch(), escaped and single-quoted
2089 2088 %n = hex(arg), single-quoted
2090 2089 %% = a literal '%'
2091 2090
2092 2091 Prefixing the type with 'l' specifies a parenthesized list of that type.
2093 2092
2094 2093 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2095 2094 '(10 or 11):: and ((this()) or (that()))'
2096 2095 >>> formatspec('%d:: and not %d::', 10, 20)
2097 2096 '10:: and not 20::'
2098 2097 >>> formatspec('%ld or %ld', [], [1])
2099 2098 "_list('') or 1"
2100 2099 >>> formatspec('keyword(%s)', 'foo\\xe9')
2101 2100 "keyword('foo\\\\xe9')"
2102 2101 >>> b = lambda: 'default'
2103 2102 >>> b.branch = b
2104 2103 >>> formatspec('branch(%b)', b)
2105 2104 "branch('default')"
2106 2105 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2107 2106 "root(_list('a\\x00b\\x00c\\x00d'))"
2108 2107 '''
2109 2108
2110 2109 def quote(s):
2111 2110 return repr(str(s))
2112 2111
2113 2112 def argtype(c, arg):
2114 2113 if c == 'd':
2115 2114 return str(int(arg))
2116 2115 elif c == 's':
2117 2116 return quote(arg)
2118 2117 elif c == 'r':
2119 2118 parse(arg) # make sure syntax errors are confined
2120 2119 return '(%s)' % arg
2121 2120 elif c == 'n':
2122 2121 return quote(node.hex(arg))
2123 2122 elif c == 'b':
2124 2123 return quote(arg.branch())
2125 2124
2126 2125 def listexp(s, t):
2127 2126 l = len(s)
2128 2127 if l == 0:
2129 2128 return "_list('')"
2130 2129 elif l == 1:
2131 2130 return argtype(t, s[0])
2132 2131 elif t == 'd':
2133 2132 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2134 2133 elif t == 's':
2135 2134 return "_list('%s')" % "\0".join(s)
2136 2135 elif t == 'n':
2137 2136 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2138 2137 elif t == 'b':
2139 2138 return "_list('%s')" % "\0".join(a.branch() for a in s)
2140 2139
2141 2140 m = l // 2
2142 2141 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2143 2142
2144 2143 ret = ''
2145 2144 pos = 0
2146 2145 arg = 0
2147 2146 while pos < len(expr):
2148 2147 c = expr[pos]
2149 2148 if c == '%':
2150 2149 pos += 1
2151 2150 d = expr[pos]
2152 2151 if d == '%':
2153 2152 ret += d
2154 2153 elif d in 'dsnbr':
2155 2154 ret += argtype(d, args[arg])
2156 2155 arg += 1
2157 2156 elif d == 'l':
2158 2157 # a list of some type
2159 2158 pos += 1
2160 2159 d = expr[pos]
2161 2160 ret += listexp(list(args[arg]), d)
2162 2161 arg += 1
2163 2162 else:
2164 2163 raise util.Abort('unexpected revspec format character %s' % d)
2165 2164 else:
2166 2165 ret += c
2167 2166 pos += 1
2168 2167
2169 2168 return ret
2170 2169
2171 2170 def prettyformat(tree):
2172 2171 def _prettyformat(tree, level, lines):
2173 2172 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2174 2173 lines.append((level, str(tree)))
2175 2174 else:
2176 2175 lines.append((level, '(%s' % tree[0]))
2177 2176 for s in tree[1:]:
2178 2177 _prettyformat(s, level + 1, lines)
2179 2178 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2180 2179
2181 2180 lines = []
2182 2181 _prettyformat(tree, 0, lines)
2183 2182 output = '\n'.join((' '*l + s) for l, s in lines)
2184 2183 return output
2185 2184
2186 2185 def depth(tree):
2187 2186 if isinstance(tree, tuple):
2188 2187 return max(map(depth, tree)) + 1
2189 2188 else:
2190 2189 return 0
2191 2190
2192 2191 def funcsused(tree):
2193 2192 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2194 2193 return set()
2195 2194 else:
2196 2195 funcs = set()
2197 2196 for s in tree[1:]:
2198 2197 funcs |= funcsused(s)
2199 2198 if tree[0] == 'func':
2200 2199 funcs.add(tree[1][1])
2201 2200 return funcs
2202 2201
2203 2202 class abstractsmartset(object):
2204 2203
2205 2204 def __nonzero__(self):
2206 2205 """True if the smartset is not empty"""
2207 2206 raise NotImplementedError()
2208 2207
2209 2208 def __contains__(self, rev):
2210 2209 """provide fast membership testing"""
2211 2210 raise NotImplementedError()
2212 2211
2213 2212 def __iter__(self):
2214 2213 """iterate the set in the order it is supposed to be iterated"""
2215 2214 raise NotImplementedError()
2216 2215
2217 2216 # Attributes containing a function to perform a fast iteration in a given
2218 2217 # direction. A smartset can have none, one, or both defined.
2219 2218 #
2220 2219 # Default value is None instead of a function returning None to avoid
2221 2220 # initializing an iterator just for testing if a fast method exists.
2222 2221 fastasc = None
2223 2222 fastdesc = None
2224 2223
2225 2224 def isascending(self):
2226 2225 """True if the set will iterate in ascending order"""
2227 2226 raise NotImplementedError()
2228 2227
2229 2228 def isdescending(self):
2230 2229 """True if the set will iterate in descending order"""
2231 2230 raise NotImplementedError()
2232 2231
2233 2232 def min(self):
2234 2233 """return the minimum element in the set"""
2235 2234 if self.fastasc is not None:
2236 2235 for r in self.fastasc():
2237 2236 return r
2238 2237 raise ValueError('arg is an empty sequence')
2239 2238 return min(self)
2240 2239
2241 2240 def max(self):
2242 2241 """return the maximum element in the set"""
2243 2242 if self.fastdesc is not None:
2244 2243 for r in self.fastdesc():
2245 2244 return r
2246 2245 raise ValueError('arg is an empty sequence')
2247 2246 return max(self)
2248 2247
2249 2248 def first(self):
2250 2249 """return the first element in the set (user iteration perspective)
2251 2250
2252 2251 Return None if the set is empty"""
2253 2252 raise NotImplementedError()
2254 2253
2255 2254 def last(self):
2256 2255 """return the last element in the set (user iteration perspective)
2257 2256
2258 2257 Return None if the set is empty"""
2259 2258 raise NotImplementedError()
2260 2259
2261 2260 def __len__(self):
2262 2261 """return the length of the smartsets
2263 2262
2264 2263 This can be expensive on smartset that could be lazy otherwise."""
2265 2264 raise NotImplementedError()
2266 2265
2267 2266 def reverse(self):
2268 2267 """reverse the expected iteration order"""
2269 2268 raise NotImplementedError()
2270 2269
2271 2270 def sort(self, reverse=True):
2272 2271 """get the set to iterate in an ascending or descending order"""
2273 2272 raise NotImplementedError()
2274 2273
2275 2274 def __and__(self, other):
2276 2275 """Returns a new object with the intersection of the two collections.
2277 2276
2278 2277 This is part of the mandatory API for smartset."""
2279 2278 return self.filter(other.__contains__, cache=False)
2280 2279
2281 2280 def __add__(self, other):
2282 2281 """Returns a new object with the union of the two collections.
2283 2282
2284 2283 This is part of the mandatory API for smartset."""
2285 2284 return addset(self, other)
2286 2285
2287 2286 def __sub__(self, other):
2288 2287 """Returns a new object with the substraction of the two collections.
2289 2288
2290 2289 This is part of the mandatory API for smartset."""
2291 2290 c = other.__contains__
2292 2291 return self.filter(lambda r: not c(r), cache=False)
2293 2292
2294 2293 def filter(self, condition, cache=True):
2295 2294 """Returns this smartset filtered by condition as a new smartset.
2296 2295
2297 2296 `condition` is a callable which takes a revision number and returns a
2298 2297 boolean.
2299 2298
2300 2299 This is part of the mandatory API for smartset."""
2301 2300 # builtins cannot be cached, but they do not need to be
2302 2301 if cache and util.safehasattr(condition, 'func_code'):
2303 2302 condition = util.cachefunc(condition)
2304 2303 return filteredset(self, condition)
2305 2304
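A small sketch of how these operators compose, using the concrete classes defined below (the numbers stand for arbitrary revision numbers):

a = baseset([0, 2, 4, 6])
b = baseset([3, 4, 5, 6])
assert list(a & b) == [4, 6]                   # filteredset: revs of a also in b
assert list(a - b) == [0, 2]                   # filteredset: revs of a not in b
assert set(a + b) == set([0, 2, 3, 4, 5, 6])   # addset, lazily de-duplicated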
2306 2305 class baseset(abstractsmartset):
2307 2306 """Basic data structure that represents a revset and contains the basic
2308 2307 operation that it should be able to perform.
2309 2308
2310 2309 Every method in this class should be implemented by any smartset class.
2311 2310 """
2312 2311 def __init__(self, data=()):
2313 2312 if not isinstance(data, list):
2314 2313 data = list(data)
2315 2314 self._list = data
2316 2315 self._ascending = None
2317 2316
2318 2317 @util.propertycache
2319 2318 def _set(self):
2320 2319 return set(self._list)
2321 2320
2322 2321 @util.propertycache
2323 2322 def _asclist(self):
2324 2323 asclist = self._list[:]
2325 2324 asclist.sort()
2326 2325 return asclist
2327 2326
2328 2327 def __iter__(self):
2329 2328 if self._ascending is None:
2330 2329 return iter(self._list)
2331 2330 elif self._ascending:
2332 2331 return iter(self._asclist)
2333 2332 else:
2334 2333 return reversed(self._asclist)
2335 2334
2336 2335 def fastasc(self):
2337 2336 return iter(self._asclist)
2338 2337
2339 2338 def fastdesc(self):
2340 2339 return reversed(self._asclist)
2341 2340
2342 2341 @util.propertycache
2343 2342 def __contains__(self):
2344 2343 return self._set.__contains__
2345 2344
2346 2345 def __nonzero__(self):
2347 2346 return bool(self._list)
2348 2347
2349 2348 def sort(self, reverse=False):
2350 2349 self._ascending = not bool(reverse)
2351 2350
2352 2351 def reverse(self):
2353 2352 if self._ascending is None:
2354 2353 self._list.reverse()
2355 2354 else:
2356 2355 self._ascending = not self._ascending
2357 2356
2358 2357 def __len__(self):
2359 2358 return len(self._list)
2360 2359
2361 2360 def isascending(self):
2362 2361 """Returns True if the collection is ascending order, False if not.
2363 2362
2364 2363 This is part of the mandatory API for smartset."""
2365 2364 if len(self) <= 1:
2366 2365 return True
2367 2366 return self._ascending is not None and self._ascending
2368 2367
2369 2368 def isdescending(self):
2370 2369 """Returns True if the collection is descending order, False if not.
2371 2370
2372 2371 This is part of the mandatory API for smartset."""
2373 2372 if len(self) <= 1:
2374 2373 return True
2375 2374 return self._ascending is not None and not self._ascending
2376 2375
2377 2376 def first(self):
2378 2377 if self:
2379 2378 if self._ascending is None:
2380 2379 return self._list[0]
2381 2380 elif self._ascending:
2382 2381 return self._asclist[0]
2383 2382 else:
2384 2383 return self._asclist[-1]
2385 2384 return None
2386 2385
2387 2386 def last(self):
2388 2387 if self:
2389 2388 if self._ascending is None:
2390 2389 return self._list[-1]
2391 2390 elif self._ascending:
2392 2391 return self._asclist[-1]
2393 2392 else:
2394 2393 return self._asclist[0]
2395 2394 return None
2396 2395
2397 2396 class filteredset(abstractsmartset):
2398 2397 """Duck type for baseset class which iterates lazily over the revisions in
2399 2398 the subset and contains a function which tests for membership in the
2400 2399 revset
2401 2400 """
2402 2401 def __init__(self, subset, condition=lambda x: True):
2403 2402 """
2404 2403 condition: a function that decides whether a revision in the subset
2405 2404 belongs to the revset or not.
2406 2405 """
2407 2406 self._subset = subset
2408 2407 self._condition = condition
2409 2408 self._cache = {}
2410 2409
2411 2410 def __contains__(self, x):
2412 2411 c = self._cache
2413 2412 if x not in c:
2414 2413 v = c[x] = x in self._subset and self._condition(x)
2415 2414 return v
2416 2415 return c[x]
2417 2416
2418 2417 def __iter__(self):
2419 2418 return self._iterfilter(self._subset)
2420 2419
2421 2420 def _iterfilter(self, it):
2422 2421 cond = self._condition
2423 2422 for x in it:
2424 2423 if cond(x):
2425 2424 yield x
2426 2425
2427 2426 @property
2428 2427 def fastasc(self):
2429 2428 it = self._subset.fastasc
2430 2429 if it is None:
2431 2430 return None
2432 2431 return lambda: self._iterfilter(it())
2433 2432
2434 2433 @property
2435 2434 def fastdesc(self):
2436 2435 it = self._subset.fastdesc
2437 2436 if it is None:
2438 2437 return None
2439 2438 return lambda: self._iterfilter(it())
2440 2439
2441 2440 def __nonzero__(self):
2442 2441 for r in self:
2443 2442 return True
2444 2443 return False
2445 2444
2446 2445 def __len__(self):
2447 2446 # Basic implementation to be changed in future patches.
2448 2447 l = baseset([r for r in self])
2449 2448 return len(l)
2450 2449
2451 2450 def sort(self, reverse=False):
2452 2451 self._subset.sort(reverse=reverse)
2453 2452
2454 2453 def reverse(self):
2455 2454 self._subset.reverse()
2456 2455
2457 2456 def isascending(self):
2458 2457 return self._subset.isascending()
2459 2458
2460 2459 def isdescending(self):
2461 2460 return self._subset.isdescending()
2462 2461
2463 2462 def first(self):
2464 2463 for x in self:
2465 2464 return x
2466 2465 return None
2467 2466
2468 2467 def last(self):
2469 2468 it = None
2470 2469 if self._subset.isascending():
2471 2470 it = self.fastdesc
2472 2471 elif self._subset.isdescending():
2473 2472 it = self.fastasc
2474 2473 if it is None:
2475 2474 # slowly consume everything. This needs improvement
2476 2475 it = lambda: reversed(list(self))
2477 2476 for x in it():
2478 2477 return x
2479 2478 return None
2480 2479
2481 2480 class addset(abstractsmartset):
2482 2481 """Represent the addition of two sets
2483 2482
2484 2483 Wrapper structure for lazily adding two structures without losing much
2485 2484 performance on the __contains__ method
2486 2485
2487 2486 If the ascending attribute is set, that means the two structures are
2488 2487 ordered in either an ascending or descending way. Therefore, we can add
2489 2488 them maintaining the order by iterating over both at the same time
2490 2489 """
2491 2490 def __init__(self, revs1, revs2, ascending=None):
2492 2491 self._r1 = revs1
2493 2492 self._r2 = revs2
2494 2493 self._iter = None
2495 2494 self._ascending = ascending
2496 2495 self._genlist = None
2497 2496 self._asclist = None
2498 2497
2499 2498 def __len__(self):
2500 2499 return len(self._list)
2501 2500
2502 2501 def __nonzero__(self):
2503 2502 return bool(self._r1) or bool(self._r2)
2504 2503
2505 2504 @util.propertycache
2506 2505 def _list(self):
2507 2506 if not self._genlist:
2508 2507 self._genlist = baseset(self._iterator())
2509 2508 return self._genlist
2510 2509
2511 2510 def _iterator(self):
2512 2511 """Iterate over both collections without repeating elements
2513 2512
2514 2513 If the ascending attribute is not set, iterate over the first one and
2515 2514 then over the second one checking for membership on the first one so we
2516 2515 don't yield any duplicates.
2517 2516
2518 2517 If the ascending attribute is set, iterate over both collections at the
2519 2518 same time, yielding only one value at a time in the given order.
2520 2519 """
2521 2520 if self._ascending is None:
2522 2521 def gen():
2523 2522 for r in self._r1:
2524 2523 yield r
2525 2524 inr1 = self._r1.__contains__
2526 2525 for r in self._r2:
2527 2526 if not inr1(r):
2528 2527 yield r
2529 2528 gen = gen()
2530 2529 else:
2531 2530 iter1 = iter(self._r1)
2532 2531 iter2 = iter(self._r2)
2533 2532 gen = self._iterordered(self._ascending, iter1, iter2)
2534 2533 return gen
2535 2534
2536 2535 def __iter__(self):
2537 2536 if self._ascending is None:
2538 2537 if self._genlist:
2539 2538 return iter(self._genlist)
2540 2539 return iter(self._iterator())
2541 2540 self._trysetasclist()
2542 2541 if self._ascending:
2543 2542 it = self.fastasc
2544 2543 else:
2545 2544 it = self.fastdesc
2546 2545 if it is None:
2547 2546 # consume the gen and try again
2548 2547 self._list
2549 2548 return iter(self)
2550 2549 return it()
2551 2550
2552 2551 def _trysetasclist(self):
2553 2552 """populate the _asclist attribute if possible and necessary"""
2554 2553 if self._genlist is not None and self._asclist is None:
2555 2554 self._asclist = sorted(self._genlist)
2556 2555
2557 2556 @property
2558 2557 def fastasc(self):
2559 2558 self._trysetasclist()
2560 2559 if self._asclist is not None:
2561 2560 return self._asclist.__iter__
2562 2561 iter1 = self._r1.fastasc
2563 2562 iter2 = self._r2.fastasc
2564 2563 if None in (iter1, iter2):
2565 2564 return None
2566 2565 return lambda: self._iterordered(True, iter1(), iter2())
2567 2566
2568 2567 @property
2569 2568 def fastdesc(self):
2570 2569 self._trysetasclist()
2571 2570 if self._asclist is not None:
2572 2571 return self._asclist.__reversed__
2573 2572 iter1 = self._r1.fastdesc
2574 2573 iter2 = self._r2.fastdesc
2575 2574 if None in (iter1, iter2):
2576 2575 return None
2577 2576 return lambda: self._iterordered(False, iter1(), iter2())
2578 2577
2579 2578 def _iterordered(self, ascending, iter1, iter2):
2580 2579 """produce an ordered iteration from two iterators with the same order
2581 2580
2582 2581 The ascending parameter is used to indicate the iteration direction.
2583 2582 """
2584 2583 choice = max
2585 2584 if ascending:
2586 2585 choice = min
2587 2586
2588 2587 val1 = None
2589 2588 val2 = None
2590 2589
2594 2593 try:
2595 2594 # Consume both iterators in an ordered way until one is
2596 2595 # empty
2597 2596 while True:
2598 2597 if val1 is None:
2599 2598 val1 = iter1.next()
2600 2599 if val2 is None:
2601 2600 val2 = iter2.next()
2602 2601 next = choice(val1, val2)
2603 2602 yield next
2604 2603 if val1 == next:
2605 2604 val1 = None
2606 2605 if val2 == next:
2607 2606 val2 = None
2608 2607 except StopIteration:
2609 2608 # Flush any remaining values and consume the other one
2610 2609 it = iter2
2611 2610 if val1 is not None:
2612 2611 yield val1
2613 2612 it = iter1
2614 2613 elif val2 is not None:
2615 2614 # might have been equality and both are empty
2616 2615 yield val2
2617 2616 for val in it:
2618 2617 yield val
2619 2618
2620 2619 def __contains__(self, x):
2621 2620 return x in self._r1 or x in self._r2
2622 2621
2623 2622 def sort(self, reverse=False):
2624 2623 """Sort the added set
2625 2624
2626 2625 For this we use the cached list with all the generated values and if we
2627 2626 know they are ascending or descending we can sort them in a smart way.
2628 2627 """
2629 2628 self._ascending = not reverse
2630 2629
2631 2630 def isascending(self):
2632 2631 return self._ascending is not None and self._ascending
2633 2632
2634 2633 def isdescending(self):
2635 2634 return self._ascending is not None and not self._ascending
2636 2635
2637 2636 def reverse(self):
2638 2637 if self._ascending is None:
2639 2638 self._list.reverse()
2640 2639 else:
2641 2640 self._ascending = not self._ascending
2642 2641
2643 2642 def first(self):
2644 2643 for x in self:
2645 2644 return x
2646 2645 return None
2647 2646
2648 2647 def last(self):
2649 2648 self.reverse()
2650 2649 val = self.first()
2651 2650 self.reverse()
2652 2651 return val
2653 2652
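The _iterordered() merge above is the classic "merge two equally sorted iterators, dropping duplicates" pattern, generalized to either direction. A standalone ascending-only sketch of the same idea, using heapq.merge instead of the hand-rolled loop:

import heapq

def merge_no_dup(it1, it2):
    # both inputs must already be sorted ascending
    last = object()                     # sentinel that never equals a revision
    for val in heapq.merge(it1, it2):
        if val != last:
            yield val
            last = val

assert list(merge_no_dup(iter([1, 3, 5]), iter([2, 3, 6]))) == [1, 2, 3, 5, 6]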
2654 2653 class generatorset(abstractsmartset):
2655 2654 """Wrap a generator for lazy iteration
2656 2655
2657 2656 Wrapper structure for generators that provides lazy membership and can
2658 2657 be iterated more than once.
2659 2658 When asked for membership it generates values until either it finds the
2660 2659 requested one or has gone through all the elements in the generator
2661 2660 """
2662 2661 def __init__(self, gen, iterasc=None):
2663 2662 """
2664 2663 gen: a generator producing the values for the generatorset.
2665 2664 """
2666 2665 self._gen = gen
2667 2666 self._asclist = None
2668 2667 self._cache = {}
2669 2668 self._genlist = []
2670 2669 self._finished = False
2671 2670 self._ascending = True
2672 2671 if iterasc is not None:
2673 2672 if iterasc:
2674 2673 self.fastasc = self._iterator
2675 2674 self.__contains__ = self._asccontains
2676 2675 else:
2677 2676 self.fastdesc = self._iterator
2678 2677 self.__contains__ = self._desccontains
2679 2678
2680 2679 def __nonzero__(self):
2681 2680 for r in self:
2682 2681 return True
2683 2682 return False
2684 2683
2685 2684 def __contains__(self, x):
2686 2685 if x in self._cache:
2687 2686 return self._cache[x]
2688 2687
2689 2688 # Use new values only, as existing values would be cached.
2690 2689 for l in self._consumegen():
2691 2690 if l == x:
2692 2691 return True
2693 2692
2694 2693 self._cache[x] = False
2695 2694 return False
2696 2695
2697 2696 def _asccontains(self, x):
2698 2697 """version of contains optimised for ascending generator"""
2699 2698 if x in self._cache:
2700 2699 return self._cache[x]
2701 2700
2702 2701 # Use new values only, as existing values would be cached.
2703 2702 for l in self._consumegen():
2704 2703 if l == x:
2705 2704 return True
2706 2705 if l > x:
2707 2706 break
2708 2707
2709 2708 self._cache[x] = False
2710 2709 return False
2711 2710
2712 2711 def _desccontains(self, x):
2713 2712 """version of contains optimised for descending generator"""
2714 2713 if x in self._cache:
2715 2714 return self._cache[x]
2716 2715
2717 2716 # Use new values only, as existing values would be cached.
2718 2717 for l in self._consumegen():
2719 2718 if l == x:
2720 2719 return True
2721 2720 if l < x:
2722 2721 break
2723 2722
2724 2723 self._cache[x] = False
2725 2724 return False
2726 2725
2727 2726 def __iter__(self):
2728 2727 if self._ascending:
2729 2728 it = self.fastasc
2730 2729 else:
2731 2730 it = self.fastdesc
2732 2731 if it is not None:
2733 2732 return it()
2734 2733 # we need to consume the iterator
2735 2734 for x in self._consumegen():
2736 2735 pass
2737 2736 # recall the same code
2738 2737 return iter(self)
2739 2738
2740 2739 def _iterator(self):
2741 2740 if self._finished:
2742 2741 return iter(self._genlist)
2743 2742
2744 2743 # We have to use this complex iteration strategy to allow multiple
2745 2744 # iterations at the same time. We need to be able to catch revisions
2746 2745 # removed from _consumegen and added to genlist in another instance.
2747 2746 #
2748 2747 # Getting rid of it would provide about a 15% speed up on this
2749 2748 # iteration.
2750 2749 genlist = self._genlist
2751 2750 nextrev = self._consumegen().next
2752 2751 _len = len # cache global lookup
2753 2752 def gen():
2754 2753 i = 0
2755 2754 while True:
2756 2755 if i < _len(genlist):
2757 2756 yield genlist[i]
2758 2757 else:
2759 2758 yield nextrev()
2760 2759 i += 1
2761 2760 return gen()
2762 2761
2763 2762 def _consumegen(self):
2764 2763 cache = self._cache
2765 2764 genlist = self._genlist.append
2766 2765 for item in self._gen:
2767 2766 cache[item] = True
2768 2767 genlist(item)
2769 2768 yield item
2770 2769 if not self._finished:
2771 2770 self._finished = True
2772 2771 asc = self._genlist[:]
2773 2772 asc.sort()
2774 2773 self._asclist = asc
2775 2774 self.fastasc = asc.__iter__
2776 2775 self.fastdesc = asc.__reversed__
2777 2776
2778 2777 def __len__(self):
2779 2778 for x in self._consumegen():
2780 2779 pass
2781 2780 return len(self._genlist)
2782 2781
2783 2782 def sort(self, reverse=False):
2784 2783 self._ascending = not reverse
2785 2784
2786 2785 def reverse(self):
2787 2786 self._ascending = not self._ascending
2788 2787
2789 2788 def isascending(self):
2790 2789 return self._ascending
2791 2790
2792 2791 def isdescending(self):
2793 2792 return not self._ascending
2794 2793
2795 2794 def first(self):
2796 2795 if self._ascending:
2797 2796 it = self.fastasc
2798 2797 else:
2799 2798 it = self.fastdesc
2800 2799 if it is None:
2801 2800 # we need to consume all and try again
2802 2801 for x in self._consumegen():
2803 2802 pass
2804 2803 return self.first()
2805 2804 if self:
2806 2805 return it.next()
2807 2806 return None
2808 2807
2809 2808 def last(self):
2810 2809 if self._ascending:
2811 2810 it = self.fastdesc
2812 2811 else:
2813 2812 it = self.fastasc
2814 2813 if it is None:
2815 2814 # we need to consume all and try again
2816 2815 for x in self._consumegen():
2817 2816 pass
2818 2817 return self.last()
2819 2818 if self:
2820 2819 return it.next()
2821 2820 return None
2822 2821
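The heart of generatorset is caching everything the generator has produced so far, so the set can be iterated and probed for membership repeatedly without exhausting the underlying generator. A simplified standalone sketch of that idea (without the asc/desc fast paths):

class cachedgen(object):
    """Remember values pulled from a generator so they can be replayed."""
    def __init__(self, gen):
        self._gen = gen
        self._seen = []

    def _iterall(self):
        i = 0
        while True:
            if i < len(self._seen):
                yield self._seen[i]       # replay already-cached values
            else:
                try:
                    val = next(self._gen)
                except StopIteration:
                    return
                self._seen.append(val)    # cache new values as they appear
                yield val
            i += 1

    def __iter__(self):
        return self._iterall()

    def __contains__(self, x):
        return any(v == x for v in self._iterall())

cg = cachedgen(x * x for x in range(5))
assert 9 in cg                         # consumes the generator up to 9
assert list(cg) == [0, 1, 4, 9, 16]    # replays the cache, then finishes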
2823 2822 def spanset(repo, start=None, end=None):
2824 2823 """factory function to dispatch between fullreposet and actual spanset
2825 2824
2826 2825 Feel free to update all spanset call sites and kill this function at some
2827 2826 point.
2828 2827 """
2829 2828 if start is None and end is None:
2830 2829 return fullreposet(repo)
2831 2830 return _spanset(repo, start, end)
2832 2831
2833 2832
2834 2833 class _spanset(abstractsmartset):
2835 2834 """Duck type for baseset class which represents a range of revisions and
2836 2835 can work lazily and without having all the range in memory
2837 2836
2838 2837 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2839 2838 notable points:
2840 2839 - when x > y it will automatically be descending,
2841 2840 - revisions filtered with this repoview will be skipped.
2842 2841
2843 2842 """
2844 2843 def __init__(self, repo, start=0, end=None):
2845 2844 """
2846 2845 start: first revision included in the set
2847 2846 (defaults to 0)
2848 2847 end: first revision excluded (last+1)
2849 2848 (defaults to len(repo))
2850 2849
2851 2850 Spanset will be descending if `end` < `start`.
2852 2851 """
2853 2852 if end is None:
2854 2853 end = len(repo)
2855 2854 self._ascending = start <= end
2856 2855 if not self._ascending:
2857 2856 start, end = end + 1, start +1
2858 2857 self._start = start
2859 2858 self._end = end
2860 2859 self._hiddenrevs = repo.changelog.filteredrevs
2861 2860
2862 2861 def sort(self, reverse=False):
2863 2862 self._ascending = not reverse
2864 2863
2865 2864 def reverse(self):
2866 2865 self._ascending = not self._ascending
2867 2866
2868 2867 def _iterfilter(self, iterrange):
2869 2868 s = self._hiddenrevs
2870 2869 for r in iterrange:
2871 2870 if r not in s:
2872 2871 yield r
2873 2872
2874 2873 def __iter__(self):
2875 2874 if self._ascending:
2876 2875 return self.fastasc()
2877 2876 else:
2878 2877 return self.fastdesc()
2879 2878
2880 2879 def fastasc(self):
2881 2880 iterrange = xrange(self._start, self._end)
2882 2881 if self._hiddenrevs:
2883 2882 return self._iterfilter(iterrange)
2884 2883 return iter(iterrange)
2885 2884
2886 2885 def fastdesc(self):
2887 2886 iterrange = xrange(self._end - 1, self._start - 1, -1)
2888 2887 if self._hiddenrevs:
2889 2888 return self._iterfilter(iterrange)
2890 2889 return iter(iterrange)
2891 2890
2892 2891 def __contains__(self, rev):
2893 2892 hidden = self._hiddenrevs
2894 2893 return ((self._start <= rev < self._end)
2895 2894 and not (hidden and rev in hidden))
2896 2895
2897 2896 def __nonzero__(self):
2898 2897 for r in self:
2899 2898 return True
2900 2899 return False
2901 2900
2902 2901 def __len__(self):
2903 2902 if not self._hiddenrevs:
2904 2903 return abs(self._end - self._start)
2905 2904 else:
2906 2905 count = 0
2907 2906 start = self._start
2908 2907 end = self._end
2909 2908 for rev in self._hiddenrevs:
2910 2909 if (end < rev <= start) or (start <= rev < end):
2911 2910 count += 1
2912 2911 return abs(self._end - self._start) - count
2913 2912
2914 2913 def isascending(self):
2915 2914 return self._start <= self._end
2916 2915
2917 2916 def isdescending(self):
2918 2917 return self._start >= self._end
2919 2918
2920 2919 def first(self):
2921 2920 if self._ascending:
2922 2921 it = self.fastasc
2923 2922 else:
2924 2923 it = self.fastdesc
2925 2924 for x in it():
2926 2925 return x
2927 2926 return None
2928 2927
2929 2928 def last(self):
2930 2929 if self._ascending:
2931 2930 it = self.fastdesc
2932 2931 else:
2933 2932 it = self.fastasc
2934 2933 for x in it():
2935 2934 return x
2936 2935 return None
2937 2936
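A rough standalone analogue of the iteration rules above (ignoring the repo and changelog): a half-open revision range that automatically turns descending when start > end and skips hidden revisions:

def spaniter(start, end, hidden=frozenset()):
    ascending = start <= end
    if not ascending:
        # mirror _spanset.__init__: normalize, remember the direction
        start, end = end + 1, start + 1
    rng = range(start, end) if ascending else range(end - 1, start - 1, -1)
    return (r for r in rng if r not in hidden)

assert list(spaniter(0, 5, hidden=frozenset([2]))) == [0, 1, 3, 4]
assert list(spaniter(5, 0, hidden=frozenset([2]))) == [5, 4, 3, 1]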
2938 2937 class fullreposet(_spanset):
2939 2938 """a set containing all revisions in the repo
2940 2939
2941 2940 This class exists to host special optimizations.
2942 2941 """
2943 2942
2944 2943 def __init__(self, repo):
2945 2944 super(fullreposet, self).__init__(repo)
2946 2945
2947 2946 def __and__(self, other):
2948 2947 """As self contains the whole repo, all of the other set should also be
2949 2948 in self. Therefore `self & other = other`.
2950 2949
2951 2950 This boldly assumes the other contains valid revs only.
2952 2951 """
2953 2952 # other is not a smartset, make it so
2954 2953 if not util.safehasattr(other, 'isascending'):
2955 2954 # filter out hidden revisions
2956 2955 # (this boldly assumes all smartsets are pure)
2957 2956 #
2958 2957 # `other` was used with "&", let's assume this is a set-like
2959 2958 # object.
2960 2959 other = baseset(other - self._hiddenrevs)
2961 2960
2962 2961 if self.isascending():
2963 2962 other.sort()
2964 2963 else:
2965 2964 other.sort(reverse=True)
2966 2965 return other
2967 2966
2968 2967 # tell hggettext to extract docstrings from these functions:
2969 2968 i18nfunctions = symbols.values()