revset: move 'only' so that functions are sorted alphabetically
Yuya Nishihara
r23466:d5b1a452 default
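For context, the relocated ``only(set, [set])`` predicate selects ancestors of the first set that are not ancestors of the second set (or, with one argument, not ancestors of any other head), i.e. roughly ``::<set1> - ::<set2>``. The commit only moves the function so the definitions stay alphabetical; its behavior is unchanged. Below is a minimal sketch of that set arithmetic on a toy parent map, not Mercurial's changelog API:

# Toy illustration of the arithmetic behind only(set1, set2):
# ancestors(set1) - ancestors(set2), with each rev counting as its own ancestor.
# The parent map is a made-up example, not Mercurial's data structures.

def ancestors(parents, revs):
    """Return revs plus all of their ancestors in the toy DAG."""
    seen = set()
    stack = list(revs)
    while stack:
        r = stack.pop()
        if r not in seen:
            seen.add(r)
            stack.extend(parents.get(r, ()))
    return seen

def only(parents, include, exclude):
    """Revisions reachable from include but not from exclude."""
    return ancestors(parents, include) - ancestors(parents, exclude)

# 0 -- 1 -- 2 -- 4   (4 merges 2 and 3)
#       \-- 3 --/
parents = {1: [0], 2: [1], 3: [1], 4: [2, 3]}
print(sorted(only(parents, [4], [3])))  # [2, 4]: ancestors of 4 not reachable from 3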
@@ -1,2968 +1,2968 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 cut = followfirst and 1 or None
22 22 cl = repo.changelog
23 23
24 24 def iterate():
25 25 revqueue, revsnode = None, None
26 26 h = []
27 27
28 28 revs.sort(reverse=True)
29 29 revqueue = util.deque(revs)
30 30 if revqueue:
31 31 revsnode = revqueue.popleft()
32 32 heapq.heappush(h, -revsnode)
33 33
34 34 seen = set([node.nullrev])
35 35 while h:
36 36 current = -heapq.heappop(h)
37 37 if current not in seen:
38 38 if revsnode and current == revsnode:
39 39 if revqueue:
40 40 revsnode = revqueue.popleft()
41 41 heapq.heappush(h, -revsnode)
42 42 seen.add(current)
43 43 yield current
44 44 for parent in cl.parentrevs(current)[:cut]:
45 45 if parent != node.nullrev:
46 46 heapq.heappush(h, -parent)
47 47
48 48 return generatorset(iterate(), iterasc=False)
49 49
50 50 def _revdescendants(repo, revs, followfirst):
51 51 """Like revlog.descendants() but supports followfirst."""
52 52 cut = followfirst and 1 or None
53 53
54 54 def iterate():
55 55 cl = repo.changelog
56 56 first = min(revs)
57 57 nullrev = node.nullrev
58 58 if first == nullrev:
59 59 # Are there nodes with a null first parent and a non-null
60 60 # second one? Maybe. Do we care? Probably not.
61 61 for i in cl:
62 62 yield i
63 63 else:
64 64 seen = set(revs)
65 65 for i in cl.revs(first + 1):
66 66 for x in cl.parentrevs(i)[:cut]:
67 67 if x != nullrev and x in seen:
68 68 seen.add(i)
69 69 yield i
70 70 break
71 71
72 72 return generatorset(iterate(), iterasc=True)
73 73
74 74 def _revsbetween(repo, roots, heads):
75 75 """Return all paths between roots and heads, inclusive of both endpoint
76 76 sets."""
77 77 if not roots:
78 78 return baseset()
79 79 parentrevs = repo.changelog.parentrevs
80 80 visit = list(heads)
81 81 reachable = set()
82 82 seen = {}
83 83 minroot = min(roots)
84 84 roots = set(roots)
85 85 # open-code the post-order traversal due to the tiny size of
86 86 # sys.getrecursionlimit()
87 87 while visit:
88 88 rev = visit.pop()
89 89 if rev in roots:
90 90 reachable.add(rev)
91 91 parents = parentrevs(rev)
92 92 seen[rev] = parents
93 93 for parent in parents:
94 94 if parent >= minroot and parent not in seen:
95 95 visit.append(parent)
96 96 if not reachable:
97 97 return baseset()
98 98 for rev in sorted(seen):
99 99 for parent in seen[rev]:
100 100 if parent in reachable:
101 101 reachable.add(rev)
102 102 return baseset(sorted(reachable))
103 103
104 104 elements = {
105 105 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
106 106 "~": (18, None, ("ancestor", 18)),
107 107 "^": (18, None, ("parent", 18), ("parentpost", 18)),
108 108 "-": (5, ("negate", 19), ("minus", 5)),
109 109 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
110 110 ("dagrangepost", 17)),
111 111 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
112 112 ("dagrangepost", 17)),
113 113 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
114 114 "not": (10, ("not", 10)),
115 115 "!": (10, ("not", 10)),
116 116 "and": (5, None, ("and", 5)),
117 117 "&": (5, None, ("and", 5)),
118 118 "or": (4, None, ("or", 4)),
119 119 "|": (4, None, ("or", 4)),
120 120 "+": (4, None, ("or", 4)),
121 121 ",": (2, None, ("list", 2)),
122 122 ")": (0, None, None),
123 123 "symbol": (0, ("symbol",), None),
124 124 "string": (0, ("string",), None),
125 125 "end": (0, None, None),
126 126 }
127 127
128 128 keywords = set(['and', 'or', 'not'])
129 129
130 130 def tokenize(program, lookup=None):
131 131 '''
132 132 Parse a revset statement into a stream of tokens
133 133
134 134 Check that @ is a valid unquoted token character (issue3686):
135 135 >>> list(tokenize("@::"))
136 136 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
137 137
138 138 '''
139 139
140 140 pos, l = 0, len(program)
141 141 while pos < l:
142 142 c = program[pos]
143 143 if c.isspace(): # skip inter-token whitespace
144 144 pass
145 145 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
146 146 yield ('::', None, pos)
147 147 pos += 1 # skip ahead
148 148 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
149 149 yield ('..', None, pos)
150 150 pos += 1 # skip ahead
151 151 elif c in "():,-|&+!~^": # handle simple operators
152 152 yield (c, None, pos)
153 153 elif (c in '"\'' or c == 'r' and
154 154 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
155 155 if c == 'r':
156 156 pos += 1
157 157 c = program[pos]
158 158 decode = lambda x: x
159 159 else:
160 160 decode = lambda x: x.decode('string-escape')
161 161 pos += 1
162 162 s = pos
163 163 while pos < l: # find closing quote
164 164 d = program[pos]
165 165 if d == '\\': # skip over escaped characters
166 166 pos += 2
167 167 continue
168 168 if d == c:
169 169 yield ('string', decode(program[s:pos]), s)
170 170 break
171 171 pos += 1
172 172 else:
173 173 raise error.ParseError(_("unterminated string"), s)
174 174 # gather up a symbol/keyword
175 175 elif c.isalnum() or c in '._@' or ord(c) > 127:
176 176 s = pos
177 177 pos += 1
178 178 while pos < l: # find end of symbol
179 179 d = program[pos]
180 180 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
181 181 break
182 182 if d == '.' and program[pos - 1] == '.': # special case for ..
183 183 pos -= 1
184 184 break
185 185 pos += 1
186 186 sym = program[s:pos]
187 187 if sym in keywords: # operator keywords
188 188 yield (sym, None, s)
189 189 elif '-' in sym:
190 190 # some jerk gave us foo-bar-baz, try to check if it's a symbol
191 191 if lookup and lookup(sym):
192 192 # looks like a real symbol
193 193 yield ('symbol', sym, s)
194 194 else:
195 195 # looks like an expression
196 196 parts = sym.split('-')
197 197 for p in parts[:-1]:
198 198 if p: # possible consecutive -
199 199 yield ('symbol', p, s)
200 200 s += len(p)
201 201 yield ('-', None, pos)
202 202 s += 1
203 203 if parts[-1]: # possible trailing -
204 204 yield ('symbol', parts[-1], s)
205 205 else:
206 206 yield ('symbol', sym, s)
207 207 pos -= 1
208 208 else:
209 209 raise error.ParseError(_("syntax error"), pos)
210 210 pos += 1
211 211 yield ('end', None, pos)
212 212
213 213 # helpers
214 214
215 215 def getstring(x, err):
216 216 if x and (x[0] == 'string' or x[0] == 'symbol'):
217 217 return x[1]
218 218 raise error.ParseError(err)
219 219
220 220 def getlist(x):
221 221 if not x:
222 222 return []
223 223 if x[0] == 'list':
224 224 return getlist(x[1]) + [x[2]]
225 225 return [x]
226 226
227 227 def getargs(x, min, max, err):
228 228 l = getlist(x)
229 229 if len(l) < min or (max >= 0 and len(l) > max):
230 230 raise error.ParseError(err)
231 231 return l
232 232
233 233 def getset(repo, subset, x):
234 234 if not x:
235 235 raise error.ParseError(_("missing argument"))
236 236 s = methods[x[0]](repo, subset, *x[1:])
237 237 if util.safehasattr(s, 'isascending'):
238 238 return s
239 239 return baseset(s)
240 240
241 241 def _getrevsource(repo, r):
242 242 extra = repo[r].extra()
243 243 for label in ('source', 'transplant_source', 'rebase_source'):
244 244 if label in extra:
245 245 try:
246 246 return repo[extra[label]].rev()
247 247 except error.RepoLookupError:
248 248 pass
249 249 return None
250 250
251 251 # operator methods
252 252
253 253 def stringset(repo, subset, x):
254 254 x = repo[x].rev()
255 255 if x == -1 and len(subset) == len(repo):
256 256 return baseset([-1])
257 257 if len(subset) == len(repo) or x in subset:
258 258 return baseset([x])
259 259 return baseset()
260 260
261 261 def symbolset(repo, subset, x):
262 262 if x in symbols:
263 263 raise error.ParseError(_("can't use %s here") % x)
264 264 return stringset(repo, subset, x)
265 265
266 266 def rangeset(repo, subset, x, y):
267 267 m = getset(repo, fullreposet(repo), x)
268 268 n = getset(repo, fullreposet(repo), y)
269 269
270 270 if not m or not n:
271 271 return baseset()
272 272 m, n = m.first(), n.last()
273 273
274 274 if m < n:
275 275 r = spanset(repo, m, n + 1)
276 276 else:
277 277 r = spanset(repo, m, n - 1)
278 278 return r & subset
279 279
280 280 def dagrange(repo, subset, x, y):
281 281 r = spanset(repo)
282 282 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
283 283 return xs & subset
284 284
285 285 def andset(repo, subset, x, y):
286 286 return getset(repo, getset(repo, subset, x), y)
287 287
288 288 def orset(repo, subset, x, y):
289 289 xl = getset(repo, subset, x)
290 290 yl = getset(repo, subset - xl, y)
291 291 return xl + yl
292 292
293 293 def notset(repo, subset, x):
294 294 return subset - getset(repo, subset, x)
295 295
296 296 def listset(repo, subset, a, b):
297 297 raise error.ParseError(_("can't use a list in this context"))
298 298
299 299 def func(repo, subset, a, b):
300 300 if a[0] == 'symbol' and a[1] in symbols:
301 301 return symbols[a[1]](repo, subset, b)
302 302 raise error.ParseError(_("not a function: %s") % a[1])
303 303
304 304 # functions
305 305
306 306 def adds(repo, subset, x):
307 307 """``adds(pattern)``
308 308 Changesets that add a file matching pattern.
309 309
310 310 The pattern without explicit kind like ``glob:`` is expected to be
311 311 relative to the current directory and match against a file or a
312 312 directory.
313 313 """
314 314 # i18n: "adds" is a keyword
315 315 pat = getstring(x, _("adds requires a pattern"))
316 316 return checkstatus(repo, subset, pat, 1)
317 317
318 318 def ancestor(repo, subset, x):
319 319 """``ancestor(*changeset)``
320 320 A greatest common ancestor of the changesets.
321 321
322 322 Accepts 0 or more changesets.
323 323 Will return empty list when passed no args.
324 324 Greatest common ancestor of a single changeset is that changeset.
325 325 """
326 326 # i18n: "ancestor" is a keyword
327 327 l = getlist(x)
328 328 rl = spanset(repo)
329 329 anc = None
330 330
331 331 # (getset(repo, rl, i) for i in l) generates a list of lists
332 332 for revs in (getset(repo, rl, i) for i in l):
333 333 for r in revs:
334 334 if anc is None:
335 335 anc = repo[r]
336 336 else:
337 337 anc = anc.ancestor(repo[r])
338 338
339 339 if anc is not None and anc.rev() in subset:
340 340 return baseset([anc.rev()])
341 341 return baseset()
342 342
343 343 def _ancestors(repo, subset, x, followfirst=False):
344 344 heads = getset(repo, spanset(repo), x)
345 345 if not heads:
346 346 return baseset()
347 347 s = _revancestors(repo, heads, followfirst)
348 348 return subset & s
349 349
350 350 def ancestors(repo, subset, x):
351 351 """``ancestors(set)``
352 352 Changesets that are ancestors of a changeset in set.
353 353 """
354 354 return _ancestors(repo, subset, x)
355 355
356 356 def _firstancestors(repo, subset, x):
357 357 # ``_firstancestors(set)``
358 358 # Like ``ancestors(set)`` but follows only the first parents.
359 359 return _ancestors(repo, subset, x, followfirst=True)
360 360
361 361 def ancestorspec(repo, subset, x, n):
362 362 """``set~n``
363 363 Changesets that are the Nth ancestor (first parents only) of a changeset
364 364 in set.
365 365 """
366 366 try:
367 367 n = int(n[1])
368 368 except (TypeError, ValueError):
369 369 raise error.ParseError(_("~ expects a number"))
370 370 ps = set()
371 371 cl = repo.changelog
372 372 for r in getset(repo, fullreposet(repo), x):
373 373 for i in range(n):
374 374 r = cl.parentrevs(r)[0]
375 375 ps.add(r)
376 376 return subset & ps
377 377
378 378 def author(repo, subset, x):
379 379 """``author(string)``
380 380 Alias for ``user(string)``.
381 381 """
382 382 # i18n: "author" is a keyword
383 383 n = encoding.lower(getstring(x, _("author requires a string")))
384 384 kind, pattern, matcher = _substringmatcher(n)
385 385 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
386 386
387 def only(repo, subset, x):
388 """``only(set, [set])``
389 Changesets that are ancestors of the first set that are not ancestors
390 of any other head in the repo. If a second set is specified, the result
391 is ancestors of the first set that are not ancestors of the second set
392 (i.e. ::<set1> - ::<set2>).
393 """
394 cl = repo.changelog
395 # i18n: "only" is a keyword
396 args = getargs(x, 1, 2, _('only takes one or two arguments'))
397 include = getset(repo, spanset(repo), args[0])
398 if len(args) == 1:
399 if not include:
400 return baseset()
401
402 descendants = set(_revdescendants(repo, include, False))
403 exclude = [rev for rev in cl.headrevs()
404 if not rev in descendants and not rev in include]
405 else:
406 exclude = getset(repo, spanset(repo), args[1])
407
408 results = set(cl.findmissingrevs(common=exclude, heads=include))
409 return subset & results
410
411 387 def bisect(repo, subset, x):
412 388 """``bisect(string)``
413 389 Changesets marked in the specified bisect status:
414 390
415 391 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
416 392 - ``goods``, ``bads`` : csets topologically good/bad
417 393 - ``range`` : csets taking part in the bisection
418 394 - ``pruned`` : csets that are goods, bads or skipped
419 395 - ``untested`` : csets whose fate is yet unknown
420 396 - ``ignored`` : csets ignored due to DAG topology
421 397 - ``current`` : the cset currently being bisected
422 398 """
423 399 # i18n: "bisect" is a keyword
424 400 status = getstring(x, _("bisect requires a string")).lower()
425 401 state = set(hbisect.get(repo, status))
426 402 return subset & state
427 403
428 404 # Backward-compatibility
429 405 # - no help entry so that we do not advertise it any more
430 406 def bisected(repo, subset, x):
431 407 return bisect(repo, subset, x)
432 408
433 409 def bookmark(repo, subset, x):
434 410 """``bookmark([name])``
435 411 The named bookmark or all bookmarks.
436 412
437 413 If `name` starts with `re:`, the remainder of the name is treated as
438 414 a regular expression. To match a bookmark that actually starts with `re:`,
439 415 use the prefix `literal:`.
440 416 """
441 417 # i18n: "bookmark" is a keyword
442 418 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
443 419 if args:
444 420 bm = getstring(args[0],
445 421 # i18n: "bookmark" is a keyword
446 422 _('the argument to bookmark must be a string'))
447 423 kind, pattern, matcher = _stringmatcher(bm)
448 424 bms = set()
449 425 if kind == 'literal':
450 426 bmrev = repo._bookmarks.get(pattern, None)
451 427 if not bmrev:
452 428 raise util.Abort(_("bookmark '%s' does not exist") % bm)
453 429 bms.add(repo[bmrev].rev())
454 430 else:
455 431 matchrevs = set()
456 432 for name, bmrev in repo._bookmarks.iteritems():
457 433 if matcher(name):
458 434 matchrevs.add(bmrev)
459 435 if not matchrevs:
460 436 raise util.Abort(_("no bookmarks exist that match '%s'")
461 437 % pattern)
462 438 for bmrev in matchrevs:
463 439 bms.add(repo[bmrev].rev())
464 440 else:
465 441 bms = set([repo[r].rev()
466 442 for r in repo._bookmarks.values()])
467 443 bms -= set([node.nullrev])
468 444 return subset & bms
469 445
470 446 def branch(repo, subset, x):
471 447 """``branch(string or set)``
472 448 All changesets belonging to the given branch or the branches of the given
473 449 changesets.
474 450
475 451 If `string` starts with `re:`, the remainder of the name is treated as
476 452 a regular expression. To match a branch that actually starts with `re:`,
477 453 use the prefix `literal:`.
478 454 """
479 455 try:
480 456 b = getstring(x, '')
481 457 except error.ParseError:
482 458 # not a string, but another revspec, e.g. tip()
483 459 pass
484 460 else:
485 461 kind, pattern, matcher = _stringmatcher(b)
486 462 if kind == 'literal':
487 463 # note: falls through to the revspec case if no branch with
488 464 # this name exists
489 465 if pattern in repo.branchmap():
490 466 return subset.filter(lambda r: matcher(repo[r].branch()))
491 467 else:
492 468 return subset.filter(lambda r: matcher(repo[r].branch()))
493 469
494 470 s = getset(repo, spanset(repo), x)
495 471 b = set()
496 472 for r in s:
497 473 b.add(repo[r].branch())
498 474 c = s.__contains__
499 475 return subset.filter(lambda r: c(r) or repo[r].branch() in b)
500 476
501 477 def bumped(repo, subset, x):
502 478 """``bumped()``
503 479 Mutable changesets marked as successors of public changesets.
504 480
505 481 Only non-public and non-obsolete changesets can be `bumped`.
506 482 """
507 483 # i18n: "bumped" is a keyword
508 484 getargs(x, 0, 0, _("bumped takes no arguments"))
509 485 bumped = obsmod.getrevs(repo, 'bumped')
510 486 return subset & bumped
511 487
512 488 def bundle(repo, subset, x):
513 489 """``bundle()``
514 490 Changesets in the bundle.
515 491
516 492 Bundle must be specified by the -R option."""
517 493
518 494 try:
519 495 bundlerevs = repo.changelog.bundlerevs
520 496 except AttributeError:
521 497 raise util.Abort(_("no bundle provided - specify with -R"))
522 498 return subset & bundlerevs
523 499
524 500 def checkstatus(repo, subset, pat, field):
525 501 hasset = matchmod.patkind(pat) == 'set'
526 502
527 503 mcache = [None]
528 504 def matches(x):
529 505 c = repo[x]
530 506 if not mcache[0] or hasset:
531 507 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
532 508 m = mcache[0]
533 509 fname = None
534 510 if not m.anypats() and len(m.files()) == 1:
535 511 fname = m.files()[0]
536 512 if fname is not None:
537 513 if fname not in c.files():
538 514 return False
539 515 else:
540 516 for f in c.files():
541 517 if m(f):
542 518 break
543 519 else:
544 520 return False
545 521 files = repo.status(c.p1().node(), c.node())[field]
546 522 if fname is not None:
547 523 if fname in files:
548 524 return True
549 525 else:
550 526 for f in files:
551 527 if m(f):
552 528 return True
553 529
554 530 return subset.filter(matches)
555 531
556 532 def _children(repo, narrow, parentset):
557 533 cs = set()
558 534 if not parentset:
559 535 return baseset(cs)
560 536 pr = repo.changelog.parentrevs
561 537 minrev = min(parentset)
562 538 for r in narrow:
563 539 if r <= minrev:
564 540 continue
565 541 for p in pr(r):
566 542 if p in parentset:
567 543 cs.add(r)
568 544 return baseset(cs)
569 545
570 546 def children(repo, subset, x):
571 547 """``children(set)``
572 548 Child changesets of changesets in set.
573 549 """
574 550 s = getset(repo, fullreposet(repo), x)
575 551 cs = _children(repo, subset, s)
576 552 return subset & cs
577 553
578 554 def closed(repo, subset, x):
579 555 """``closed()``
580 556 Changeset is closed.
581 557 """
582 558 # i18n: "closed" is a keyword
583 559 getargs(x, 0, 0, _("closed takes no arguments"))
584 560 return subset.filter(lambda r: repo[r].closesbranch())
585 561
586 562 def contains(repo, subset, x):
587 563 """``contains(pattern)``
588 564 The revision's manifest contains a file matching pattern (but might not
589 565 modify it). See :hg:`help patterns` for information about file patterns.
590 566
591 567 The pattern without explicit kind like ``glob:`` is expected to be
592 568 relative to the current directory and match against a file exactly
593 569 for efficiency.
594 570 """
595 571 # i18n: "contains" is a keyword
596 572 pat = getstring(x, _("contains requires a pattern"))
597 573
598 574 def matches(x):
599 575 if not matchmod.patkind(pat):
600 576 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
601 577 if pats in repo[x]:
602 578 return True
603 579 else:
604 580 c = repo[x]
605 581 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
606 582 for f in c.manifest():
607 583 if m(f):
608 584 return True
609 585 return False
610 586
611 587 return subset.filter(matches)
612 588
613 589 def converted(repo, subset, x):
614 590 """``converted([id])``
615 591 Changesets converted from the given identifier in the old repository if
616 592 present, or all converted changesets if no identifier is specified.
617 593 """
618 594
619 595 # There is exactly no chance of resolving the revision, so do a simple
620 596 # string compare and hope for the best
621 597
622 598 rev = None
623 599 # i18n: "converted" is a keyword
624 600 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
625 601 if l:
626 602 # i18n: "converted" is a keyword
627 603 rev = getstring(l[0], _('converted requires a revision'))
628 604
629 605 def _matchvalue(r):
630 606 source = repo[r].extra().get('convert_revision', None)
631 607 return source is not None and (rev is None or source.startswith(rev))
632 608
633 609 return subset.filter(lambda r: _matchvalue(r))
634 610
635 611 def date(repo, subset, x):
636 612 """``date(interval)``
637 613 Changesets within the interval, see :hg:`help dates`.
638 614 """
639 615 # i18n: "date" is a keyword
640 616 ds = getstring(x, _("date requires a string"))
641 617 dm = util.matchdate(ds)
642 618 return subset.filter(lambda x: dm(repo[x].date()[0]))
643 619
644 620 def desc(repo, subset, x):
645 621 """``desc(string)``
646 622 Search commit message for string. The match is case-insensitive.
647 623 """
648 624 # i18n: "desc" is a keyword
649 625 ds = encoding.lower(getstring(x, _("desc requires a string")))
650 626
651 627 def matches(x):
652 628 c = repo[x]
653 629 return ds in encoding.lower(c.description())
654 630
655 631 return subset.filter(matches)
656 632
657 633 def _descendants(repo, subset, x, followfirst=False):
658 634 roots = getset(repo, spanset(repo), x)
659 635 if not roots:
660 636 return baseset()
661 637 s = _revdescendants(repo, roots, followfirst)
662 638
663 639 # Both sets need to be ascending in order to lazily return the union
664 640 # in the correct order.
665 641 base = subset & roots
666 642 desc = subset & s
667 643 result = base + desc
668 644 if subset.isascending():
669 645 result.sort()
670 646 elif subset.isdescending():
671 647 result.sort(reverse=True)
672 648 else:
673 649 result = subset & result
674 650 return result
675 651
676 652 def descendants(repo, subset, x):
677 653 """``descendants(set)``
678 654 Changesets which are descendants of changesets in set.
679 655 """
680 656 return _descendants(repo, subset, x)
681 657
682 658 def _firstdescendants(repo, subset, x):
683 659 # ``_firstdescendants(set)``
684 660 # Like ``descendants(set)`` but follows only the first parents.
685 661 return _descendants(repo, subset, x, followfirst=True)
686 662
687 663 def destination(repo, subset, x):
688 664 """``destination([set])``
689 665 Changesets that were created by a graft, transplant or rebase operation,
690 666 with the given revisions specified as the source. Omitting the optional set
691 667 is the same as passing all().
692 668 """
693 669 if x is not None:
694 670 sources = getset(repo, spanset(repo), x)
695 671 else:
696 672 sources = getall(repo, spanset(repo), x)
697 673
698 674 dests = set()
699 675
700 676 # subset contains all of the possible destinations that can be returned, so
701 677 # iterate over them and see if their source(s) were provided in the arg set.
702 678 # Even if the immediate src of r is not in the arg set, src's source (or
703 679 # further back) may be. Scanning back further than the immediate src allows
704 680 # transitive transplants and rebases to yield the same results as transitive
705 681 # grafts.
706 682 for r in subset:
707 683 src = _getrevsource(repo, r)
708 684 lineage = None
709 685
710 686 while src is not None:
711 687 if lineage is None:
712 688 lineage = list()
713 689
714 690 lineage.append(r)
715 691
716 692 # The visited lineage is a match if the current source is in the arg
717 693 # set. Since every candidate dest is visited by way of iterating
718 694 # subset, any dests further back in the lineage will be tested by a
719 695 # different iteration over subset. Likewise, if the src was already
720 696 # selected, the current lineage can be selected without going back
721 697 # further.
722 698 if src in sources or src in dests:
723 699 dests.update(lineage)
724 700 break
725 701
726 702 r = src
727 703 src = _getrevsource(repo, r)
728 704
729 705 return subset.filter(dests.__contains__)
730 706
731 707 def divergent(repo, subset, x):
732 708 """``divergent()``
733 709 Final successors of changesets with an alternative set of final successors.
734 710 """
735 711 # i18n: "divergent" is a keyword
736 712 getargs(x, 0, 0, _("divergent takes no arguments"))
737 713 divergent = obsmod.getrevs(repo, 'divergent')
738 714 return subset & divergent
739 715
740 716 def draft(repo, subset, x):
741 717 """``draft()``
742 718 Changeset in draft phase."""
743 719 # i18n: "draft" is a keyword
744 720 getargs(x, 0, 0, _("draft takes no arguments"))
745 721 phase = repo._phasecache.phase
746 722 target = phases.draft
747 723 condition = lambda r: phase(repo, r) == target
748 724 return subset.filter(condition, cache=False)
749 725
750 726 def extinct(repo, subset, x):
751 727 """``extinct()``
752 728 Obsolete changesets with obsolete descendants only.
753 729 """
754 730 # i18n: "extinct" is a keyword
755 731 getargs(x, 0, 0, _("extinct takes no arguments"))
756 732 extincts = obsmod.getrevs(repo, 'extinct')
757 733 return subset & extincts
758 734
759 735 def extra(repo, subset, x):
760 736 """``extra(label, [value])``
761 737 Changesets with the given label in the extra metadata, with the given
762 738 optional value.
763 739
764 740 If `value` starts with `re:`, the remainder of the value is treated as
765 741 a regular expression. To match a value that actually starts with `re:`,
766 742 use the prefix `literal:`.
767 743 """
768 744
769 745 # i18n: "extra" is a keyword
770 746 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
771 747 # i18n: "extra" is a keyword
772 748 label = getstring(l[0], _('first argument to extra must be a string'))
773 749 value = None
774 750
775 751 if len(l) > 1:
776 752 # i18n: "extra" is a keyword
777 753 value = getstring(l[1], _('second argument to extra must be a string'))
778 754 kind, value, matcher = _stringmatcher(value)
779 755
780 756 def _matchvalue(r):
781 757 extra = repo[r].extra()
782 758 return label in extra and (value is None or matcher(extra[label]))
783 759
784 760 return subset.filter(lambda r: _matchvalue(r))
785 761
786 762 def filelog(repo, subset, x):
787 763 """``filelog(pattern)``
788 764 Changesets connected to the specified filelog.
789 765
790 766 For performance reasons, visits only revisions mentioned in the file-level
791 767 filelog, rather than filtering through all changesets (much faster, but
792 768 doesn't include deletes or duplicate changes). For a slower, more accurate
793 769 result, use ``file()``.
794 770
795 771 The pattern without explicit kind like ``glob:`` is expected to be
796 772 relative to the current directory and match against a file exactly
797 773 for efficiency.
798 774 """
799 775
800 776 # i18n: "filelog" is a keyword
801 777 pat = getstring(x, _("filelog requires a pattern"))
802 778 s = set()
803 779
804 780 if not matchmod.patkind(pat):
805 781 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
806 782 fl = repo.file(f)
807 783 for fr in fl:
808 784 s.add(fl.linkrev(fr))
809 785 else:
810 786 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
811 787 for f in repo[None]:
812 788 if m(f):
813 789 fl = repo.file(f)
814 790 for fr in fl:
815 791 s.add(fl.linkrev(fr))
816 792
817 793 return subset & s
818 794
819 795 def first(repo, subset, x):
820 796 """``first(set, [n])``
821 797 An alias for limit().
822 798 """
823 799 return limit(repo, subset, x)
824 800
825 801 def _follow(repo, subset, x, name, followfirst=False):
826 802 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
827 803 c = repo['.']
828 804 if l:
829 805 x = getstring(l[0], _("%s expected a filename") % name)
830 806 if x in c:
831 807 cx = c[x]
832 808 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
833 809 # include the revision responsible for the most recent version
834 810 s.add(cx.linkrev())
835 811 else:
836 812 return baseset()
837 813 else:
838 814 s = _revancestors(repo, baseset([c.rev()]), followfirst)
839 815
840 816 return subset & s
841 817
842 818 def follow(repo, subset, x):
843 819 """``follow([file])``
844 820 An alias for ``::.`` (ancestors of the working copy's first parent).
845 821 If a filename is specified, the history of the given file is followed,
846 822 including copies.
847 823 """
848 824 return _follow(repo, subset, x, 'follow')
849 825
850 826 def _followfirst(repo, subset, x):
851 827 # ``followfirst([file])``
852 828 # Like ``follow([file])`` but follows only the first parent of
853 829 # every revision or file revision.
854 830 return _follow(repo, subset, x, '_followfirst', followfirst=True)
855 831
856 832 def getall(repo, subset, x):
857 833 """``all()``
858 834 All changesets, the same as ``0:tip``.
859 835 """
860 836 # i18n: "all" is a keyword
861 837 getargs(x, 0, 0, _("all takes no arguments"))
862 838 return subset
863 839
864 840 def grep(repo, subset, x):
865 841 """``grep(regex)``
866 842 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
867 843 to ensure special escape characters are handled correctly. Unlike
868 844 ``keyword(string)``, the match is case-sensitive.
869 845 """
870 846 try:
871 847 # i18n: "grep" is a keyword
872 848 gr = re.compile(getstring(x, _("grep requires a string")))
873 849 except re.error, e:
874 850 raise error.ParseError(_('invalid match pattern: %s') % e)
875 851
876 852 def matches(x):
877 853 c = repo[x]
878 854 for e in c.files() + [c.user(), c.description()]:
879 855 if gr.search(e):
880 856 return True
881 857 return False
882 858
883 859 return subset.filter(matches)
884 860
885 861 def _matchfiles(repo, subset, x):
886 862 # _matchfiles takes a revset list of prefixed arguments:
887 863 #
888 864 # [p:foo, i:bar, x:baz]
889 865 #
890 866 # builds a match object from them and filters subset. Allowed
891 867 # prefixes are 'p:' for regular patterns, 'i:' for include
892 868 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
893 869 # a revision identifier, or the empty string to reference the
894 870 # working directory, from which the match object is
895 871 # initialized. Use 'd:' to set the default matching mode, default
896 872 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
897 873
898 874 # i18n: "_matchfiles" is a keyword
899 875 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
900 876 pats, inc, exc = [], [], []
901 877 rev, default = None, None
902 878 for arg in l:
903 879 # i18n: "_matchfiles" is a keyword
904 880 s = getstring(arg, _("_matchfiles requires string arguments"))
905 881 prefix, value = s[:2], s[2:]
906 882 if prefix == 'p:':
907 883 pats.append(value)
908 884 elif prefix == 'i:':
909 885 inc.append(value)
910 886 elif prefix == 'x:':
911 887 exc.append(value)
912 888 elif prefix == 'r:':
913 889 if rev is not None:
914 890 # i18n: "_matchfiles" is a keyword
915 891 raise error.ParseError(_('_matchfiles expected at most one '
916 892 'revision'))
917 893 rev = value
918 894 elif prefix == 'd:':
919 895 if default is not None:
920 896 # i18n: "_matchfiles" is a keyword
921 897 raise error.ParseError(_('_matchfiles expected at most one '
922 898 'default mode'))
923 899 default = value
924 900 else:
925 901 # i18n: "_matchfiles" is a keyword
926 902 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
927 903 if not default:
928 904 default = 'glob'
929 905
930 906 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
931 907 exclude=exc, ctx=repo[rev], default=default)
932 908
933 909 def matches(x):
934 910 for f in repo[x].files():
935 911 if m(f):
936 912 return True
937 913 return False
938 914
939 915 return subset.filter(matches)
940 916
941 917 def hasfile(repo, subset, x):
942 918 """``file(pattern)``
943 919 Changesets affecting files matched by pattern.
944 920
945 921 For a faster but less accurate result, consider using ``filelog()``
946 922 instead.
947 923
948 924 This predicate uses ``glob:`` as the default kind of pattern.
949 925 """
950 926 # i18n: "file" is a keyword
951 927 pat = getstring(x, _("file requires a pattern"))
952 928 return _matchfiles(repo, subset, ('string', 'p:' + pat))
953 929
954 930 def head(repo, subset, x):
955 931 """``head()``
956 932 Changeset is a named branch head.
957 933 """
958 934 # i18n: "head" is a keyword
959 935 getargs(x, 0, 0, _("head takes no arguments"))
960 936 hs = set()
961 937 for b, ls in repo.branchmap().iteritems():
962 938 hs.update(repo[h].rev() for h in ls)
963 939 return baseset(hs).filter(subset.__contains__)
964 940
965 941 def heads(repo, subset, x):
966 942 """``heads(set)``
967 943 Members of set with no children in set.
968 944 """
969 945 s = getset(repo, subset, x)
970 946 ps = parents(repo, subset, x)
971 947 return s - ps
972 948
973 949 def hidden(repo, subset, x):
974 950 """``hidden()``
975 951 Hidden changesets.
976 952 """
977 953 # i18n: "hidden" is a keyword
978 954 getargs(x, 0, 0, _("hidden takes no arguments"))
979 955 hiddenrevs = repoview.filterrevs(repo, 'visible')
980 956 return subset & hiddenrevs
981 957
982 958 def keyword(repo, subset, x):
983 959 """``keyword(string)``
984 960 Search commit message, user name, and names of changed files for
985 961 string. The match is case-insensitive.
986 962 """
987 963 # i18n: "keyword" is a keyword
988 964 kw = encoding.lower(getstring(x, _("keyword requires a string")))
989 965
990 966 def matches(r):
991 967 c = repo[r]
992 968 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
993 969 c.description()])
994 970
995 971 return subset.filter(matches)
996 972
997 973 def limit(repo, subset, x):
998 974 """``limit(set, [n])``
999 975 First n members of set, defaulting to 1.
1000 976 """
1001 977 # i18n: "limit" is a keyword
1002 978 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1003 979 try:
1004 980 lim = 1
1005 981 if len(l) == 2:
1006 982 # i18n: "limit" is a keyword
1007 983 lim = int(getstring(l[1], _("limit requires a number")))
1008 984 except (TypeError, ValueError):
1009 985 # i18n: "limit" is a keyword
1010 986 raise error.ParseError(_("limit expects a number"))
1011 987 ss = subset
1012 988 os = getset(repo, spanset(repo), l[0])
1013 989 result = []
1014 990 it = iter(os)
1015 991 for x in xrange(lim):
1016 992 try:
1017 993 y = it.next()
1018 994 if y in ss:
1019 995 result.append(y)
1020 996 except (StopIteration):
1021 997 break
1022 998 return baseset(result)
1023 999
1024 1000 def last(repo, subset, x):
1025 1001 """``last(set, [n])``
1026 1002 Last n members of set, defaulting to 1.
1027 1003 """
1028 1004 # i18n: "last" is a keyword
1029 1005 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1030 1006 try:
1031 1007 lim = 1
1032 1008 if len(l) == 2:
1033 1009 # i18n: "last" is a keyword
1034 1010 lim = int(getstring(l[1], _("last requires a number")))
1035 1011 except (TypeError, ValueError):
1036 1012 # i18n: "last" is a keyword
1037 1013 raise error.ParseError(_("last expects a number"))
1038 1014 ss = subset
1039 1015 os = getset(repo, spanset(repo), l[0])
1040 1016 os.reverse()
1041 1017 result = []
1042 1018 it = iter(os)
1043 1019 for x in xrange(lim):
1044 1020 try:
1045 1021 y = it.next()
1046 1022 if y in ss:
1047 1023 result.append(y)
1048 1024 except (StopIteration):
1049 1025 break
1050 1026 return baseset(result)
1051 1027
1052 1028 def maxrev(repo, subset, x):
1053 1029 """``max(set)``
1054 1030 Changeset with highest revision number in set.
1055 1031 """
1056 1032 os = getset(repo, spanset(repo), x)
1057 1033 if os:
1058 1034 m = os.max()
1059 1035 if m in subset:
1060 1036 return baseset([m])
1061 1037 return baseset()
1062 1038
1063 1039 def merge(repo, subset, x):
1064 1040 """``merge()``
1065 1041 Changeset is a merge changeset.
1066 1042 """
1067 1043 # i18n: "merge" is a keyword
1068 1044 getargs(x, 0, 0, _("merge takes no arguments"))
1069 1045 cl = repo.changelog
1070 1046 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1071 1047
1072 1048 def branchpoint(repo, subset, x):
1073 1049 """``branchpoint()``
1074 1050 Changesets with more than one child.
1075 1051 """
1076 1052 # i18n: "branchpoint" is a keyword
1077 1053 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1078 1054 cl = repo.changelog
1079 1055 if not subset:
1080 1056 return baseset()
1081 1057 baserev = min(subset)
1082 1058 parentscount = [0]*(len(repo) - baserev)
1083 1059 for r in cl.revs(start=baserev + 1):
1084 1060 for p in cl.parentrevs(r):
1085 1061 if p >= baserev:
1086 1062 parentscount[p - baserev] += 1
1087 1063 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1088 1064
1089 1065 def minrev(repo, subset, x):
1090 1066 """``min(set)``
1091 1067 Changeset with lowest revision number in set.
1092 1068 """
1093 1069 os = getset(repo, spanset(repo), x)
1094 1070 if os:
1095 1071 m = os.min()
1096 1072 if m in subset:
1097 1073 return baseset([m])
1098 1074 return baseset()
1099 1075
1100 1076 def modifies(repo, subset, x):
1101 1077 """``modifies(pattern)``
1102 1078 Changesets modifying files matched by pattern.
1103 1079
1104 1080 The pattern without explicit kind like ``glob:`` is expected to be
1105 1081 relative to the current directory and match against a file or a
1106 1082 directory.
1107 1083 """
1108 1084 # i18n: "modifies" is a keyword
1109 1085 pat = getstring(x, _("modifies requires a pattern"))
1110 1086 return checkstatus(repo, subset, pat, 0)
1111 1087
1112 1088 def node_(repo, subset, x):
1113 1089 """``id(string)``
1114 1090 Revision non-ambiguously specified by the given hex string prefix.
1115 1091 """
1116 1092 # i18n: "id" is a keyword
1117 1093 l = getargs(x, 1, 1, _("id requires one argument"))
1118 1094 # i18n: "id" is a keyword
1119 1095 n = getstring(l[0], _("id requires a string"))
1120 1096 if len(n) == 40:
1121 1097 rn = repo[n].rev()
1122 1098 else:
1123 1099 rn = None
1124 1100 pm = repo.changelog._partialmatch(n)
1125 1101 if pm is not None:
1126 1102 rn = repo.changelog.rev(pm)
1127 1103
1128 1104 if rn is None:
1129 1105 return baseset()
1130 1106 result = baseset([rn])
1131 1107 return result & subset
1132 1108
1133 1109 def obsolete(repo, subset, x):
1134 1110 """``obsolete()``
1135 1111 Mutable changeset with a newer version."""
1136 1112 # i18n: "obsolete" is a keyword
1137 1113 getargs(x, 0, 0, _("obsolete takes no arguments"))
1138 1114 obsoletes = obsmod.getrevs(repo, 'obsolete')
1139 1115 return subset & obsoletes
1140 1116
1117 def only(repo, subset, x):
1118 """``only(set, [set])``
1119 Changesets that are ancestors of the first set that are not ancestors
1120 of any other head in the repo. If a second set is specified, the result
1121 is ancestors of the first set that are not ancestors of the second set
1122 (i.e. ::<set1> - ::<set2>).
1123 """
1124 cl = repo.changelog
1125 # i18n: "only" is a keyword
1126 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1127 include = getset(repo, spanset(repo), args[0])
1128 if len(args) == 1:
1129 if not include:
1130 return baseset()
1131
1132 descendants = set(_revdescendants(repo, include, False))
1133 exclude = [rev for rev in cl.headrevs()
1134 if not rev in descendants and not rev in include]
1135 else:
1136 exclude = getset(repo, spanset(repo), args[1])
1137
1138 results = set(cl.findmissingrevs(common=exclude, heads=include))
1139 return subset & results
1140
1141 1141 def origin(repo, subset, x):
1142 1142 """``origin([set])``
1143 1143 Changesets that were specified as a source for the grafts, transplants or
1144 1144 rebases that created the given revisions. Omitting the optional set is the
1145 1145 same as passing all(). If a changeset created by these operations is itself
1146 1146 specified as a source for one of these operations, only the source changeset
1147 1147 for the first operation is selected.
1148 1148 """
1149 1149 if x is not None:
1150 1150 dests = getset(repo, spanset(repo), x)
1151 1151 else:
1152 1152 dests = getall(repo, spanset(repo), x)
1153 1153
1154 1154 def _firstsrc(rev):
1155 1155 src = _getrevsource(repo, rev)
1156 1156 if src is None:
1157 1157 return None
1158 1158
1159 1159 while True:
1160 1160 prev = _getrevsource(repo, src)
1161 1161
1162 1162 if prev is None:
1163 1163 return src
1164 1164 src = prev
1165 1165
1166 1166 o = set([_firstsrc(r) for r in dests])
1167 1167 o -= set([None])
1168 1168 return subset & o
1169 1169
1170 1170 def outgoing(repo, subset, x):
1171 1171 """``outgoing([path])``
1172 1172 Changesets not found in the specified destination repository, or the
1173 1173 default push location.
1174 1174 """
1175 1175 import hg # avoid start-up nasties
1176 1176 # i18n: "outgoing" is a keyword
1177 1177 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1178 1178 # i18n: "outgoing" is a keyword
1179 1179 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1180 1180 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1181 1181 dest, branches = hg.parseurl(dest)
1182 1182 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1183 1183 if revs:
1184 1184 revs = [repo.lookup(rev) for rev in revs]
1185 1185 other = hg.peer(repo, {}, dest)
1186 1186 repo.ui.pushbuffer()
1187 1187 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1188 1188 repo.ui.popbuffer()
1189 1189 cl = repo.changelog
1190 1190 o = set([cl.rev(r) for r in outgoing.missing])
1191 1191 return subset & o
1192 1192
1193 1193 def p1(repo, subset, x):
1194 1194 """``p1([set])``
1195 1195 First parent of changesets in set, or the working directory.
1196 1196 """
1197 1197 if x is None:
1198 1198 p = repo[x].p1().rev()
1199 1199 if p >= 0:
1200 1200 return subset & baseset([p])
1201 1201 return baseset()
1202 1202
1203 1203 ps = set()
1204 1204 cl = repo.changelog
1205 1205 for r in getset(repo, spanset(repo), x):
1206 1206 ps.add(cl.parentrevs(r)[0])
1207 1207 ps -= set([node.nullrev])
1208 1208 return subset & ps
1209 1209
1210 1210 def p2(repo, subset, x):
1211 1211 """``p2([set])``
1212 1212 Second parent of changesets in set, or the working directory.
1213 1213 """
1214 1214 if x is None:
1215 1215 ps = repo[x].parents()
1216 1216 try:
1217 1217 p = ps[1].rev()
1218 1218 if p >= 0:
1219 1219 return subset & baseset([p])
1220 1220 return baseset()
1221 1221 except IndexError:
1222 1222 return baseset()
1223 1223
1224 1224 ps = set()
1225 1225 cl = repo.changelog
1226 1226 for r in getset(repo, spanset(repo), x):
1227 1227 ps.add(cl.parentrevs(r)[1])
1228 1228 ps -= set([node.nullrev])
1229 1229 return subset & ps
1230 1230
1231 1231 def parents(repo, subset, x):
1232 1232 """``parents([set])``
1233 1233 The set of all parents for all changesets in set, or the working directory.
1234 1234 """
1235 1235 if x is None:
1236 1236 ps = set(p.rev() for p in repo[x].parents())
1237 1237 else:
1238 1238 ps = set()
1239 1239 cl = repo.changelog
1240 1240 for r in getset(repo, spanset(repo), x):
1241 1241 ps.update(cl.parentrevs(r))
1242 1242 ps -= set([node.nullrev])
1243 1243 return subset & ps
1244 1244
1245 1245 def parentspec(repo, subset, x, n):
1246 1246 """``set^0``
1247 1247 The set.
1248 1248 ``set^1`` (or ``set^``), ``set^2``
1249 1249 First or second parent, respectively, of all changesets in set.
1250 1250 """
1251 1251 try:
1252 1252 n = int(n[1])
1253 1253 if n not in (0, 1, 2):
1254 1254 raise ValueError
1255 1255 except (TypeError, ValueError):
1256 1256 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1257 1257 ps = set()
1258 1258 cl = repo.changelog
1259 1259 for r in getset(repo, fullreposet(repo), x):
1260 1260 if n == 0:
1261 1261 ps.add(r)
1262 1262 elif n == 1:
1263 1263 ps.add(cl.parentrevs(r)[0])
1264 1264 elif n == 2:
1265 1265 parents = cl.parentrevs(r)
1266 1266 if len(parents) > 1:
1267 1267 ps.add(parents[1])
1268 1268 return subset & ps
1269 1269
1270 1270 def present(repo, subset, x):
1271 1271 """``present(set)``
1272 1272 An empty set, if any revision in set isn't found; otherwise,
1273 1273 all revisions in set.
1274 1274
1275 1275 If any of specified revisions is not present in the local repository,
1276 1276 the query is normally aborted. But this predicate allows the query
1277 1277 to continue even in such cases.
1278 1278 """
1279 1279 try:
1280 1280 return getset(repo, subset, x)
1281 1281 except error.RepoLookupError:
1282 1282 return baseset()
1283 1283
1284 1284 def public(repo, subset, x):
1285 1285 """``public()``
1286 1286 Changeset in public phase."""
1287 1287 # i18n: "public" is a keyword
1288 1288 getargs(x, 0, 0, _("public takes no arguments"))
1289 1289 phase = repo._phasecache.phase
1290 1290 target = phases.public
1291 1291 condition = lambda r: phase(repo, r) == target
1292 1292 return subset.filter(condition, cache=False)
1293 1293
1294 1294 def remote(repo, subset, x):
1295 1295 """``remote([id [,path]])``
1296 1296 Local revision that corresponds to the given identifier in a
1297 1297 remote repository, if present. Here, the '.' identifier is a
1298 1298 synonym for the current local branch.
1299 1299 """
1300 1300
1301 1301 import hg # avoid start-up nasties
1302 1302 # i18n: "remote" is a keyword
1303 1303 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1304 1304
1305 1305 q = '.'
1306 1306 if len(l) > 0:
1307 1307 # i18n: "remote" is a keyword
1308 1308 q = getstring(l[0], _("remote requires a string id"))
1309 1309 if q == '.':
1310 1310 q = repo['.'].branch()
1311 1311
1312 1312 dest = ''
1313 1313 if len(l) > 1:
1314 1314 # i18n: "remote" is a keyword
1315 1315 dest = getstring(l[1], _("remote requires a repository path"))
1316 1316 dest = repo.ui.expandpath(dest or 'default')
1317 1317 dest, branches = hg.parseurl(dest)
1318 1318 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1319 1319 if revs:
1320 1320 revs = [repo.lookup(rev) for rev in revs]
1321 1321 other = hg.peer(repo, {}, dest)
1322 1322 n = other.lookup(q)
1323 1323 if n in repo:
1324 1324 r = repo[n].rev()
1325 1325 if r in subset:
1326 1326 return baseset([r])
1327 1327 return baseset()
1328 1328
1329 1329 def removes(repo, subset, x):
1330 1330 """``removes(pattern)``
1331 1331 Changesets which remove files matching pattern.
1332 1332
1333 1333 The pattern without explicit kind like ``glob:`` is expected to be
1334 1334 relative to the current directory and match against a file or a
1335 1335 directory.
1336 1336 """
1337 1337 # i18n: "removes" is a keyword
1338 1338 pat = getstring(x, _("removes requires a pattern"))
1339 1339 return checkstatus(repo, subset, pat, 2)
1340 1340
1341 1341 def rev(repo, subset, x):
1342 1342 """``rev(number)``
1343 1343 Revision with the given numeric identifier.
1344 1344 """
1345 1345 # i18n: "rev" is a keyword
1346 1346 l = getargs(x, 1, 1, _("rev requires one argument"))
1347 1347 try:
1348 1348 # i18n: "rev" is a keyword
1349 1349 l = int(getstring(l[0], _("rev requires a number")))
1350 1350 except (TypeError, ValueError):
1351 1351 # i18n: "rev" is a keyword
1352 1352 raise error.ParseError(_("rev expects a number"))
1353 1353 if l not in fullreposet(repo):
1354 1354 return baseset()
1355 1355 return subset & baseset([l])
1356 1356
1357 1357 def matching(repo, subset, x):
1358 1358 """``matching(revision [, field])``
1359 1359 Changesets in which a given set of fields match the set of fields in the
1360 1360 selected revision or set.
1361 1361
1362 1362 To match more than one field pass the list of fields to match separated
1363 1363 by spaces (e.g. ``author description``).
1364 1364
1365 1365 Valid fields are most regular revision fields and some special fields.
1366 1366
1367 1367 Regular revision fields are ``description``, ``author``, ``branch``,
1368 1368 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1369 1369 and ``diff``.
1370 1370 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1371 1371 contents of the revision. Two revisions matching their ``diff`` will
1372 1372 also match their ``files``.
1373 1373
1374 1374 Special fields are ``summary`` and ``metadata``:
1375 1375 ``summary`` matches the first line of the description.
1376 1376 ``metadata`` is equivalent to matching ``description user date``
1377 1377 (i.e. it matches the main metadata fields).
1378 1378
1379 1379 ``metadata`` is the default field which is used when no fields are
1380 1380 specified. You can match more than one field at a time.
1381 1381 """
1382 1382 # i18n: "matching" is a keyword
1383 1383 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1384 1384
1385 1385 revs = getset(repo, fullreposet(repo), l[0])
1386 1386
1387 1387 fieldlist = ['metadata']
1388 1388 if len(l) > 1:
1389 1389 fieldlist = getstring(l[1],
1390 1390 # i18n: "matching" is a keyword
1391 1391 _("matching requires a string "
1392 1392 "as its second argument")).split()
1393 1393
1394 1394 # Make sure that there are no repeated fields,
1395 1395 # expand the 'special' 'metadata' field type
1396 1396 # and check the 'files' whenever we check the 'diff'
1397 1397 fields = []
1398 1398 for field in fieldlist:
1399 1399 if field == 'metadata':
1400 1400 fields += ['user', 'description', 'date']
1401 1401 elif field == 'diff':
1402 1402 # a revision matching the diff must also match the files
1403 1403 # since matching the diff is very costly, make sure to
1404 1404 # also match the files first
1405 1405 fields += ['files', 'diff']
1406 1406 else:
1407 1407 if field == 'author':
1408 1408 field = 'user'
1409 1409 fields.append(field)
1410 1410 fields = set(fields)
1411 1411 if 'summary' in fields and 'description' in fields:
1412 1412 # If a revision matches its description it also matches its summary
1413 1413 fields.discard('summary')
1414 1414
1415 1415 # We may want to match more than one field
1416 1416 # Not all fields take the same amount of time to be matched
1417 1417 # Sort the selected fields in order of increasing matching cost
1418 1418 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1419 1419 'files', 'description', 'substate', 'diff']
1420 1420 def fieldkeyfunc(f):
1421 1421 try:
1422 1422 return fieldorder.index(f)
1423 1423 except ValueError:
1424 1424 # assume an unknown field is very costly
1425 1425 return len(fieldorder)
1426 1426 fields = list(fields)
1427 1427 fields.sort(key=fieldkeyfunc)
1428 1428
1429 1429 # Each field will be matched with its own "getfield" function
1430 1430 # which will be added to the getfieldfuncs array of functions
1431 1431 getfieldfuncs = []
1432 1432 _funcs = {
1433 1433 'user': lambda r: repo[r].user(),
1434 1434 'branch': lambda r: repo[r].branch(),
1435 1435 'date': lambda r: repo[r].date(),
1436 1436 'description': lambda r: repo[r].description(),
1437 1437 'files': lambda r: repo[r].files(),
1438 1438 'parents': lambda r: repo[r].parents(),
1439 1439 'phase': lambda r: repo[r].phase(),
1440 1440 'substate': lambda r: repo[r].substate,
1441 1441 'summary': lambda r: repo[r].description().splitlines()[0],
1442 1442 'diff': lambda r: list(repo[r].diff(git=True),)
1443 1443 }
1444 1444 for info in fields:
1445 1445 getfield = _funcs.get(info, None)
1446 1446 if getfield is None:
1447 1447 raise error.ParseError(
1448 1448 # i18n: "matching" is a keyword
1449 1449 _("unexpected field name passed to matching: %s") % info)
1450 1450 getfieldfuncs.append(getfield)
1451 1451 # convert the getfield array of functions into a "getinfo" function
1452 1452 # which returns an array of field values (or a single value if there
1453 1453 # is only one field to match)
1454 1454 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1455 1455
1456 1456 def matches(x):
1457 1457 for rev in revs:
1458 1458 target = getinfo(rev)
1459 1459 match = True
1460 1460 for n, f in enumerate(getfieldfuncs):
1461 1461 if target[n] != f(x):
1462 1462 match = False
1463 1463 if match:
1464 1464 return True
1465 1465 return False
1466 1466
1467 1467 return subset.filter(matches)
1468 1468
1469 1469 def reverse(repo, subset, x):
1470 1470 """``reverse(set)``
1471 1471 Reverse order of set.
1472 1472 """
1473 1473 l = getset(repo, subset, x)
1474 1474 l.reverse()
1475 1475 return l
1476 1476
1477 1477 def roots(repo, subset, x):
1478 1478 """``roots(set)``
1479 1479 Changesets in set with no parent changeset in set.
1480 1480 """
1481 1481 s = getset(repo, spanset(repo), x)
1482 1482 subset = baseset([r for r in s if r in subset])
1483 1483 cs = _children(repo, subset, s)
1484 1484 return subset - cs
1485 1485
1486 1486 def secret(repo, subset, x):
1487 1487 """``secret()``
1488 1488 Changeset in secret phase."""
1489 1489 # i18n: "secret" is a keyword
1490 1490 getargs(x, 0, 0, _("secret takes no arguments"))
1491 1491 phase = repo._phasecache.phase
1492 1492 target = phases.secret
1493 1493 condition = lambda r: phase(repo, r) == target
1494 1494 return subset.filter(condition, cache=False)
1495 1495
1496 1496 def sort(repo, subset, x):
1497 1497 """``sort(set[, [-]key...])``
1498 1498 Sort set by keys. The default sort order is ascending, specify a key
1499 1499 as ``-key`` to sort in descending order.
1500 1500
1501 1501 The keys can be:
1502 1502
1503 1503 - ``rev`` for the revision number,
1504 1504 - ``branch`` for the branch name,
1505 1505 - ``desc`` for the commit message (description),
1506 1506 - ``user`` for user name (``author`` can be used as an alias),
1507 1507 - ``date`` for the commit date
1508 1508 """
1509 1509 # i18n: "sort" is a keyword
1510 1510 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1511 1511 keys = "rev"
1512 1512 if len(l) == 2:
1513 1513 # i18n: "sort" is a keyword
1514 1514 keys = getstring(l[1], _("sort spec must be a string"))
1515 1515
1516 1516 s = l[0]
1517 1517 keys = keys.split()
1518 1518 l = []
1519 1519 def invert(s):
1520 1520 return "".join(chr(255 - ord(c)) for c in s)
1521 1521 revs = getset(repo, subset, s)
1522 1522 if keys == ["rev"]:
1523 1523 revs.sort()
1524 1524 return revs
1525 1525 elif keys == ["-rev"]:
1526 1526 revs.sort(reverse=True)
1527 1527 return revs
1528 1528 for r in revs:
1529 1529 c = repo[r]
1530 1530 e = []
1531 1531 for k in keys:
1532 1532 if k == 'rev':
1533 1533 e.append(r)
1534 1534 elif k == '-rev':
1535 1535 e.append(-r)
1536 1536 elif k == 'branch':
1537 1537 e.append(c.branch())
1538 1538 elif k == '-branch':
1539 1539 e.append(invert(c.branch()))
1540 1540 elif k == 'desc':
1541 1541 e.append(c.description())
1542 1542 elif k == '-desc':
1543 1543 e.append(invert(c.description()))
1544 1544 elif k in 'user author':
1545 1545 e.append(c.user())
1546 1546 elif k in '-user -author':
1547 1547 e.append(invert(c.user()))
1548 1548 elif k == 'date':
1549 1549 e.append(c.date()[0])
1550 1550 elif k == '-date':
1551 1551 e.append(-c.date()[0])
1552 1552 else:
1553 1553 raise error.ParseError(_("unknown sort key %r") % k)
1554 1554 e.append(r)
1555 1555 l.append(e)
1556 1556 l.sort()
1557 1557 return baseset([e[-1] for e in l])
1558 1558
1559 1559 def _stringmatcher(pattern):
1560 1560 """
1561 1561 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1562 1562 returns the matcher name, pattern, and matcher function.
1563 1563 missing or unknown prefixes are treated as literal matches.
1564 1564
1565 1565 helper for tests:
1566 1566 >>> def test(pattern, *tests):
1567 1567 ... kind, pattern, matcher = _stringmatcher(pattern)
1568 1568 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1569 1569
1570 1570 exact matching (no prefix):
1571 1571 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1572 1572 ('literal', 'abcdefg', [False, False, True])
1573 1573
1574 1574 regex matching ('re:' prefix)
1575 1575 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1576 1576 ('re', 'a.+b', [False, False, True])
1577 1577
1578 1578 force exact matches ('literal:' prefix)
1579 1579 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1580 1580 ('literal', 're:foobar', [False, True])
1581 1581
1582 1582 unknown prefixes are ignored and treated as literals
1583 1583 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1584 1584 ('literal', 'foo:bar', [False, False, True])
1585 1585 """
1586 1586 if pattern.startswith('re:'):
1587 1587 pattern = pattern[3:]
1588 1588 try:
1589 1589 regex = re.compile(pattern)
1590 1590 except re.error, e:
1591 1591 raise error.ParseError(_('invalid regular expression: %s')
1592 1592 % e)
1593 1593 return 're', pattern, regex.search
1594 1594 elif pattern.startswith('literal:'):
1595 1595 pattern = pattern[8:]
1596 1596 return 'literal', pattern, pattern.__eq__
1597 1597
1598 1598 def _substringmatcher(pattern):
1599 1599 kind, pattern, matcher = _stringmatcher(pattern)
1600 1600 if kind == 'literal':
1601 1601 matcher = lambda s: pattern in s
1602 1602 return kind, pattern, matcher
1603 1603
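# A small usage sketch for the matcher helpers above (hand-checked, not an
# executed doctest): _substringmatcher turns a 'literal' pattern into a
# substring test instead of the exact equality used by _stringmatcher.
#
#   kind, pattern, matcher = _substringmatcher('literal:joe')
#   matcher('joe@example.com')                            # True, substring
#   _stringmatcher('literal:joe')[2]('joe@example.com')   # False, exact match
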
1604 1604 def tag(repo, subset, x):
1605 1605 """``tag([name])``
1606 1606 The specified tag by name, or all tagged revisions if no name is given.
1607 1607
1608 1608 If `name` starts with `re:`, the remainder of the name is treated as
1609 1609 a regular expression. To match a tag that actually starts with `re:`,
1610 1610 use the prefix `literal:`.
1611 1611 """
1612 1612 # i18n: "tag" is a keyword
1613 1613 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1614 1614 cl = repo.changelog
1615 1615 if args:
1616 1616 pattern = getstring(args[0],
1617 1617 # i18n: "tag" is a keyword
1618 1618 _('the argument to tag must be a string'))
1619 1619 kind, pattern, matcher = _stringmatcher(pattern)
1620 1620 if kind == 'literal':
1621 1621 # avoid resolving all tags
1622 1622 tn = repo._tagscache.tags.get(pattern, None)
1623 1623 if tn is None:
1624 1624 raise util.Abort(_("tag '%s' does not exist") % pattern)
1625 1625 s = set([repo[tn].rev()])
1626 1626 else:
1627 1627 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1628 1628 else:
1629 1629 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1630 1630 return subset & s
1631 1631
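# Illustrative command-line uses of the matching rules described above
# (the tag names are made up):
#
#   hg log -r "tag()"             # all tagged revisions
#   hg log -r "tag('1.0')"        # the revision carrying the literal tag 1.0
#   hg log -r "tag('re:^1\.')"    # every tag starting with "1."
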
1632 1632 def tagged(repo, subset, x):
1633 1633 return tag(repo, subset, x)
1634 1634
1635 1635 def unstable(repo, subset, x):
1636 1636 """``unstable()``
1637 1637 Non-obsolete changesets with obsolete ancestors.
1638 1638 """
1639 1639 # i18n: "unstable" is a keyword
1640 1640 getargs(x, 0, 0, _("unstable takes no arguments"))
1641 1641 unstables = obsmod.getrevs(repo, 'unstable')
1642 1642 return subset & unstables
1643 1643
1644 1644
1645 1645 def user(repo, subset, x):
1646 1646 """``user(string)``
1647 1647 User name contains string. The match is case-insensitive.
1648 1648
1649 1649 If `string` starts with `re:`, the remainder of the string is treated as
1650 1650 a regular expression. To match a user that actually contains `re:`, use
1651 1651 the prefix `literal:`.
1652 1652 """
1653 1653 return author(repo, subset, x)
1654 1654
1655 1655 # for internal use
1656 1656 def _list(repo, subset, x):
1657 1657 s = getstring(x, "internal error")
1658 1658 if not s:
1659 1659 return baseset()
1660 1660 ls = [repo[r].rev() for r in s.split('\0')]
1661 1661 s = subset
1662 1662 return baseset([r for r in ls if r in s])
1663 1663
1664 1664 # for internal use
1665 1665 def _intlist(repo, subset, x):
1666 1666 s = getstring(x, "internal error")
1667 1667 if not s:
1668 1668 return baseset()
1669 1669 ls = [int(r) for r in s.split('\0')]
1670 1670 s = subset
1671 1671 return baseset([r for r in ls if r in s])
1672 1672
1673 1673 # for internal use
1674 1674 def _hexlist(repo, subset, x):
1675 1675 s = getstring(x, "internal error")
1676 1676 if not s:
1677 1677 return baseset()
1678 1678 cl = repo.changelog
1679 1679 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1680 1680 s = subset
1681 1681 return baseset([r for r in ls if r in s])
1682 1682
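# The three helpers above are not meant to be typed by users; formatspec()
# (defined further down in this module) emits them when expanding %ls, %ld
# and %ln arguments.  A sketch of the strings it produces (hand-checked
# against listexp(), not executed):
#
#   formatspec('%ld::', [10, 11])   ->  "_intlist('10\x0011')::"
#   formatspec('%ls', ['a', 'b'])   ->  "_list('a\x00b')"
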
1683 1683 symbols = {
1684 1684 "adds": adds,
1685 1685 "all": getall,
1686 1686 "ancestor": ancestor,
1687 1687 "ancestors": ancestors,
1688 1688 "_firstancestors": _firstancestors,
1689 1689 "author": author,
1690 "only": only,
1691 1690 "bisect": bisect,
1692 1691 "bisected": bisected,
1693 1692 "bookmark": bookmark,
1694 1693 "branch": branch,
1695 1694 "branchpoint": branchpoint,
1696 1695 "bumped": bumped,
1697 1696 "bundle": bundle,
1698 1697 "children": children,
1699 1698 "closed": closed,
1700 1699 "contains": contains,
1701 1700 "converted": converted,
1702 1701 "date": date,
1703 1702 "desc": desc,
1704 1703 "descendants": descendants,
1705 1704 "_firstdescendants": _firstdescendants,
1706 1705 "destination": destination,
1707 1706 "divergent": divergent,
1708 1707 "draft": draft,
1709 1708 "extinct": extinct,
1710 1709 "extra": extra,
1711 1710 "file": hasfile,
1712 1711 "filelog": filelog,
1713 1712 "first": first,
1714 1713 "follow": follow,
1715 1714 "_followfirst": _followfirst,
1716 1715 "grep": grep,
1717 1716 "head": head,
1718 1717 "heads": heads,
1719 1718 "hidden": hidden,
1720 1719 "id": node_,
1721 1720 "keyword": keyword,
1722 1721 "last": last,
1723 1722 "limit": limit,
1724 1723 "_matchfiles": _matchfiles,
1725 1724 "max": maxrev,
1726 1725 "merge": merge,
1727 1726 "min": minrev,
1728 1727 "modifies": modifies,
1729 1728 "obsolete": obsolete,
1729 "only": only,
1730 1730 "origin": origin,
1731 1731 "outgoing": outgoing,
1732 1732 "p1": p1,
1733 1733 "p2": p2,
1734 1734 "parents": parents,
1735 1735 "present": present,
1736 1736 "public": public,
1737 1737 "remote": remote,
1738 1738 "removes": removes,
1739 1739 "rev": rev,
1740 1740 "reverse": reverse,
1741 1741 "roots": roots,
1742 1742 "sort": sort,
1743 1743 "secret": secret,
1744 1744 "matching": matching,
1745 1745 "tag": tag,
1746 1746 "tagged": tagged,
1747 1747 "user": user,
1748 1748 "unstable": unstable,
1749 1749 "_list": _list,
1750 1750 "_intlist": _intlist,
1751 1751 "_hexlist": _hexlist,
1752 1752 }
1753 1753
1754 1754 # symbols which can't be used for a DoS attack for any given input
1755 1755 # (e.g. those which accept regexes as plain strings shouldn't be included)
1756 1756 # functions that just return a lot of changesets (like all) don't count here
1757 1757 safesymbols = set([
1758 1758 "adds",
1759 1759 "all",
1760 1760 "ancestor",
1761 1761 "ancestors",
1762 1762 "_firstancestors",
1763 1763 "author",
1764 1764 "bisect",
1765 1765 "bisected",
1766 1766 "bookmark",
1767 1767 "branch",
1768 1768 "branchpoint",
1769 1769 "bumped",
1770 1770 "bundle",
1771 1771 "children",
1772 1772 "closed",
1773 1773 "converted",
1774 1774 "date",
1775 1775 "desc",
1776 1776 "descendants",
1777 1777 "_firstdescendants",
1778 1778 "destination",
1779 1779 "divergent",
1780 1780 "draft",
1781 1781 "extinct",
1782 1782 "extra",
1783 1783 "file",
1784 1784 "filelog",
1785 1785 "first",
1786 1786 "follow",
1787 1787 "_followfirst",
1788 1788 "head",
1789 1789 "heads",
1790 1790 "hidden",
1791 1791 "id",
1792 1792 "keyword",
1793 1793 "last",
1794 1794 "limit",
1795 1795 "_matchfiles",
1796 1796 "max",
1797 1797 "merge",
1798 1798 "min",
1799 1799 "modifies",
1800 1800 "obsolete",
1801 1801 "origin",
1802 1802 "outgoing",
1803 1803 "p1",
1804 1804 "p2",
1805 1805 "parents",
1806 1806 "present",
1807 1807 "public",
1808 1808 "remote",
1809 1809 "removes",
1810 1810 "rev",
1811 1811 "reverse",
1812 1812 "roots",
1813 1813 "sort",
1814 1814 "secret",
1815 1815 "matching",
1816 1816 "tag",
1817 1817 "tagged",
1818 1818 "user",
1819 1819 "unstable",
1820 1820 "_list",
1821 1821 "_intlist",
1822 1822 "_hexlist",
1823 1823 ])
1824 1824
1825 1825 methods = {
1826 1826 "range": rangeset,
1827 1827 "dagrange": dagrange,
1828 1828 "string": stringset,
1829 1829 "symbol": symbolset,
1830 1830 "and": andset,
1831 1831 "or": orset,
1832 1832 "not": notset,
1833 1833 "list": listset,
1834 1834 "func": func,
1835 1835 "ancestor": ancestorspec,
1836 1836 "parent": parentspec,
1837 1837 "parentpost": p1,
1838 1838 }
1839 1839
1840 1840 def optimize(x, small):
1841 1841 if x is None:
1842 1842 return 0, x
1843 1843
1844 1844 smallbonus = 1
1845 1845 if small:
1846 1846 smallbonus = .5
1847 1847
1848 1848 op = x[0]
1849 1849 if op == 'minus':
1850 1850 return optimize(('and', x[1], ('not', x[2])), small)
1851 1851 elif op == 'dagrangepre':
1852 1852 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1853 1853 elif op == 'dagrangepost':
1854 1854 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1855 1855 elif op == 'rangepre':
1856 1856 return optimize(('range', ('string', '0'), x[1]), small)
1857 1857 elif op == 'rangepost':
1858 1858 return optimize(('range', x[1], ('string', 'tip')), small)
1859 1859 elif op == 'negate':
1860 1860 return optimize(('string',
1861 1861 '-' + getstring(x[1], _("can't negate that"))), small)
1862 1862 elif op in 'string symbol negate':
1863 1863 return smallbonus, x # single revisions are small
1864 1864 elif op == 'and':
1865 1865 wa, ta = optimize(x[1], True)
1866 1866 wb, tb = optimize(x[2], True)
1867 1867
1868 1868 # (::x and not ::y)/(not ::y and ::x) have a fast path
1869 1869 def isonly(revs, bases):
1870 1870 return (
1871 1871 revs[0] == 'func'
1872 1872 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1873 1873 and bases[0] == 'not'
1874 1874 and bases[1][0] == 'func'
1875 1875 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1876 1876
1877 1877 w = min(wa, wb)
1878 1878 if isonly(ta, tb):
1879 1879 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1880 1880 if isonly(tb, ta):
1881 1881 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1882 1882
1883 1883 if wa > wb:
1884 1884 return w, (op, tb, ta)
1885 1885 return w, (op, ta, tb)
1886 1886 elif op == 'or':
1887 1887 wa, ta = optimize(x[1], False)
1888 1888 wb, tb = optimize(x[2], False)
1889 1889 if wb < wa:
1890 1890 wb, wa = wa, wb
1891 1891 return max(wa, wb), (op, ta, tb)
1892 1892 elif op == 'not':
1893 1893 o = optimize(x[1], not small)
1894 1894 return o[0], (op, o[1])
1895 1895 elif op == 'parentpost':
1896 1896 o = optimize(x[1], small)
1897 1897 return o[0], (op, o[1])
1898 1898 elif op == 'group':
1899 1899 return optimize(x[1], small)
1900 1900 elif op in 'dagrange range list parent ancestorspec':
1901 1901 if op == 'parent':
1902 1902 # x^:y means (x^) : y, not x ^ (:y)
1903 1903 post = ('parentpost', x[1])
1904 1904 if x[2][0] == 'dagrangepre':
1905 1905 return optimize(('dagrange', post, x[2][1]), small)
1906 1906 elif x[2][0] == 'rangepre':
1907 1907 return optimize(('range', post, x[2][1]), small)
1908 1908
1909 1909 wa, ta = optimize(x[1], small)
1910 1910 wb, tb = optimize(x[2], small)
1911 1911 return wa + wb, (op, ta, tb)
1912 1912 elif op == 'func':
1913 1913 f = getstring(x[1], _("not a symbol"))
1914 1914 wa, ta = optimize(x[2], small)
1915 1915 if f in ("author branch closed date desc file grep keyword "
1916 1916 "outgoing user"):
1917 1917 w = 10 # slow
1918 1918 elif f in "modifies adds removes":
1919 1919 w = 30 # slower
1920 1920 elif f == "contains":
1921 1921 w = 100 # very slow
1922 1922 elif f == "ancestor":
1923 1923 w = 1 * smallbonus
1924 1924 elif f in "reverse limit first _intlist":
1925 1925 w = 0
1926 1926 elif f in "sort":
1927 1927 w = 10 # assume most sorts look at changelog
1928 1928 else:
1929 1929 w = 1
1930 1930 return w + wa, (op, x[1], ta)
1931 1931 return 1, x
1932 1932
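# An illustration of the 'only' rewrite performed by optimize() above
# (hand-derived from isonly(), not executed): an expression such as
#
#   ::a and not ::b
#
# becomes "ancestors(a) and not ancestors(b)" after the dagrangepre
# rewrites, which the 'and' branch detects and turns into the equivalent
# but cheaper
#
#   only(a, b)
#
# i.e. ('func', ('symbol', 'only'), ('list', ('symbol', 'a'), ('symbol', 'b'))).
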
1933 1933 _aliasarg = ('func', ('symbol', '_aliasarg'))
1934 1934 def _getaliasarg(tree):
1935 1935 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1936 1936 return X, None otherwise.
1937 1937 """
1938 1938 if (len(tree) == 3 and tree[:2] == _aliasarg
1939 1939 and tree[2][0] == 'string'):
1940 1940 return tree[2][1]
1941 1941 return None
1942 1942
1943 1943 def _checkaliasarg(tree, known=None):
1944 1944 """Check tree contains no _aliasarg construct or only ones which
1945 1945 value is in known. Used to avoid alias placeholders injection.
1946 1946 """
1947 1947 if isinstance(tree, tuple):
1948 1948 arg = _getaliasarg(tree)
1949 1949 if arg is not None and (not known or arg not in known):
1950 1950 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1951 1951 for t in tree:
1952 1952 _checkaliasarg(t, known)
1953 1953
1954 1954 class revsetalias(object):
1955 1955 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1956 1956 args = None
1957 1957
1958 1958 def __init__(self, name, value):
1959 1959 '''Aliases like:
1960 1960
1961 1961 h = heads(default)
1962 1962 b($1) = ancestors($1) - ancestors(default)
1963 1963 '''
1964 1964 m = self.funcre.search(name)
1965 1965 if m:
1966 1966 self.name = m.group(1)
1967 1967 self.tree = ('func', ('symbol', m.group(1)))
1968 1968 self.args = [x.strip() for x in m.group(2).split(',')]
1969 1969 for arg in self.args:
1970 1970                 # _aliasarg() is an unknown symbol only used to separate
1971 1971 # alias argument placeholders from regular strings.
1972 1972 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1973 1973 else:
1974 1974 self.name = name
1975 1975 self.tree = ('symbol', name)
1976 1976
1977 1977 self.replacement, pos = parse(value)
1978 1978 if pos != len(value):
1979 1979 raise error.ParseError(_('invalid token'), pos)
1980 1980 # Check for placeholder injection
1981 1981 _checkaliasarg(self.replacement, self.args)
1982 1982
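# For reference, the aliases handled by revsetalias come from the
# [revsetalias] section of a configuration file, e.g. (mirroring the
# docstring above; the names are only illustrative):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# after which "hg log -r 'b(stable)'" expands to
# "ancestors(stable) - ancestors(default)".
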
1983 1983 def _getalias(aliases, tree):
1984 1984 """If tree looks like an unexpanded alias, return it. Return None
1985 1985 otherwise.
1986 1986 """
1987 1987 if isinstance(tree, tuple) and tree:
1988 1988 if tree[0] == 'symbol' and len(tree) == 2:
1989 1989 name = tree[1]
1990 1990 alias = aliases.get(name)
1991 1991 if alias and alias.args is None and alias.tree == tree:
1992 1992 return alias
1993 1993 if tree[0] == 'func' and len(tree) > 1:
1994 1994 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1995 1995 name = tree[1][1]
1996 1996 alias = aliases.get(name)
1997 1997 if alias and alias.args is not None and alias.tree == tree[:2]:
1998 1998 return alias
1999 1999 return None
2000 2000
2001 2001 def _expandargs(tree, args):
2002 2002 """Replace _aliasarg instances with the substitution value of the
2003 2003 same name in args, recursively.
2004 2004 """
2005 2005 if not tree or not isinstance(tree, tuple):
2006 2006 return tree
2007 2007 arg = _getaliasarg(tree)
2008 2008 if arg is not None:
2009 2009 return args[arg]
2010 2010 return tuple(_expandargs(t, args) for t in tree)
2011 2011
2012 2012 def _expandaliases(aliases, tree, expanding, cache):
2013 2013 """Expand aliases in tree, recursively.
2014 2014
2015 2015 'aliases' is a dictionary mapping user defined aliases to
2016 2016 revsetalias objects.
2017 2017 """
2018 2018 if not isinstance(tree, tuple):
2019 2019 # Do not expand raw strings
2020 2020 return tree
2021 2021 alias = _getalias(aliases, tree)
2022 2022 if alias is not None:
2023 2023 if alias in expanding:
2024 2024 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2025 2025 'detected') % alias.name)
2026 2026 expanding.append(alias)
2027 2027 if alias.name not in cache:
2028 2028 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2029 2029 expanding, cache)
2030 2030 result = cache[alias.name]
2031 2031 expanding.pop()
2032 2032 if alias.args is not None:
2033 2033 l = getlist(tree[2])
2034 2034 if len(l) != len(alias.args):
2035 2035 raise error.ParseError(
2036 2036 _('invalid number of arguments: %s') % len(l))
2037 2037 l = [_expandaliases(aliases, a, [], cache) for a in l]
2038 2038 result = _expandargs(result, dict(zip(alias.args, l)))
2039 2039 else:
2040 2040 result = tuple(_expandaliases(aliases, t, expanding, cache)
2041 2041 for t in tree)
2042 2042 return result
2043 2043
2044 2044 def findaliases(ui, tree):
2045 2045 _checkaliasarg(tree)
2046 2046 aliases = {}
2047 2047 for k, v in ui.configitems('revsetalias'):
2048 2048 alias = revsetalias(k, v)
2049 2049 aliases[alias.name] = alias
2050 2050 return _expandaliases(aliases, tree, [], {})
2051 2051
2052 2052 def parse(spec, lookup=None):
2053 2053 p = parser.parser(tokenize, elements)
2054 2054 return p.parse(spec, lookup=lookup)
2055 2055
2056 2056 def match(ui, spec, repo=None):
2057 2057 if not spec:
2058 2058 raise error.ParseError(_("empty query"))
2059 2059 lookup = None
2060 2060 if repo:
2061 2061 lookup = repo.__contains__
2062 2062 tree, pos = parse(spec, lookup)
2063 2063 if (pos != len(spec)):
2064 2064 raise error.ParseError(_("invalid token"), pos)
2065 2065 if ui:
2066 2066 tree = findaliases(ui, tree)
2067 2067 weight, tree = optimize(tree, True)
2068 2068 def mfunc(repo, subset):
2069 2069 if util.safehasattr(subset, 'isascending'):
2070 2070 result = getset(repo, subset, tree)
2071 2071 else:
2072 2072 result = getset(repo, baseset(subset), tree)
2073 2073 return result
2074 2074 return mfunc
2075 2075
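# A minimal usage sketch for match() (assuming an already loaded repo and
# its ui; illustrative only):
#
#   m = match(repo.ui, '::tip and not merge()')
#   revs = m(repo, spanset(repo))   # a smartset of matching revision numbers
#   for r in revs:
#       repo.ui.write('%d\n' % r)
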
2076 2076 def formatspec(expr, *args):
2077 2077 '''
2078 2078 This is a convenience function for using revsets internally, and
2079 2079 escapes arguments appropriately. Aliases are intentionally ignored
2080 2080 so that intended expression behavior isn't accidentally subverted.
2081 2081
2082 2082 Supported arguments:
2083 2083
2084 2084 %r = revset expression, parenthesized
2085 2085 %d = int(arg), no quoting
2086 2086 %s = string(arg), escaped and single-quoted
2087 2087 %b = arg.branch(), escaped and single-quoted
2088 2088 %n = hex(arg), single-quoted
2089 2089 %% = a literal '%'
2090 2090
2091 2091 Prefixing the type with 'l' specifies a parenthesized list of that type.
2092 2092
2093 2093 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2094 2094 '(10 or 11):: and ((this()) or (that()))'
2095 2095 >>> formatspec('%d:: and not %d::', 10, 20)
2096 2096 '10:: and not 20::'
2097 2097 >>> formatspec('%ld or %ld', [], [1])
2098 2098 "_list('') or 1"
2099 2099 >>> formatspec('keyword(%s)', 'foo\\xe9')
2100 2100 "keyword('foo\\\\xe9')"
2101 2101 >>> b = lambda: 'default'
2102 2102 >>> b.branch = b
2103 2103 >>> formatspec('branch(%b)', b)
2104 2104 "branch('default')"
2105 2105 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2106 2106 "root(_list('a\\x00b\\x00c\\x00d'))"
2107 2107 '''
2108 2108
2109 2109 def quote(s):
2110 2110 return repr(str(s))
2111 2111
2112 2112 def argtype(c, arg):
2113 2113 if c == 'd':
2114 2114 return str(int(arg))
2115 2115 elif c == 's':
2116 2116 return quote(arg)
2117 2117 elif c == 'r':
2118 2118 parse(arg) # make sure syntax errors are confined
2119 2119 return '(%s)' % arg
2120 2120 elif c == 'n':
2121 2121 return quote(node.hex(arg))
2122 2122 elif c == 'b':
2123 2123 return quote(arg.branch())
2124 2124
2125 2125 def listexp(s, t):
2126 2126 l = len(s)
2127 2127 if l == 0:
2128 2128 return "_list('')"
2129 2129 elif l == 1:
2130 2130 return argtype(t, s[0])
2131 2131 elif t == 'd':
2132 2132 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2133 2133 elif t == 's':
2134 2134 return "_list('%s')" % "\0".join(s)
2135 2135 elif t == 'n':
2136 2136 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2137 2137 elif t == 'b':
2138 2138 return "_list('%s')" % "\0".join(a.branch() for a in s)
2139 2139
2140 2140 m = l // 2
2141 2141 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2142 2142
2143 2143 ret = ''
2144 2144 pos = 0
2145 2145 arg = 0
2146 2146 while pos < len(expr):
2147 2147 c = expr[pos]
2148 2148 if c == '%':
2149 2149 pos += 1
2150 2150 d = expr[pos]
2151 2151 if d == '%':
2152 2152 ret += d
2153 2153 elif d in 'dsnbr':
2154 2154 ret += argtype(d, args[arg])
2155 2155 arg += 1
2156 2156 elif d == 'l':
2157 2157 # a list of some type
2158 2158 pos += 1
2159 2159 d = expr[pos]
2160 2160 ret += listexp(list(args[arg]), d)
2161 2161 arg += 1
2162 2162 else:
2163 2163 raise util.Abort('unexpected revspec format character %s' % d)
2164 2164 else:
2165 2165 ret += c
2166 2166 pos += 1
2167 2167
2168 2168 return ret
2169 2169
2170 2170 def prettyformat(tree):
2171 2171 def _prettyformat(tree, level, lines):
2172 2172 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2173 2173 lines.append((level, str(tree)))
2174 2174 else:
2175 2175 lines.append((level, '(%s' % tree[0]))
2176 2176 for s in tree[1:]:
2177 2177 _prettyformat(s, level + 1, lines)
2178 2178 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2179 2179
2180 2180 lines = []
2181 2181 _prettyformat(tree, 0, lines)
2182 2182 output = '\n'.join((' '*l + s) for l, s in lines)
2183 2183 return output
2184 2184
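# Example of the layout produced by prettyformat() (hand-checked against the
# code above, not an executed doctest):
#
#   prettyformat(('and', ('symbol', 'a'), ('symbol', 'b')))
#
# returns
#
#   (and
#     ('symbol', 'a')
#     ('symbol', 'b'))
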
2185 2185 def depth(tree):
2186 2186 if isinstance(tree, tuple):
2187 2187 return max(map(depth, tree)) + 1
2188 2188 else:
2189 2189 return 0
2190 2190
2191 2191 def funcsused(tree):
2192 2192 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2193 2193 return set()
2194 2194 else:
2195 2195 funcs = set()
2196 2196 for s in tree[1:]:
2197 2197 funcs |= funcsused(s)
2198 2198 if tree[0] == 'func':
2199 2199 funcs.add(tree[1][1])
2200 2200 return funcs
2201 2201
2202 2202 class abstractsmartset(object):
2203 2203
2204 2204 def __nonzero__(self):
2205 2205 """True if the smartset is not empty"""
2206 2206 raise NotImplementedError()
2207 2207
2208 2208 def __contains__(self, rev):
2209 2209 """provide fast membership testing"""
2210 2210 raise NotImplementedError()
2211 2211
2212 2212 def __iter__(self):
2213 2213 """iterate the set in the order it is supposed to be iterated"""
2214 2214 raise NotImplementedError()
2215 2215
2216 2216 # Attributes containing a function to perform a fast iteration in a given
2217 2217 # direction. A smartset can have none, one, or both defined.
2218 2218 #
2219 2219 # Default value is None instead of a function returning None to avoid
2220 2220 # initializing an iterator just for testing if a fast method exists.
2221 2221 fastasc = None
2222 2222 fastdesc = None
2223 2223
2224 2224 def isascending(self):
2225 2225 """True if the set will iterate in ascending order"""
2226 2226 raise NotImplementedError()
2227 2227
2228 2228 def isdescending(self):
2229 2229 """True if the set will iterate in descending order"""
2230 2230 raise NotImplementedError()
2231 2231
2232 2232 def min(self):
2233 2233 """return the minimum element in the set"""
2234 2234 if self.fastasc is not None:
2235 2235 for r in self.fastasc():
2236 2236 return r
2237 2237 raise ValueError('arg is an empty sequence')
2238 2238 return min(self)
2239 2239
2240 2240 def max(self):
2241 2241 """return the maximum element in the set"""
2242 2242 if self.fastdesc is not None:
2243 2243 for r in self.fastdesc():
2244 2244 return r
2245 2245 raise ValueError('arg is an empty sequence')
2246 2246 return max(self)
2247 2247
2248 2248 def first(self):
2249 2249 """return the first element in the set (user iteration perspective)
2250 2250
2251 2251 Return None if the set is empty"""
2252 2252 raise NotImplementedError()
2253 2253
2254 2254 def last(self):
2255 2255 """return the last element in the set (user iteration perspective)
2256 2256
2257 2257 Return None if the set is empty"""
2258 2258 raise NotImplementedError()
2259 2259
2260 2260 def __len__(self):
2261 2261         """return the length of the smartset
2262 2262 
2263 2263         This can be expensive on a smartset that could be lazy otherwise."""
2264 2264 raise NotImplementedError()
2265 2265
2266 2266 def reverse(self):
2267 2267 """reverse the expected iteration order"""
2268 2268 raise NotImplementedError()
2269 2269
2270 2270     def sort(self, reverse=False):
2271 2271 """get the set to iterate in an ascending or descending order"""
2272 2272 raise NotImplementedError()
2273 2273
2274 2274 def __and__(self, other):
2275 2275 """Returns a new object with the intersection of the two collections.
2276 2276
2277 2277 This is part of the mandatory API for smartset."""
2278 2278 return self.filter(other.__contains__, cache=False)
2279 2279
2280 2280 def __add__(self, other):
2281 2281 """Returns a new object with the union of the two collections.
2282 2282
2283 2283 This is part of the mandatory API for smartset."""
2284 2284 return addset(self, other)
2285 2285
2286 2286 def __sub__(self, other):
2287 2287         """Returns a new object with the subtraction of the two collections.
2288 2288
2289 2289 This is part of the mandatory API for smartset."""
2290 2290 c = other.__contains__
2291 2291 return self.filter(lambda r: not c(r), cache=False)
2292 2292
2293 2293 def filter(self, condition, cache=True):
2294 2294 """Returns this smartset filtered by condition as a new smartset.
2295 2295
2296 2296 `condition` is a callable which takes a revision number and returns a
2297 2297 boolean.
2298 2298
2299 2299 This is part of the mandatory API for smartset."""
2300 2300         # builtins cannot be cached, but they do not need to be
2301 2301 if cache and util.safehasattr(condition, 'func_code'):
2302 2302 condition = util.cachefunc(condition)
2303 2303 return filteredset(self, condition)
2304 2304
2305 2305 class baseset(abstractsmartset):
2306 2306 """Basic data structure that represents a revset and contains the basic
2307 2307     operations that it should be able to perform.
2308 2308
2309 2309 Every method in this class should be implemented by any smartset class.
2310 2310 """
2311 2311 def __init__(self, data=()):
2312 2312 if not isinstance(data, list):
2313 2313 data = list(data)
2314 2314 self._list = data
2315 2315 self._ascending = None
2316 2316
2317 2317 @util.propertycache
2318 2318 def _set(self):
2319 2319 return set(self._list)
2320 2320
2321 2321 @util.propertycache
2322 2322 def _asclist(self):
2323 2323 asclist = self._list[:]
2324 2324 asclist.sort()
2325 2325 return asclist
2326 2326
2327 2327 def __iter__(self):
2328 2328 if self._ascending is None:
2329 2329 return iter(self._list)
2330 2330 elif self._ascending:
2331 2331 return iter(self._asclist)
2332 2332 else:
2333 2333 return reversed(self._asclist)
2334 2334
2335 2335 def fastasc(self):
2336 2336 return iter(self._asclist)
2337 2337
2338 2338 def fastdesc(self):
2339 2339 return reversed(self._asclist)
2340 2340
2341 2341 @util.propertycache
2342 2342 def __contains__(self):
2343 2343 return self._set.__contains__
2344 2344
2345 2345 def __nonzero__(self):
2346 2346 return bool(self._list)
2347 2347
2348 2348 def sort(self, reverse=False):
2349 2349 self._ascending = not bool(reverse)
2350 2350
2351 2351 def reverse(self):
2352 2352 if self._ascending is None:
2353 2353 self._list.reverse()
2354 2354 else:
2355 2355 self._ascending = not self._ascending
2356 2356
2357 2357 def __len__(self):
2358 2358 return len(self._list)
2359 2359
2360 2360 def isascending(self):
2361 2361         """Returns True if the collection is in ascending order, False if not.
2362 2362
2363 2363 This is part of the mandatory API for smartset."""
2364 2364 if len(self) <= 1:
2365 2365 return True
2366 2366 return self._ascending is not None and self._ascending
2367 2367
2368 2368 def isdescending(self):
2369 2369         """Returns True if the collection is in descending order, False if not.
2370 2370
2371 2371 This is part of the mandatory API for smartset."""
2372 2372 if len(self) <= 1:
2373 2373 return True
2374 2374 return self._ascending is not None and not self._ascending
2375 2375
2376 2376 def first(self):
2377 2377 if self:
2378 2378 if self._ascending is None:
2379 2379 return self._list[0]
2380 2380 elif self._ascending:
2381 2381 return self._asclist[0]
2382 2382 else:
2383 2383 return self._asclist[-1]
2384 2384 return None
2385 2385
2386 2386 def last(self):
2387 2387 if self:
2388 2388 if self._ascending is None:
2389 2389 return self._list[-1]
2390 2390 elif self._ascending:
2391 2391 return self._asclist[-1]
2392 2392 else:
2393 2393 return self._asclist[0]
2394 2394 return None
2395 2395
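# A quick sketch of the baseset behaviour (illustrative, hand-checked):
#
#   s = baseset([3, 1, 2])
#   list(s)              # [3, 1, 2] - insertion order until a sort is requested
#   s.sort()
#   list(s)              # [1, 2, 3]
#   s.reverse()
#   list(s)              # [3, 2, 1]
#   s.first(), s.last()  # (3, 1)
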
2396 2396 class filteredset(abstractsmartset):
2397 2397 """Duck type for baseset class which iterates lazily over the revisions in
2398 2398 the subset and contains a function which tests for membership in the
2399 2399 revset
2400 2400 """
2401 2401 def __init__(self, subset, condition=lambda x: True):
2402 2402 """
2403 2403         condition: a function that decides whether a revision in the subset
2404 2404 belongs to the revset or not.
2405 2405 """
2406 2406 self._subset = subset
2407 2407 self._condition = condition
2408 2408 self._cache = {}
2409 2409
2410 2410 def __contains__(self, x):
2411 2411 c = self._cache
2412 2412 if x not in c:
2413 2413 v = c[x] = x in self._subset and self._condition(x)
2414 2414 return v
2415 2415 return c[x]
2416 2416
2417 2417 def __iter__(self):
2418 2418 return self._iterfilter(self._subset)
2419 2419
2420 2420 def _iterfilter(self, it):
2421 2421 cond = self._condition
2422 2422 for x in it:
2423 2423 if cond(x):
2424 2424 yield x
2425 2425
2426 2426 @property
2427 2427 def fastasc(self):
2428 2428 it = self._subset.fastasc
2429 2429 if it is None:
2430 2430 return None
2431 2431 return lambda: self._iterfilter(it())
2432 2432
2433 2433 @property
2434 2434 def fastdesc(self):
2435 2435 it = self._subset.fastdesc
2436 2436 if it is None:
2437 2437 return None
2438 2438 return lambda: self._iterfilter(it())
2439 2439
2440 2440 def __nonzero__(self):
2441 2441 for r in self:
2442 2442 return True
2443 2443 return False
2444 2444
2445 2445 def __len__(self):
2446 2446 # Basic implementation to be changed in future patches.
2447 2447 l = baseset([r for r in self])
2448 2448 return len(l)
2449 2449
2450 2450 def sort(self, reverse=False):
2451 2451 self._subset.sort(reverse=reverse)
2452 2452
2453 2453 def reverse(self):
2454 2454 self._subset.reverse()
2455 2455
2456 2456 def isascending(self):
2457 2457 return self._subset.isascending()
2458 2458
2459 2459 def isdescending(self):
2460 2460 return self._subset.isdescending()
2461 2461
2462 2462 def first(self):
2463 2463 for x in self:
2464 2464 return x
2465 2465 return None
2466 2466
2467 2467 def last(self):
2468 2468 it = None
2469 2469         if self._subset.isascending():
2470 2470             it = self.fastdesc
2471 2471         elif self._subset.isdescending():
2472 2472             it = self.fastasc
2473 2473 if it is None:
2474 2474 # slowly consume everything. This needs improvement
2475 2475 it = lambda: reversed(list(self))
2476 2476 for x in it():
2477 2477 return x
2478 2478 return None
2479 2479
2480 2480 class addset(abstractsmartset):
2481 2481 """Represent the addition of two sets
2482 2482
2483 2483 Wrapper structure for lazily adding two structures without losing much
2484 2484 performance on the __contains__ method
2485 2485
2486 2486 If the ascending attribute is set, that means the two structures are
2487 2487 ordered in either an ascending or descending way. Therefore, we can add
2488 2488 them maintaining the order by iterating over both at the same time
2489 2489 """
2490 2490 def __init__(self, revs1, revs2, ascending=None):
2491 2491 self._r1 = revs1
2492 2492 self._r2 = revs2
2493 2493 self._iter = None
2494 2494 self._ascending = ascending
2495 2495 self._genlist = None
2496 2496 self._asclist = None
2497 2497
2498 2498 def __len__(self):
2499 2499 return len(self._list)
2500 2500
2501 2501 def __nonzero__(self):
2502 2502 return bool(self._r1) or bool(self._r2)
2503 2503
2504 2504 @util.propertycache
2505 2505 def _list(self):
2506 2506 if not self._genlist:
2507 2507 self._genlist = baseset(self._iterator())
2508 2508 return self._genlist
2509 2509
2510 2510 def _iterator(self):
2511 2511 """Iterate over both collections without repeating elements
2512 2512
2513 2513 If the ascending attribute is not set, iterate over the first one and
2514 2514 then over the second one checking for membership on the first one so we
2515 2515         don't yield any duplicates.
2516 2516
2517 2517 If the ascending attribute is set, iterate over both collections at the
2518 2518 same time, yielding only one value at a time in the given order.
2519 2519 """
2520 2520 if self._ascending is None:
2521 2521 def gen():
2522 2522 for r in self._r1:
2523 2523 yield r
2524 2524 inr1 = self._r1.__contains__
2525 2525 for r in self._r2:
2526 2526 if not inr1(r):
2527 2527 yield r
2528 2528 gen = gen()
2529 2529 else:
2530 2530 iter1 = iter(self._r1)
2531 2531 iter2 = iter(self._r2)
2532 2532 gen = self._iterordered(self._ascending, iter1, iter2)
2533 2533 return gen
2534 2534
2535 2535 def __iter__(self):
2536 2536 if self._ascending is None:
2537 2537 if self._genlist:
2538 2538 return iter(self._genlist)
2539 2539 return iter(self._iterator())
2540 2540 self._trysetasclist()
2541 2541 if self._ascending:
2542 2542 it = self.fastasc
2543 2543 else:
2544 2544 it = self.fastdesc
2545 2545 if it is None:
2546 2546 # consume the gen and try again
2547 2547 self._list
2548 2548 return iter(self)
2549 2549 return it()
2550 2550
2551 2551 def _trysetasclist(self):
2552 2552 """populate the _asclist attribute if possible and necessary"""
2553 2553 if self._genlist is not None and self._asclist is None:
2554 2554 self._asclist = sorted(self._genlist)
2555 2555
2556 2556 @property
2557 2557 def fastasc(self):
2558 2558 self._trysetasclist()
2559 2559 if self._asclist is not None:
2560 2560 return self._asclist.__iter__
2561 2561 iter1 = self._r1.fastasc
2562 2562 iter2 = self._r2.fastasc
2563 2563 if None in (iter1, iter2):
2564 2564 return None
2565 2565 return lambda: self._iterordered(True, iter1(), iter2())
2566 2566
2567 2567 @property
2568 2568 def fastdesc(self):
2569 2569 self._trysetasclist()
2570 2570 if self._asclist is not None:
2571 2571 return self._asclist.__reversed__
2572 2572 iter1 = self._r1.fastdesc
2573 2573 iter2 = self._r2.fastdesc
2574 2574 if None in (iter1, iter2):
2575 2575 return None
2576 2576 return lambda: self._iterordered(False, iter1(), iter2())
2577 2577
2578 2578 def _iterordered(self, ascending, iter1, iter2):
2579 2579 """produce an ordered iteration from two iterators with the same order
2580 2580
2581 2581         The ascending parameter is used to indicate the iteration direction.
2582 2582 """
2583 2583 choice = max
2584 2584 if ascending:
2585 2585 choice = min
2586 2586
2587 2587 val1 = None
2588 2588 val2 = None
2589 2589
2593 2593 try:
2594 2594 # Consume both iterators in an ordered way until one is
2595 2595 # empty
2596 2596 while True:
2597 2597 if val1 is None:
2598 2598 val1 = iter1.next()
2599 2599 if val2 is None:
2600 2600 val2 = iter2.next()
2601 2601 next = choice(val1, val2)
2602 2602 yield next
2603 2603 if val1 == next:
2604 2604 val1 = None
2605 2605 if val2 == next:
2606 2606 val2 = None
2607 2607 except StopIteration:
2608 2608 # Flush any remaining values and consume the other one
2609 2609 it = iter2
2610 2610 if val1 is not None:
2611 2611 yield val1
2612 2612 it = iter1
2613 2613 elif val2 is not None:
2614 2614 # might have been equality and both are empty
2615 2615 yield val2
2616 2616 for val in it:
2617 2617 yield val
2618 2618
2619 2619 def __contains__(self, x):
2620 2620 return x in self._r1 or x in self._r2
2621 2621
2622 2622 def sort(self, reverse=False):
2623 2623 """Sort the added set
2624 2624
2625 2625 For this we use the cached list with all the generated values and if we
2626 2626 know they are ascending or descending we can sort them in a smart way.
2627 2627 """
2628 2628 self._ascending = not reverse
2629 2629
2630 2630 def isascending(self):
2631 2631 return self._ascending is not None and self._ascending
2632 2632
2633 2633 def isdescending(self):
2634 2634 return self._ascending is not None and not self._ascending
2635 2635
2636 2636 def reverse(self):
2637 2637 if self._ascending is None:
2638 2638 self._list.reverse()
2639 2639 else:
2640 2640 self._ascending = not self._ascending
2641 2641
2642 2642 def first(self):
2643 2643 for x in self:
2644 2644 return x
2645 2645 return None
2646 2646
2647 2647 def last(self):
2648 2648 self.reverse()
2649 2649 val = self.first()
2650 2650 self.reverse()
2651 2651 return val
2652 2652
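# A sketch of how addset merges two ordered smartsets (illustrative,
# hand-checked against _iterordered()):
#
#   xs = baseset([1, 3, 5]); xs.sort()
#   ys = baseset([2, 3, 4]); ys.sort()
#   list(addset(xs, ys, ascending=True))   # [1, 2, 3, 4, 5]
#
# Both inputs are consumed lazily in step and the duplicate 3 is yielded
# only once.
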
2653 2653 class generatorset(abstractsmartset):
2654 2654 """Wrap a generator for lazy iteration
2655 2655
2656 2656 Wrapper structure for generators that provides lazy membership and can
2657 2657 be iterated more than once.
2658 2658 When asked for membership it generates values until either it finds the
2659 2659 requested one or has gone through all the elements in the generator
2660 2660 """
2661 2661 def __init__(self, gen, iterasc=None):
2662 2662 """
2663 2663 gen: a generator producing the values for the generatorset.
2664 2664 """
2665 2665 self._gen = gen
2666 2666 self._asclist = None
2667 2667 self._cache = {}
2668 2668 self._genlist = []
2669 2669 self._finished = False
2670 2670 self._ascending = True
2671 2671 if iterasc is not None:
2672 2672 if iterasc:
2673 2673 self.fastasc = self._iterator
2674 2674 self.__contains__ = self._asccontains
2675 2675 else:
2676 2676 self.fastdesc = self._iterator
2677 2677 self.__contains__ = self._desccontains
2678 2678
2679 2679 def __nonzero__(self):
2680 2680 for r in self:
2681 2681 return True
2682 2682 return False
2683 2683
2684 2684 def __contains__(self, x):
2685 2685 if x in self._cache:
2686 2686 return self._cache[x]
2687 2687
2688 2688 # Use new values only, as existing values would be cached.
2689 2689 for l in self._consumegen():
2690 2690 if l == x:
2691 2691 return True
2692 2692
2693 2693 self._cache[x] = False
2694 2694 return False
2695 2695
2696 2696 def _asccontains(self, x):
2697 2697 """version of contains optimised for ascending generator"""
2698 2698 if x in self._cache:
2699 2699 return self._cache[x]
2700 2700
2701 2701 # Use new values only, as existing values would be cached.
2702 2702 for l in self._consumegen():
2703 2703 if l == x:
2704 2704 return True
2705 2705 if l > x:
2706 2706 break
2707 2707
2708 2708 self._cache[x] = False
2709 2709 return False
2710 2710
2711 2711 def _desccontains(self, x):
2712 2712 """version of contains optimised for descending generator"""
2713 2713 if x in self._cache:
2714 2714 return self._cache[x]
2715 2715
2716 2716 # Use new values only, as existing values would be cached.
2717 2717 for l in self._consumegen():
2718 2718 if l == x:
2719 2719 return True
2720 2720 if l < x:
2721 2721 break
2722 2722
2723 2723 self._cache[x] = False
2724 2724 return False
2725 2725
2726 2726 def __iter__(self):
2727 2727 if self._ascending:
2728 2728 it = self.fastasc
2729 2729 else:
2730 2730 it = self.fastdesc
2731 2731 if it is not None:
2732 2732 return it()
2733 2733 # we need to consume the iterator
2734 2734 for x in self._consumegen():
2735 2735 pass
2736 2736 # recall the same code
2737 2737 return iter(self)
2738 2738
2739 2739 def _iterator(self):
2740 2740 if self._finished:
2741 2741 return iter(self._genlist)
2742 2742
2743 2743 # We have to use this complex iteration strategy to allow multiple
2744 2744         # iterations at the same time. We need to be able to pick up revisions
2745 2745         # that another iterator has already pulled from _consumegen and added
2746 2746         # to genlist.
2747 2747         # Getting rid of it would provide about a 15% speed up on this
2748 2748         # iteration.
2749 2749 genlist = self._genlist
2750 2750 nextrev = self._consumegen().next
2751 2751 _len = len # cache global lookup
2752 2752 def gen():
2753 2753 i = 0
2754 2754 while True:
2755 2755 if i < _len(genlist):
2756 2756 yield genlist[i]
2757 2757 else:
2758 2758 yield nextrev()
2759 2759 i += 1
2760 2760 return gen()
2761 2761
2762 2762 def _consumegen(self):
2763 2763 cache = self._cache
2764 2764 genlist = self._genlist.append
2765 2765 for item in self._gen:
2766 2766 cache[item] = True
2767 2767 genlist(item)
2768 2768 yield item
2769 2769 if not self._finished:
2770 2770 self._finished = True
2771 2771 asc = self._genlist[:]
2772 2772 asc.sort()
2773 2773 self._asclist = asc
2774 2774 self.fastasc = asc.__iter__
2775 2775 self.fastdesc = asc.__reversed__
2776 2776
2777 2777 def __len__(self):
2778 2778 for x in self._consumegen():
2779 2779 pass
2780 2780 return len(self._genlist)
2781 2781
2782 2782 def sort(self, reverse=False):
2783 2783 self._ascending = not reverse
2784 2784
2785 2785 def reverse(self):
2786 2786 self._ascending = not self._ascending
2787 2787
2788 2788 def isascending(self):
2789 2789 return self._ascending
2790 2790
2791 2791 def isdescending(self):
2792 2792 return not self._ascending
2793 2793
2794 2794 def first(self):
2795 2795 if self._ascending:
2796 2796 it = self.fastasc
2797 2797 else:
2798 2798 it = self.fastdesc
2799 2799 if it is None:
2800 2800 # we need to consume all and try again
2801 2801 for x in self._consumegen():
2802 2802 pass
2803 2803 return self.first()
2804 2804 if self:
2805 2805 return it().next()
2806 2806 return None
2807 2807
2808 2808 def last(self):
2809 2809 if self._ascending:
2810 2810 it = self.fastdesc
2811 2811 else:
2812 2812 it = self.fastasc
2813 2813 if it is None:
2814 2814 # we need to consume all and try again
2815 2815 for x in self._consumegen():
2816 2816 pass
2817 2817 return self.first()
2818 2818 if self:
2819 2819 return it().next()
2820 2820 return None
2821 2821
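# A sketch of the lazy membership provided by generatorset (illustrative,
# hand-checked):
#
#   gs = generatorset(iter(xrange(1000000)), iterasc=True)
#   5 in gs       # True - the generator is only consumed up to the value 5
#   gs.first()    # 0
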
2822 2822 def spanset(repo, start=None, end=None):
2823 2823 """factory function to dispatch between fullreposet and actual spanset
2824 2824
2825 2825 Feel free to update all spanset call sites and kill this function at some
2826 2826 point.
2827 2827 """
2828 2828 if start is None and end is None:
2829 2829 return fullreposet(repo)
2830 2830 return _spanset(repo, start, end)
2831 2831
2832 2832
2833 2833 class _spanset(abstractsmartset):
2834 2834 """Duck type for baseset class which represents a range of revisions and
2835 2835 can work lazily and without having all the range in memory
2836 2836
2837 2837     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2838 2838     notable points:
2839 2839     - when x > y it will be automatically descending,
2840 2840     - revisions filtered with this repoview will be skipped.
2841 2841
2842 2842 """
2843 2843 def __init__(self, repo, start=0, end=None):
2844 2844 """
2845 2845         start: first revision included in the set
2846 2846         (default to 0)
2847 2847         end: first revision excluded (last+1)
2848 2848         (default to len(repo))
2849 2849
2850 2850 Spanset will be descending if `end` < `start`.
2851 2851 """
2852 2852 if end is None:
2853 2853 end = len(repo)
2854 2854 self._ascending = start <= end
2855 2855 if not self._ascending:
2856 2856             start, end = end + 1, start + 1
2857 2857 self._start = start
2858 2858 self._end = end
2859 2859 self._hiddenrevs = repo.changelog.filteredrevs
2860 2860
2861 2861 def sort(self, reverse=False):
2862 2862 self._ascending = not reverse
2863 2863
2864 2864 def reverse(self):
2865 2865 self._ascending = not self._ascending
2866 2866
2867 2867 def _iterfilter(self, iterrange):
2868 2868 s = self._hiddenrevs
2869 2869 for r in iterrange:
2870 2870 if r not in s:
2871 2871 yield r
2872 2872
2873 2873 def __iter__(self):
2874 2874 if self._ascending:
2875 2875 return self.fastasc()
2876 2876 else:
2877 2877 return self.fastdesc()
2878 2878
2879 2879 def fastasc(self):
2880 2880 iterrange = xrange(self._start, self._end)
2881 2881 if self._hiddenrevs:
2882 2882 return self._iterfilter(iterrange)
2883 2883 return iter(iterrange)
2884 2884
2885 2885 def fastdesc(self):
2886 2886 iterrange = xrange(self._end - 1, self._start - 1, -1)
2887 2887 if self._hiddenrevs:
2888 2888 return self._iterfilter(iterrange)
2889 2889 return iter(iterrange)
2890 2890
2891 2891 def __contains__(self, rev):
2892 2892 hidden = self._hiddenrevs
2893 2893 return ((self._start <= rev < self._end)
2894 2894 and not (hidden and rev in hidden))
2895 2895
2896 2896 def __nonzero__(self):
2897 2897 for r in self:
2898 2898 return True
2899 2899 return False
2900 2900
2901 2901 def __len__(self):
2902 2902 if not self._hiddenrevs:
2903 2903 return abs(self._end - self._start)
2904 2904 else:
2905 2905 count = 0
2906 2906 start = self._start
2907 2907 end = self._end
2908 2908 for rev in self._hiddenrevs:
2909 2909 if (end < rev <= start) or (start <= rev < end):
2910 2910 count += 1
2911 2911 return abs(self._end - self._start) - count
2912 2912
2913 2913 def isascending(self):
2914 2914 return self._start <= self._end
2915 2915
2916 2916 def isdescending(self):
2917 2917 return self._start >= self._end
2918 2918
2919 2919 def first(self):
2920 2920 if self._ascending:
2921 2921 it = self.fastasc
2922 2922 else:
2923 2923 it = self.fastdesc
2924 2924 for x in it():
2925 2925 return x
2926 2926 return None
2927 2927
2928 2928 def last(self):
2929 2929 if self._ascending:
2930 2930 it = self.fastdesc
2931 2931 else:
2932 2932 it = self.fastasc
2933 2933 for x in it():
2934 2934 return x
2935 2935 return None
2936 2936
2937 2937 class fullreposet(_spanset):
2938 2938 """a set containing all revisions in the repo
2939 2939
2940 2940 This class exists to host special optimization.
2941 2941 """
2942 2942
2943 2943 def __init__(self, repo):
2944 2944 super(fullreposet, self).__init__(repo)
2945 2945
2946 2946 def __and__(self, other):
2947 2947 """As self contains the whole repo, all of the other set should also be
2948 2948 in self. Therefore `self & other = other`.
2949 2949
2950 2950 This boldly assumes the other contains valid revs only.
2951 2951 """
2952 2952         # if other is not a smartset, make it so
2953 2953         if not util.safehasattr(other, 'isascending'):
2954 2954             # filter out hidden revisions
2955 2955             # (this boldly assumes all smartsets are pure)
2956 2956             #
2957 2957             # `other` was used with "&", let's assume this is a set-like
2958 2958             # object.
2959 2959 other = baseset(other - self._hiddenrevs)
2960 2960
2961 2961 if self.isascending():
2962 2962 other.sort()
2963 2963 else:
2964 2964             other.sort(reverse=True)
2965 2965 return other
2966 2966
2967 2967 # tell hggettext to extract docstrings from these functions:
2968 2968 i18nfunctions = symbols.values()