##// END OF EJS Templates
revset: cosmetic changes in spanset range comparison...
Pierre-Yves David -
r21284:3e53a643 default
parent child Browse files
Show More
@@ -1,2860 +1,2860 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # With followfirst, slice parentrevs() to the first parent only.
    cut = followfirst and 1 or None
    cl = repo.changelog

    def iterate():
        revqueue, revsnode = None, None
        h = []

        # Consume the input revs highest-first; the heap holds negated
        # revs so the largest rev pops first from Python's min-heap.
        revs.descending()
        revqueue = util.deque(revs)
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        # nullrev is pre-seeded so parents equal to it are never yielded.
        seen = set([node.nullrev])
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                # When the current input rev is reached, feed the next
                # queued rev into the heap to keep the merge ordered.
                if revsnode and current == revsnode:
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    # The generator produces revs in descending order.
    return _descgeneratorset(iterate())
50 50
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # With followfirst, slice parentrevs() to the first parent only.
    cut = followfirst and 1 or None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending sweep: a rev is a descendant iff one of
            # its (possibly cut) parents was already marked as seen.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    # Revs are generated in ascending order.
    return _ascgeneratorset(iterate())
74 74
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset([])
    parentrevs = repo.changelog.parentrevs
    visit = baseset(heads)
    reachable = set()
    seen = {}
    # No parent below min(roots) can be on a path, so prune there.
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        # seen maps rev -> its parents for the second (upward) pass.
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset([])
    # Ascending sweep: a visited rev is on a path iff one of its
    # parents already is.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
104 104
# Parser table: token -> (binding strength, prefix, infix, suffix)
# handlers, consumed by the generic parser in parser.py.
elements = {
    "(": (20, ("group", 1, ")"), ("func", 1, ")")),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# Words that are operators, not symbols, when unquoted.
keywords = set(['and', 'or', 'not'])
130 130
def tokenize(program, lookup=None):
    '''
    Parse a revset statement into a stream of tokens

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are raw: no escape decoding is applied.
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c.isalnum() or c in '._@' or ord(c) > 127:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit each dash-separated
                    # part as a symbol with '-' operators between them
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one: the outer loop will advance pos past the
            # character that terminated the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
213 213
214 214 # helpers
215 215
def getstring(x, err):
    """Extract the text of a 'string' or 'symbol' parse node.

    Raises ParseError with message *err* for any other node.
    """
    if not x:
        raise error.ParseError(err)
    if x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
220 220
def getlist(x):
    """Flatten a left-nested 'list' parse node into a Python list.

    A non-list node becomes a one-element list; an empty node yields [].
    """
    items = []
    # 'list' nodes nest on the left: ('list', rest, last). Walk down,
    # collecting the right-hand elements, then reverse into order.
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
227 227
def getargs(x, min, max, err):
    """Flatten argument node x and check its arity.

    Raises ParseError(err) unless min <= len(args), and, when max is
    non-negative, len(args) <= max (max < 0 means unbounded).
    """
    args = getlist(x)
    count = len(args)
    if count < min:
        raise error.ParseError(err)
    if 0 <= max < count:
        raise error.ParseError(err)
    return args
233 233
def getset(repo, subset, x):
    # Evaluate parse-tree node x against subset, dispatching on the node
    # type through the methods table.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # Smartset-like results (anything exposing .set()) pass through;
    # plain iterables are wrapped so callers always get the baseset API.
    if util.safehasattr(s, 'set'):
        return s
    return baseset(s)
241 241
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
def stringset(repo, subset, x):
    # Resolve the identifier to a rev number.
    x = repo[x].rev()
    # The null revision is only returned when the subset spans the
    # whole repo (it is not a member of any real subset).
    if x == -1 and len(subset) == len(repo):
        return baseset([-1])
    # A full-repo subset necessarily contains x, so the (possibly
    # costly) membership test can be skipped.
    if len(subset) == len(repo) or x in subset:
        return baseset([x])
    return baseset([])
261 261
def symbolset(repo, subset, x):
    # A bare symbol that collides with a registered function name is a
    # user error; anything else is treated as a revision identifier.
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)
266 266
def rangeset(repo, subset, x, y):
    cl = baseset(repo.changelog)
    m = getset(repo, cl, x)
    n = getset(repo, cl, y)

    if not m or not n:
        return baseset([])
    # The range runs from the first rev of the left operand to the
    # last rev of the right one.
    m, n = m[0], n[-1]

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # m >= n: build a descending spanset; the end bound is
        # exclusive, hence n - 1.
        r = spanset(repo, m, n - 1)
    return r & subset
281 281
def dagrange(repo, subset, x, y):
    # x::y - all revs on a DAG path from x to y, restricted to subset.
    r = spanset(repo)
    xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    s = subset.set()
    return xs.filter(s.__contains__)
287 287
def andset(repo, subset, x, y):
    # Intersection: evaluate y within the result of evaluating x.
    left = getset(repo, subset, x)
    return getset(repo, left, y)
290 290
def orset(repo, subset, x, y):
    # Union: evaluate y only against what x did not already select, so
    # the concatenation below contains no duplicates.
    left = getset(repo, subset, x)
    right = getset(repo, subset - left, y)
    return left + right
295 295
def notset(repo, subset, x):
    # Complement of x within subset.
    excluded = getset(repo, subset, x)
    return subset - excluded
298 298
def listset(repo, subset, a, b):
    # Bare 'list' nodes (comma outside a function call) are invalid.
    raise error.ParseError(_("can't use a list in this context"))
301 301
def func(repo, subset, a, b):
    # a is the function-name node, b its (possibly None) argument tree.
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 indexes into the repo.status() tuple inside checkstatus
    # (presumably the 'added' slot - see checkstatus).
    return checkstatus(repo, subset, pat, 1)
320 320
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = spanset(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # Fold every rev of every argument through pairwise ancestor().
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    # Only report the result if it lies inside the current subset.
    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset([])
345 345
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation of ancestors() and _firstancestors().
    args = getset(repo, spanset(repo), x)
    if not args:
        return baseset([])
    s = _revancestors(repo, args, followfirst)
    return subset.filter(s.__contains__)
352 352
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
358 358
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
363 363
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload must be an integer.
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, baseset(cl), x):
        # Walk n steps up the first-parent chain.
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset.filter(ps.__contains__)
380 380
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # Lower-cased on both sides: the match is case-insensitive.
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
389 389
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0]).set()
    if len(args) == 1:
        # With no second set, exclude every head that is neither in the
        # first set nor descended from it.
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, spanset(repo), args[1])

    # ::include - ::exclude, computed on the changelog DAG.
    results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
    return lazyset(subset, results.__contains__)
410 410
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get returns the revs in the requested bisect state.
    state = set(hbisect.get(repo, status))
    return subset.filter(state.__contains__)
427 427
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # Deprecated alias kept for old revsets; see bisect() above.
    return bisect(repo, subset, x)
432 432
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        if kind == 'literal':
            # Exact name: resolve directly through the bookmark store.
            bmrev = repo._bookmarks.get(bm, None)
            if not bmrev:
                raise util.Abort(_("bookmark '%s' does not exist") % bm)
            bmrev = repo[bmrev].rev()
            return subset.filter(lambda r: r == bmrev)
        else:
            # Pattern: collect every bookmark whose name matches.
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise util.Abort(_("no bookmarks exist that match '%s'")
                                 % pattern)
            bmrevs = set()
            for bmrev in matchrevs:
                bmrevs.add(repo[bmrev].rev())
            return subset & bmrevs

    # No argument: all bookmarked revisions.
    bms = set([repo[r].rev()
               for r in repo._bookmarks.values()])
    return subset.filter(bms.__contains__)
470 470
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(repo[r].branch()))
        else:
            return subset.filter(lambda r: matcher(repo[r].branch()))

    # Set form: collect the branches of the given revs, then select all
    # revs on any of those branches (plus the given revs themselves).
    s = getset(repo, spanset(repo), x)
    b = set()
    for r in s:
        b.add(repo[r].branch())
    s = s.set()
    return subset.filter(lambda r: r in s or repo[r].branch() in b)
501 501
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # The obsstore maintains this category; just intersect with subset.
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
512 512
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # Only bundlerepo changelogs carry this attribute.
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
524 524
def checkstatus(repo, subset, pat, field):
    # Select revs whose status tuple (repo.status()[field]) contains a
    # file matching pat. Used by adds()/modifies()/removes()-style
    # predicates via the field index.
    hasset = matchmod.patkind(pat) == 'set'

    def matches(x):
        # NOTE(review): m is always None here, so the condition below is
        # always true; looks like a leftover from a cached-matcher
        # version - confirm before simplifying.
        m = None
        fname = None
        c = repo[x]
        if not m or hasset:
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            # A single literal file lets us use cheap name lookups
            # instead of running the matcher over every file.
            if not m.anypats() and len(m.files()) == 1:
                fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
555 555
def _children(repo, narrow, parentset):
    # Return every rev in narrow having a parent in parentset.
    cs = set()
    if not parentset:
        return baseset(cs)
    pr = repo.changelog.parentrevs
    # A child always has a higher rev than its parents, so anything at
    # or below min(parentset) can be skipped.
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
569 569
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, baseset(repo), x).set()
    cs = _children(repo, subset, s)
    return subset & cs
577 577
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
585 585
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # Kind-less pattern: treat as a literal path; a manifest
            # membership test is much cheaper than running a matcher.
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
612 612
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # Prefix comparison so a short id still matches the full one.
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
634 634
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    # Compile the interval once; inrange tests a unix timestamp.
    inrange = util.matchdate(spec)
    return subset.filter(lambda r: inrange(repo[r].date()[0]))
643 643
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # Lower-case both needle and haystack for case-insensitive search.
    needle = encoding.lower(getstring(x, _("desc requires a string")))
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
656 656
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants() and _firstdescendants():
    # the union of the argument revs and their descendants, restricted
    # to subset.
    args = getset(repo, spanset(repo), x)
    if not args:
        return baseset([])
    s = _revdescendants(repo, args, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    args.ascending()

    subsetset = subset.set()
    result = (orderedlazyset(s, subsetset.__contains__, ascending=True) +
              orderedlazyset(args, subsetset.__contains__, ascending=True))

    # Wrap result in a lazyset since it's an _addset, which doesn't implement
    # all the necessary functions to be consumed by callers.
    return orderedlazyset(result, lambda r: True, ascending=True)
674 674
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
680 680
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
685 685
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        args = getset(repo, spanset(repo), x).set()
    else:
        args = getall(repo, spanset(repo), x).set()

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the args.
    # Even if the immediate src of r is not in the args, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage accumulates the chain of revs visited from the
        # candidate down to the matching source.
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in args or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
729 729
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset.filter(divergent.__contains__)
738 738
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    pc = repo._phasecache
    return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
746 746
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
755 755
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # With no value given, label presence alone selects the rev.
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
782 782
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()

    if not matchmod.patkind(pat):
        # Literal path: read a single filelog directly.
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        fl = repo.file(f)
        for fr in fl:
            s.add(fl.linkrev(fr))
    else:
        # Pattern: match against the working-directory file list, then
        # collect linkrevs from every matching filelog.
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        for f in repo[None]:
            if m(f):
                fl = repo.file(f)
                for fr in fl:
                    s.add(fl.linkrev(fr))

    return subset.filter(s.__contains__)
815 815
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)
821 821
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow() and _followfirst(); name is the
    # user-visible predicate name, used only in error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        # Filename given: follow that file's history from the working
        # directory's first parent.
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.linkrev())
        else:
            return baseset([])
    else:
        # No filename: plain ancestors of the working parent.
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset.filter(s.__contains__)
838 838
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')
846 846
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
852 852
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # all() filters nothing out, so the incoming subset is the answer.
    return subset
860 860
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        # Compile once, up front, so a bad pattern fails at parse time.
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error, e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # Search file names, the user field and the description.
        c = repo[x]
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                return True
        return False

    return subset.filter(matches)
881 881
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    hasset = False
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
        # Fileset patterns ('set:...') force per-revision matcher builds.
        if not hasset and matchmod.patkind(value) == 'set':
            hasset = True
    if not default:
        default = 'glob'

    def matches(x):
        # NOTE(review): m is always None at this point, so the matcher
        # is rebuilt on every call; looks like a leftover from a cached
        # variant - confirm before simplifying.
        m = None
        c = repo[x]
        if not m or (hasset and rev is None):
            ctx = c
            if rev is not None:
                ctx = repo[rev or None]
            m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                               exclude=exc, ctx=ctx, default=default)
        for f in c.files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
945 945
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to the generic matcher with a single plain-pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
958 958
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    # collect the head revisions of every named branch
    found = set()
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            found.add(repo[n].rev())
    return baseset(found).filter(subset.__contains__)
969 969
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of a member
    members = getset(repo, subset, x)
    return members - parents(repo, subset, x)
977 977
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
986 986
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # scan changed files, author and description, case-folded
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if needle in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1001 1001
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset.set()
    os = getset(repo, spanset(repo), l[0])
    bs = baseset([])
    it = iter(os)
    # NB: the loop variable must not be named 'x' - that would shadow the
    # parse tree argument above. Only the iterator advance can raise
    # StopIteration, so keep the try body minimal.
    for i in xrange(lim):
        try:
            y = it.next()
        except StopIteration:
            break
        if y in ss:
            bs.append(y)
    return bs
1028 1028
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset.set()
    os = getset(repo, spanset(repo), l[0])
    # walk from the end: reverse, then take the first lim members
    os.reverse()
    bs = baseset([])
    it = iter(os)
    # NB: the loop variable must not be named 'x' - that would shadow the
    # parse tree argument above. Only the iterator advance can raise
    # StopIteration, so keep the try body minimal.
    for i in xrange(lim):
        try:
            y = it.next()
        except StopIteration:
            break
        if y in ss:
            bs.append(y)
    return bs
1056 1056
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if candidates:
        top = candidates.max()
        # only return it when it survives the subset restriction
        if top in subset:
            return baseset([top])
    return baseset([])
1067 1067
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    # a merge has a real (non -1) second parent
    parentrevs = repo.changelog.parentrevs
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1076 1076
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset([])
    cl = repo.changelog
    baserev = min(subset)
    # childcount[rev - baserev] counts children of rev among later revisions
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1093 1093
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, spanset(repo), x)
    if candidates:
        low = candidates.min()
        # only return it when it survives the subset restriction
        if low in subset:
            return baseset([low])
    return baseset([])
1104 1104
def _missingancestors(repo, subset, x):
    # fast path for '::revs - ::bases' produced by the optimizer
    # i18n: "_missingancestors" is a keyword
    revs, bases = getargs(x, 2, 2,
                          _("_missingancestors requires two arguments"))
    everything = baseset(repo)
    revs = getset(repo, everything, revs)
    bases = getset(repo, everything, bases)
    missing = set(repo.changelog.findmissingrevs(bases, revs))
    return baseset([r for r in subset if r in missing])
1114 1114
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects modified files
    return checkstatus(repo, subset, pattern, 0)
1126 1126
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-digit hash: resolve directly
        rn = repo[n].rev()
    else:
        # otherwise attempt an unambiguous prefix match
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)
    return subset.filter(lambda r: r == rn)
1144 1144
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1152 1152
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        args = getall(repo, spanset(repo), x).set()
    else:
        args = getset(repo, spanset(repo), x).set()

    def _firstsrc(rev):
        # follow the source chain back to the original changeset
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    sources = set([_firstsrc(r) for r in args])
    return subset.filter(sources.__contains__)
1180 1180
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    if l:
        # i18n: "outgoing" is a keyword
        dest = getstring(l[0], _("outgoing requires a repository path"))
    else:
        dest = ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence discovery's progress output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missing = set([cl.rev(n) for n in outgoing.missing])
    return subset.filter(missing.__contains__)
1203 1203
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: single first parent
        p = repo[x].p1().rev()
        return subset.filter(lambda r: r == p)

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0] for r in getset(repo, spanset(repo), x))
    return subset & ps
1217 1217
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: second parent exists only during a merge
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            return subset.filter(lambda r: r == p)
        except IndexError:
            return baseset([])

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1] for r in getset(repo, spanset(repo), x))
    return subset & ps
1235 1235
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = tuple(p.rev() for p in repo[x].parents())
        return subset & ps

    cl = repo.changelog
    ps = set()
    for r in getset(repo, spanset(repo), x):
        # both parent revisions (nullrev included, as in the changelog)
        ps.update(cl.parentrevs(r))
    return subset & ps
1249 1249
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    cl = repo.changelog
    ps = set()
    for r in getset(repo, baseset(cl), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
            continue
        prevs = cl.parentrevs(r)
        if n == 1:
            ps.add(prevs[0])
        elif len(prevs) > 1:
            # n == 2: only when a second parent exists
            ps.add(prevs[1])
    return subset & ps
1274 1274
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        result = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and yield nothing instead
        return baseset([])
    return result
1288 1288
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target)
1296 1296
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if l:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means: look up the current branch name remotely
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset([])
1331 1331
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1343 1343
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        # use a distinct name instead of rebinding 'l' to the parsed int
        target = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    return subset.filter(lambda r: r == target)
1357 1357
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, baseset(repo.changelog), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    # stop on the first mismatch: fields are ordered by
                    # increasing cost, so this avoids the expensive ones
                    match = False
                    break
            if match:
                return True
        return False

    return subset.filter(matches)
1469 1469
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # in-place reversal of the evaluated set
    result = getset(repo, subset, x)
    result.reverse()
    return result
1477 1477
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, spanset(repo), x).set()
    # restrict to the revisions also present in the incoming subset
    restricted = baseset([r for r in s if r in subset.set()])
    # a root is any member that is not a child of another member
    cs = _children(repo, restricted, s)
    return restricted - cs
1486 1486
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.secret
    return subset.filter(lambda r: phase(repo, r) == target)
1494 1494
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    args = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(args) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(args[1], _("sort spec must be a string"))

    s = args[0]
    keys = keys.split()

    def invert(s):
        # map a string to one that sorts in the opposite direction
        return "".join(chr(255 - ord(c)) for c in s)

    revs = getset(repo, subset, s)
    # pure revision-number sorts have a fast path
    if keys == ["rev"]:
        revs.sort()
        return revs
    if keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # otherwise decorate each revision with its key tuple, sort, undecorate
    decorated = []
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE: substring test - also accepts e.g. 'user' and 'author'
                # fragments; kept as-is for compatibility
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        decorated.append(e)
    decorated.sort()
    return baseset([e[-1] for e in decorated])
1557 1557
1558 1558 def _stringmatcher(pattern):
1559 1559 """
1560 1560 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1561 1561 returns the matcher name, pattern, and matcher function.
1562 1562 missing or unknown prefixes are treated as literal matches.
1563 1563
1564 1564 helper for tests:
1565 1565 >>> def test(pattern, *tests):
1566 1566 ... kind, pattern, matcher = _stringmatcher(pattern)
1567 1567 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1568 1568
1569 1569 exact matching (no prefix):
1570 1570 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1571 1571 ('literal', 'abcdefg', [False, False, True])
1572 1572
1573 1573 regex matching ('re:' prefix)
1574 1574 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1575 1575 ('re', 'a.+b', [False, False, True])
1576 1576
1577 1577 force exact matches ('literal:' prefix)
1578 1578 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1579 1579 ('literal', 're:foobar', [False, True])
1580 1580
1581 1581 unknown prefixes are ignored and treated as literals
1582 1582 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1583 1583 ('literal', 'foo:bar', [False, False, True])
1584 1584 """
1585 1585 if pattern.startswith('re:'):
1586 1586 pattern = pattern[3:]
1587 1587 try:
1588 1588 regex = re.compile(pattern)
1589 1589 except re.error, e:
1590 1590 raise error.ParseError(_('invalid regular expression: %s')
1591 1591 % e)
1592 1592 return 're', pattern, regex.search
1593 1593 elif pattern.startswith('literal:'):
1594 1594 pattern = pattern[8:]
1595 1595 return 'literal', pattern, pattern.__eq__
1596 1596
def _substringmatcher(pattern):
    # like _stringmatcher, but a 'literal' kind matches as a substring
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
1602 1602
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
        return subset & s
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise util.Abort(_("tag '%s' does not exist") % pattern)
        s = set([repo[tn].rev()])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    return subset & s
1630 1630
def tagged(repo, subset, x):
    # plain alias for the tag() predicate
    return tag(repo, subset, x)
1633 1633
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
1642 1642
1643 1643
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # plain alias for the author() predicate
    return author(repo, subset, x)
1653 1653
# for internal use
def _list(repo, subset, x):
    # expand a NUL-separated list of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset([])
    wanted = [repo[r].rev() for r in s.split('\0')]
    present = subset.set()
    return baseset([r for r in wanted if r in present])
1662 1662
# for internal use
def _intlist(repo, subset, x):
    # expand a NUL-separated list of integer revision numbers
    s = getstring(x, "internal error")
    if not s:
        return baseset([])
    wanted = [int(r) for r in s.split('\0')]
    present = subset.set()
    return baseset([r for r in wanted if r in present])
1671 1671
# for internal use
def _hexlist(repo, subset, x):
    # expand a NUL-separated list of binary node hashes
    s = getstring(x, "internal error")
    if not s:
        return baseset([])
    cl = repo.changelog
    wanted = [cl.rev(node.bin(r)) for r in s.split('\0')]
    present = subset.set()
    return baseset([r for r in wanted if r in present])
1681 1681
# revset predicate name -> implementation; kept alphabetized (internal
# underscore-prefixed entries first) for easy scanning
symbols = {
    "_firstancestors": _firstancestors,
    "_firstdescendants": _firstdescendants,
    "_followfirst": _followfirst,
    "_hexlist": _hexlist,
    "_intlist": _intlist,
    "_list": _list,
    "_matchfiles": _matchfiles,
    "_missingancestors": _missingancestors,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "matching": matching,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "secret": secret,
    "sort": sort,
    "tag": tag,
    "tagged": tagged,
    "unstable": unstable,
    "user": user,
}
1753 1753
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# (kept alphabetized, internal underscore-prefixed entries first)
safesymbols = set([
    "_firstancestors",
    "_firstdescendants",
    "_followfirst",
    "_hexlist",
    "_intlist",
    "_list",
    "_matchfiles",
    "_missingancestors",
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "matching",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "secret",
    "sort",
    "tag",
    "tagged",
    "unstable",
    "user",
    ])
1825 1825
# parse tree node type -> evaluation function (alphabetized)
methods = {
    "ancestor": ancestorspec,
    "and": andset,
    "dagrange": dagrange,
    "func": func,
    "list": listset,
    "not": notset,
    "or": orset,
    "parent": parentspec,
    "parentpost": p1,
    "range": rangeset,
    "string": stringset,
    "symbol": symbolset,
}
1840 1840
def optimize(x, small):
    """Rewrite parse tree x into an equivalent, cheaper-to-evaluate tree.

    Returns a (weight, tree) pair.  The weight is a rough relative cost
    estimate used to put the cheaper operand of commutative operators
    first.  'small' hints that the caller expects the subexpression to
    match few revisions, making single-revision operands more attractive.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5  # favor single revisions inside an 'and'

    op = x[0]
    if op == 'minus':
        # 'a - b' is evaluated as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'dagrangepre':
        # '::a' -> ancestors(a)
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'a::' -> descendants(a)
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        # ':a' -> '0:a'
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        # 'a:' -> 'a:tip'
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # substring membership test; safe because op comes from a fixed
        # set of token names none of which is a substring of this string
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def ismissingancestors(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if ismissingancestors(ta, tb):
            return w, ('func', ('symbol', '_missingancestors'),
                       ('list', ta[2], tb[1][2]))
        if ismissingancestors(tb, ta):
            return w, ('func', ('symbol', '_missingancestors'),
                       ('list', tb[2], ta[1][2]))

        # evaluate the cheaper (narrower) operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        # NOTE(review): only the weights are swapped here, not the
        # operand trees, so operand order is preserved for 'or'
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses are transparent
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # weight table: larger numbers mean slower predicates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
1935 1935
1936 1936 _aliasarg = ('func', ('symbol', '_aliasarg'))
1937 1937 def _getaliasarg(tree):
1938 1938 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1939 1939 return X, None otherwise.
1940 1940 """
1941 1941 if (len(tree) == 3 and tree[:2] == _aliasarg
1942 1942 and tree[2][0] == 'string'):
1943 1943 return tree[2][1]
1944 1944 return None
1945 1945
def _checkaliasarg(tree, known=None):
    """Reject parse trees that smuggle in _aliasarg placeholders.

    Walks tree recursively and raises ParseError on any _aliasarg node
    whose value is not listed in known.  Used to avoid alias placeholder
    injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.ParseError(_("not a function: %s") % '_aliasarg')
    for subtree in tree:
        _checkaliasarg(subtree, known)
1956 1956
class revsetalias(object):
    # matches a parameterized alias declaration such as "name(arg1, arg2)"
    funcre = re.compile('^([^(]+)\(([^)]+)\)$')
    # list of argument names for parameterized aliases, None for plain ones
    args = None

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        m = self.funcre.search(name)
        if m:
            self.name = m.group(1)
            self.tree = ('func', ('symbol', m.group(1)))
            self.args = [x.strip() for x in m.group(2).split(',')]
            for arg in self.args:
                # _aliasarg() is an unknown symbol only used separate
                # alias argument placeholders from regular strings.
                # NOTE(review): plain str.replace - an argument name that
                # is a substring of another (e.g. $1 vs $10) could clash;
                # confirm naming constraints before relying on this.
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))
        else:
            self.name = name
            self.tree = ('symbol', name)

        self.replacement, pos = parse(value)
        if pos != len(value):
            raise error.ParseError(_('invalid token'), pos)
        # Check for placeholder injection
        _checkaliasarg(self.replacement, self.args)
1985 1985
1986 1986 def _getalias(aliases, tree):
1987 1987 """If tree looks like an unexpanded alias, return it. Return None
1988 1988 otherwise.
1989 1989 """
1990 1990 if isinstance(tree, tuple) and tree:
1991 1991 if tree[0] == 'symbol' and len(tree) == 2:
1992 1992 name = tree[1]
1993 1993 alias = aliases.get(name)
1994 1994 if alias and alias.args is None and alias.tree == tree:
1995 1995 return alias
1996 1996 if tree[0] == 'func' and len(tree) > 1:
1997 1997 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1998 1998 name = tree[1][1]
1999 1999 alias = aliases.get(name)
2000 2000 if alias and alias.args is not None and alias.tree == tree[:2]:
2001 2001 return alias
2002 2002 return None
2003 2003
def _expandargs(tree, args):
    """Substitute alias argument placeholders in tree, recursively.

    Every _aliasarg node is replaced by the value registered under the
    same name in the args mapping.
    """
    if not (tree and isinstance(tree, tuple)):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2014 2014
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.
    'expanding' is the stack of aliases currently being expanded, used
    to detect cyclic definitions.
    'cache' memoizes fully-expanded replacement trees by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # parameterized alias: expand each actual argument, then
            # splice the results into the cached replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2046 2046
def findaliases(ui, tree):
    """Expand every [revsetalias] configured in ui inside tree."""
    _checkaliasarg(tree)
    aliases = {}
    for declname, definition in ui.configitems('revsetalias'):
        entry = revsetalias(declname, definition)
        aliases[entry.name] = entry
    return _expandaliases(aliases, tree, [], {})
2054 2054
def parse(spec, lookup=None):
    """Parse a revset spec string.

    Returns (tree, pos) where pos is the index just past the last
    consumed character.
    """
    revparser = parser.parser(tokenize, elements)
    return revparser.parse(spec, lookup=lookup)
2058 2058
def match(ui, spec, repo=None):
    """Compile spec into a callable mapping (repo, subset) -> revisions.

    When ui is given its [revsetalias] configuration is expanded; the
    parse tree is optimized before being wrapped.  Raises ParseError on
    an empty or syntactically invalid spec.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    tree, pos = parse(spec, lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        # plain lists are promoted to a smartset before evaluation
        if not util.safehasattr(subset, 'set'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2076 2076
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def _quote(value):
        return repr(str(value))

    def _fmt(code, value):
        # render a single argument according to its format code
        if code == 'd':
            return str(int(value))
        if code == 's':
            return _quote(value)
        if code == 'r':
            parse(value) # make sure syntax errors are confined
            return '(%s)' % value
        if code == 'n':
            return _quote(node.hex(value))
        if code == 'b':
            return _quote(value.branch())

    def _fmtlist(seq, code):
        # render a list argument; %lr lists recurse into halves joined
        # by 'or', every other type has a compact _list/_intlist/_hexlist
        # spelling
        count = len(seq)
        if count == 0:
            return "_list('')"
        if count == 1:
            return _fmt(code, seq[0])
        if code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in seq)
        if code == 's':
            return "_list('%s')" % "\0".join(seq)
        if code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in seq)
        if code == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in seq)

        half = count // 2
        return '(%s or %s)' % (_fmtlist(seq[:half], code),
                               _fmtlist(seq[half:], code))

    out = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        ch = expr[pos]
        if ch == '%':
            pos += 1
            code = expr[pos]
            if code == '%':
                out.append(code)
            elif code in 'dsnbr':
                out.append(_fmt(code, args[argindex]))
                argindex += 1
            elif code == 'l':
                # a list of some type
                pos += 1
                code = expr[pos]
                out.append(_fmtlist(list(args[argindex]), code))
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s'
                                 % code)
        else:
            out.append(ch)
        pos += 1

    return ''.join(out)
2170 2170
def prettyformat(tree):
    """Render a parsed revset tree as an indented multi-line string."""
    lines = []

    def _walk(subtree, depth):
        if not isinstance(subtree, tuple) or subtree[0] in ('string', 'symbol'):
            lines.append((depth, str(subtree)))
            return
        lines.append((depth, '(%s' % subtree[0]))
        for child in subtree[1:]:
            _walk(child, depth + 1)
        # close the paren on the last emitted line
        lines[-1] = (lines[-1][0], lines[-1][1] + ')')

    _walk(tree, 0)
    return '\n'.join('  ' * indent + text for indent, text in lines)
2185 2185
def depth(tree):
    """Return the nesting depth of a parse tree (non-tuples count as 0)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2191 2191
def funcsused(tree):
    """Return the set of function names used anywhere in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2202 2202
class baseset(list):
    """In-memory, list-backed smartset.

    Implements the smartset contract on top of a plain list, keeping a
    lazily built set for fast membership tests.  Every method here must
    be provided by any smartset class.
    """
    def __init__(self, data=()):
        super(baseset, self).__init__(data)
        self._set = None

    def ascending(self):
        """Sort the set in ascending order, in place (smartset API)."""
        self.sort()

    def descending(self):
        """Sort the set in descending order, in place (smartset API)."""
        self.sort(reverse=True)

    def min(self):
        return min(self)

    def max(self):
        return max(self)

    def set(self):
        """Return a set of all elements, built lazily and cached; the
        fastest structure for membership testing (smartset API)."""
        if not self._set:
            self._set = set(self)
        return self._set

    def __sub__(self, other):
        """Return a new baseset holding elements of self absent from
        other (smartset API)."""
        if isinstance(other, baseset):
            excluded = other.set()
        else:
            excluded = set(other)
        return baseset(self.set() - excluded)

    def __and__(self, other):
        """Return a new baseset holding elements present in both
        collections, preserving self's order (smartset API)."""
        if isinstance(other, baseset):
            other = other.set()
        return baseset(rev for rev in self if rev in other)

    def __add__(self, other):
        """Return a new baseset holding the union of both collections
        (smartset API)."""
        known = self.set()
        extra = [rev for rev in other if rev not in known]
        return baseset(list(self) + extra)

    def isascending(self):
        """A plain baseset promises no particular order (smartset API)."""
        return False

    def isdescending(self):
        """A plain baseset promises no particular order (smartset API)."""
        return False

    def filter(self, condition):
        """Return this smartset filtered by condition, as a lazyset.

        condition is a callable taking a revision number and returning
        a boolean (smartset API).
        """
        return lazyset(self, condition)
2288 2288
2289 2289 class _orderedsetmixin(object):
2290 2290 """Mixin class with utility methods for smartsets
2291 2291
2292 2292 This should be extended by smartsets which have the isascending(),
2293 2293 isdescending() and reverse() methods"""
2294 2294
2295 2295 def _first(self):
2296 2296 """return the first revision in the set"""
2297 2297 for r in self:
2298 2298 return r
2299 2299 raise ValueError('arg is an empty sequence')
2300 2300
2301 2301 def _last(self):
2302 2302 """return the last revision in the set"""
2303 2303 self.reverse()
2304 2304 m = self._first()
2305 2305 self.reverse()
2306 2306 return m
2307 2307
2308 2308 def min(self):
2309 2309 """return the smallest element in the set"""
2310 2310 if self.isascending():
2311 2311 return self._first()
2312 2312 return self._last()
2313 2313
2314 2314 def max(self):
2315 2315 """return the largest element in the set"""
2316 2316 if self.isascending():
2317 2317 return self._last()
2318 2318 return self._first()
2319 2319
2320 2320 class lazyset(object):
2321 2321 """Duck type for baseset class which iterates lazily over the revisions in
2322 2322 the subset and contains a function which tests for membership in the
2323 2323 revset
2324 2324 """
2325 2325 def __init__(self, subset, condition=lambda x: True):
2326 2326 """
2327 2327 condition: a function that decide whether a revision in the subset
2328 2328 belongs to the revset or not.
2329 2329 """
2330 2330 self._subset = subset
2331 2331 self._condition = condition
2332 2332 self._cache = {}
2333 2333
2334 2334 def ascending(self):
2335 2335 self._subset.sort()
2336 2336
2337 2337 def descending(self):
2338 2338 self._subset.sort(reverse=True)
2339 2339
2340 2340 def min(self):
2341 2341 return min(self)
2342 2342
2343 2343 def max(self):
2344 2344 return max(self)
2345 2345
2346 2346 def __contains__(self, x):
2347 2347 c = self._cache
2348 2348 if x not in c:
2349 2349 c[x] = x in self._subset and self._condition(x)
2350 2350 return c[x]
2351 2351
2352 2352 def __iter__(self):
2353 2353 cond = self._condition
2354 2354 for x in self._subset:
2355 2355 if cond(x):
2356 2356 yield x
2357 2357
2358 2358 def __and__(self, x):
2359 2359 return lazyset(self, x.__contains__)
2360 2360
2361 2361 def __sub__(self, x):
2362 2362 return lazyset(self, lambda r: r not in x)
2363 2363
2364 2364 def __add__(self, x):
2365 2365 return _addset(self, x)
2366 2366
2367 2367 def __nonzero__(self):
2368 2368 for r in self:
2369 2369 return True
2370 2370 return False
2371 2371
2372 2372 def __len__(self):
2373 2373 # Basic implementation to be changed in future patches.
2374 2374 l = baseset([r for r in self])
2375 2375 return len(l)
2376 2376
2377 2377 def __getitem__(self, x):
2378 2378 # Basic implementation to be changed in future patches.
2379 2379 l = baseset([r for r in self])
2380 2380 return l[x]
2381 2381
2382 2382 def sort(self, reverse=False):
2383 2383 if not util.safehasattr(self._subset, 'sort'):
2384 2384 self._subset = baseset(self._subset)
2385 2385 self._subset.sort(reverse=reverse)
2386 2386
2387 2387 def reverse(self):
2388 2388 self._subset.reverse()
2389 2389
2390 2390 def set(self):
2391 2391 return set([r for r in self])
2392 2392
2393 2393 def isascending(self):
2394 2394 return False
2395 2395
2396 2396 def isdescending(self):
2397 2397 return False
2398 2398
2399 2399 def filter(self, l):
2400 2400 return lazyset(self, l)
2401 2401
class orderedlazyset(_orderedsetmixin, lazyset):
    """lazyset whose underlying subset has a known direction.

    ascending selects whether the wrapped subset is in ascending or
    descending order; derived sets inherit that direction.
    """
    def __init__(self, subset, condition, ascending=True):
        super(orderedlazyset, self).__init__(subset, condition)
        self._ascending = ascending

    def filter(self, l):
        return orderedlazyset(self, l, ascending=self._ascending)

    def ascending(self):
        # flip in place only if currently descending
        if not self._ascending:
            self.reverse()

    def descending(self):
        # flip in place only if currently ascending
        if self._ascending:
            self.reverse()

    def __and__(self, x):
        return orderedlazyset(self, x.__contains__,
                              ascending=self._ascending)

    def __sub__(self, x):
        return orderedlazyset(self, lambda r: r not in x,
                              ascending=self._ascending)

    def __add__(self, x):
        kwargs = {}
        if self.isascending() and x.isascending():
            kwargs['ascending'] = True
        if self.isdescending() and x.isdescending():
            kwargs['ascending'] = False
        return _addset(self, x, **kwargs)

    def sort(self, reverse=False):
        # re-sort only when the requested direction differs from the
        # current one
        if bool(reverse) == bool(self._ascending):
            self._subset.sort(reverse=reverse)
        self._ascending = not reverse

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def reverse(self):
        self._subset.reverse()
        self._ascending = not self._ascending
2455 2455
class _addset(_orderedsetmixin):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    This class does not duck-type baseset and it's only supposed to be used
    internally
    """
    def __init__(self, revs1, revs2, ascending=None):
        # ascending is tri-state: None (unknown order), True or False
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None

    def __len__(self):
        return len(self._list)

    @util.propertycache
    def _list(self):
        # materialize the merged iteration once and cache it
        if not self._genlist:
            self._genlist = baseset(self._iterator())
        return self._genlist

    def filter(self, condition):
        # keep order information on the result when we have it
        if self._ascending is not None:
            return orderedlazyset(self, condition, ascending=self._ascending)
        return lazyset(self, condition)

    def ascending(self):
        if self._ascending is None:
            self.sort()
            self._ascending = True
        else:
            if not self._ascending:
                self.reverse()

    def descending(self):
        if self._ascending is None:
            self.sort(reverse=True)
            self._ascending = False
        else:
            if self._ascending:
                self.reverse()

    def __and__(self, other):
        filterfunc = other.__contains__
        if self._ascending is not None:
            return orderedlazyset(self, filterfunc, ascending=self._ascending)
        return lazyset(self, filterfunc)

    def __sub__(self, other):
        filterfunc = lambda r: r not in other
        if self._ascending is not None:
            return orderedlazyset(self, filterfunc, ascending=self._ascending)
        return lazyset(self, filterfunc)

    def __add__(self, other):
        """When both collections are ascending or descending, preserve the order
        """
        kwargs = {}
        if self._ascending is not None:
            if self.isascending() and other.isascending():
                kwargs['ascending'] = True
            if self.isdescending() and other.isdescending():
                kwargs['ascending'] = False
        return _addset(self, other, **kwargs)

    def _iterator(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if not self._iter:
            def gen():
                if self._ascending is None:
                    # unordered: all of r1 first, then the part of r2
                    # that r1 does not already contain
                    for r in self._r1:
                        yield r
                    s = self._r1.set()
                    for r in self._r2:
                        if r not in s:
                            yield r
                else:
                    # ordered: classic two-iterator merge that emits each
                    # shared value only once
                    iter1 = iter(self._r1)
                    iter2 = iter(self._r2)

                    val1 = None
                    val2 = None

                    choice = max
                    if self._ascending:
                        choice = min
                    try:
                        # Consume both iterators in an ordered way until one is
                        # empty
                        while True:
                            if val1 is None:
                                val1 = iter1.next()
                            if val2 is None:
                                val2 = iter2.next()
                            # NOTE: 'next' shadows the builtin; kept as-is
                            next = choice(val1, val2)
                            yield next
                            if val1 == next:
                                val1 = None
                            if val2 == next:
                                val2 = None
                    except StopIteration:
                        # Flush any remaining values and consume the other one
                        it = iter2
                        if val1 is not None:
                            yield val1
                            it = iter1
                        elif val2 is not None:
                            # might have been equality and both are empty
                            yield val2
                        for val in it:
                            yield val

            self._iter = _generatorset(gen())

        return self._iter

    def __iter__(self):
        # prefer the materialized list when it already exists
        if self._genlist:
            return iter(self._genlist)
        return iter(self._iterator())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def set(self):
        return self

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        if self._ascending is None:
            self._list.sort(reverse=reverse)
            self._ascending = not reverse
        else:
            if bool(self._ascending) == bool(reverse):
                self.reverse()

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        self._list.reverse()
        if self._ascending is not None:
            self._ascending = not self._ascending
2622 2622
class _generatorset(object):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator

    This class does not duck-type baseset and it's only supposed to be used
    internally
    """
    def __init__(self, gen):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        # membership answers discovered so far
        self._cache = {}
        # values yielded so far, in generation order
        self._genlist = baseset([])
        # True once the wrapped generator is exhausted
        self._finished = False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._finished:
            for x in self._genlist:
                yield x
            return

        # replay the already-generated prefix, then pull fresh values
        # through the shared _consumegen so every iterator stays in sync
        i = 0
        genlist = self._genlist
        consume = self._consumegen()
        while True:
            if i < len(genlist):
                yield genlist[i]
            else:
                yield consume.next()
            i += 1

    def _consumegen(self):
        # single point where the wrapped generator is advanced; records
        # every produced value in both the cache and the ordered list
        for item in self._gen:
            self._cache[item] = True
            self._genlist.append(item)
            yield item
        self._finished = True

    def set(self):
        return self

    def sort(self, reverse=False):
        if not self._finished:
            # exhaust the generator so _genlist holds every value
            for i in self:
                continue
        self._genlist.sort(reverse=reverse)
2686 2686
class _ascgeneratorset(_generatorset):
    """Wrap a generator of ascending elements for lazy iteration

    Same structure as _generatorset, but a membership test may stop
    consuming the generator as soon as a value larger than the probe is
    seen: the probe can no longer appear after that.

    This class does not duck-type baseset and it's only supposed to be used
    internally
    """
    def __contains__(self, x):
        cache = self._cache
        if x in cache:
            return cache[x]

        # Use new values only, as existing values would be cached.
        for value in self._consumegen():
            if value == x:
                return True
            if value > x:
                break

        cache[x] = False
        return False
2709 2709
class _descgeneratorset(_generatorset):
    """Wrap a generator of descending elements for lazy iteration

    Same structure as _generatorset, but a membership test may stop
    consuming the generator as soon as a value smaller than the probe is
    seen: the probe can no longer appear after that.

    This class does not duck-type baseset and it's only supposed to be used
    internally
    """
    def __contains__(self, x):
        cache = self._cache
        if x in cache:
            return cache[x]

        # Use new values only, as existing values would be cached.
        for value in self._consumegen():
            if value == x:
                return True
            if value < x:
                break

        cache[x] = False
        return False
2732 2732
class spanset(_orderedsetmixin):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y the span is automatically descending (the class
      docstring previously said x < y; __init__ below and the
      iteration code agree that `end` < `start` means descending),
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        self._start = start
        if end is not None:
            self._end = end
        else:
            # default: span up to the end of the repository
            self._end = len(repo)
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs
2758 2758
    def ascending(self):
        # already ascending when start <= end; otherwise flip in place
        if self._start > self._end:
            self.reverse()

    def descending(self):
        # already descending when start >= end; otherwise flip in place
        if self._start < self._end:
            self.reverse()
2766 2766
2767 2767 def __iter__(self):
2768 2768 if self._start <= self._end:
2769 2769 iterrange = xrange(self._start, self._end)
2770 2770 else:
2771 2771 iterrange = xrange(self._start, self._end, -1)
2772 2772
2773 2773 if self._hiddenrevs:
2774 2774 s = self._hiddenrevs
2775 2775 for r in iterrange:
2776 2776 if r not in s:
2777 2777 yield r
2778 2778 else:
2779 2779 for r in iterrange:
2780 2780 yield r
2781 2781
2782 2782 def __contains__(self, rev):
2783 2783 return (((self._end < rev <= self._start)
2784 2784 or (self._start <= rev < self._end))
2785 2785 and not (self._hiddenrevs and rev in self._hiddenrevs))
2786 2786
2787 2787 def __nonzero__(self):
2788 2788 for r in self:
2789 2789 return True
2790 2790 return False
2791 2791
2792 2792 def __and__(self, x):
2793 2793 if isinstance(x, baseset):
2794 2794 x = x.set()
2795 2795 if self._start <= self._end:
2796 2796 return orderedlazyset(self, x.__contains__)
2797 2797 else:
2798 2798 return orderedlazyset(self, x.__contains__, ascending=False)
2799 2799
2800 2800 def __sub__(self, x):
2801 2801 if isinstance(x, baseset):
2802 2802 x = x.set()
2803 2803 if self._start <= self._end:
2804 2804 return orderedlazyset(self, lambda r: r not in x)
2805 2805 else:
2806 2806 return orderedlazyset(self, lambda r: r not in x, ascending=False)
2807 2807
2808 2808 def __add__(self, x):
2809 2809 kwargs = {}
2810 2810 if self.isascending() and x.isascending():
2811 2811 kwargs['ascending'] = True
2812 2812 if self.isdescending() and x.isdescending():
2813 2813 kwargs['ascending'] = False
2814 2814 return _addset(self, x, **kwargs)
2815 2815
2816 2816 def __len__(self):
2817 2817 if not self._hiddenrevs:
2818 2818 return abs(self._end - self._start)
2819 2819 else:
2820 2820 count = 0
2821 2821 start = self._start
2822 2822 end = self._end
2823 2823 for rev in self._hiddenrevs:
2824 if (end < rev <= start) or (start <= rev and rev < end):
2824 if (end < rev <= start) or (start <= rev < end):
2825 2825 count += 1
2826 2826 return abs(self._end - self._start) - count
2827 2827
2828 2828 def __getitem__(self, x):
2829 2829 # Basic implementation to be changed in future patches.
2830 2830 l = baseset([r for r in self])
2831 2831 return l[x]
2832 2832
2833 2833 def sort(self, reverse=False):
2834 2834 if bool(reverse) != (self._start > self._end):
2835 2835 self.reverse()
2836 2836
2837 2837 def reverse(self):
2838 2838 # Just switch the _start and _end parameters
2839 2839 if self._start <= self._end:
2840 2840 self._start, self._end = self._end - 1, self._start - 1
2841 2841 else:
2842 2842 self._start, self._end = self._end + 1, self._start + 1
2843 2843
2844 2844 def set(self):
2845 2845 return self
2846 2846
2847 2847 def isascending(self):
2848 2848 return self._start < self._end
2849 2849
2850 2850 def isdescending(self):
2851 2851 return self._start > self._end
2852 2852
2853 2853 def filter(self, l):
2854 2854 if self._start <= self._end:
2855 2855 return orderedlazyset(self, l)
2856 2856 else:
2857 2857 return orderedlazyset(self, l, ascending=False)
2858 2858
# tell hggettext to extract docstrings from these functions:
# (the predicate docstrings are presumably shown as user help,
#  so they must be translatable — confirm against hggettext usage)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now