revlog: use context ancestor instead of changelog ancestor...
Mads Kiilerich
r20991:a05d6945 default
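
The functional change in this revision is confined to the ancestor() revset predicate in the hunk below: instead of mapping revisions to nodes by hand and calling changelog.ancestor(), the loop now folds the arguments through changectx objects and their ancestor() method. A minimal before/after sketch (illustration only, not part of the commit; it assumes a repo object and a non-empty list of revision numbers revs):

    # sketch, not from revset.py
    # before: changelog-level ancestor, converting rev <-> node manually
    cl = repo.changelog
    anc = revs[0]
    for r in revs[1:]:
        anc = cl.rev(cl.ancestor(cl.node(anc), cl.node(r)))

    # after: context-level ancestor; changectx.ancestor() handles the conversions
    anc = repo[revs[0]]
    for r in revs[1:]:
        anc = anc.ancestor(repo[r])

Either way, an expression such as hg log -r "ancestor(a, b, c)" exercises this predicate.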
@@ -1,2862 +1,2859 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return _descgeneratorset(iterate())
50 50
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return _ascgeneratorset(iterate())
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset([])
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = baseset(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset([])
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
213 213
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'set'):
239 239 return s
240 240 return baseset(s)
241 241
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset([])
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 cl = baseset(repo.changelog)
269 269 m = getset(repo, cl, x)
270 270 n = getset(repo, cl, y)
271 271
272 272 if not m or not n:
273 273 return baseset([])
274 274 m, n = m[0], n[-1]
275 275
276 276 if m < n:
277 277 r = spanset(repo, m, n + 1)
278 278 else:
279 279 r = spanset(repo, m, n - 1)
280 280 return r & subset
281 281
282 282 def dagrange(repo, subset, x, y):
283 283 r = spanset(repo)
284 284 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
285 285 s = subset.set()
286 286 return xs.filter(lambda r: r in s)
287 287
288 288 def andset(repo, subset, x, y):
289 289 return getset(repo, getset(repo, subset, x), y)
290 290
291 291 def orset(repo, subset, x, y):
292 292 xl = getset(repo, subset, x)
293 293 yl = getset(repo, subset - xl, y)
294 294 return xl + yl
295 295
296 296 def notset(repo, subset, x):
297 297 return subset - getset(repo, subset, x)
298 298
299 299 def listset(repo, subset, a, b):
300 300 raise error.ParseError(_("can't use a list in this context"))
301 301
302 302 def func(repo, subset, a, b):
303 303 if a[0] == 'symbol' and a[1] in symbols:
304 304 return symbols[a[1]](repo, subset, b)
305 305 raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
309 309 def adds(repo, subset, x):
310 310 """``adds(pattern)``
311 311 Changesets that add a file matching pattern.
312 312
313 313 The pattern without explicit kind like ``glob:`` is expected to be
314 314 relative to the current directory and match against a file or a
315 315 directory.
316 316 """
317 317 # i18n: "adds" is a keyword
318 318 pat = getstring(x, _("adds requires a pattern"))
319 319 return checkstatus(repo, subset, pat, 1)
320 320
321 321 def ancestor(repo, subset, x):
322 322 """``ancestor(*changeset)``
323 Greatest common ancestor of the changesets.
323 A greatest common ancestor of the changesets.
324 324
325 325 Accepts 0 or more changesets.
326 326 Will return empty list when passed no args.
327 327 Greatest common ancestor of a single changeset is that changeset.
328 328 """
329 329 # i18n: "ancestor" is a keyword
330 330 l = getlist(x)
331 331 rl = spanset(repo)
332 332 anc = None
333 333
334 334 # (getset(repo, rl, i) for i in l) generates a list of lists
335 rev = repo.changelog.rev
336 ancestor = repo.changelog.ancestor
337 node = repo.changelog.node
338 335 for revs in (getset(repo, rl, i) for i in l):
339 336 for r in revs:
340 337 if anc is None:
341 anc = r
338 anc = repo[r]
342 339 else:
343 anc = rev(ancestor(node(anc), node(r)))
344
345 if anc is not None and anc in subset:
346 return baseset([anc])
340 anc = anc.ancestor(repo[r])
341
342 if anc is not None and anc.rev() in subset:
343 return baseset([anc.rev()])
347 344 return baseset([])
348 345
349 346 def _ancestors(repo, subset, x, followfirst=False):
350 347 args = getset(repo, spanset(repo), x)
351 348 if not args:
352 349 return baseset([])
353 350 s = _revancestors(repo, args, followfirst)
354 351 return subset.filter(lambda r: r in s)
355 352
356 353 def ancestors(repo, subset, x):
357 354 """``ancestors(set)``
358 355 Changesets that are ancestors of a changeset in set.
359 356 """
360 357 return _ancestors(repo, subset, x)
361 358
362 359 def _firstancestors(repo, subset, x):
363 360 # ``_firstancestors(set)``
364 361 # Like ``ancestors(set)`` but follows only the first parents.
365 362 return _ancestors(repo, subset, x, followfirst=True)
366 363
367 364 def ancestorspec(repo, subset, x, n):
368 365 """``set~n``
369 366 Changesets that are the Nth ancestor (first parents only) of a changeset
370 367 in set.
371 368 """
372 369 try:
373 370 n = int(n[1])
374 371 except (TypeError, ValueError):
375 372 raise error.ParseError(_("~ expects a number"))
376 373 ps = set()
377 374 cl = repo.changelog
378 375 for r in getset(repo, baseset(cl), x):
379 376 for i in range(n):
380 377 r = cl.parentrevs(r)[0]
381 378 ps.add(r)
382 379 return subset.filter(lambda r: r in ps)
383 380
384 381 def author(repo, subset, x):
385 382 """``author(string)``
386 383 Alias for ``user(string)``.
387 384 """
388 385 # i18n: "author" is a keyword
389 386 n = encoding.lower(getstring(x, _("author requires a string")))
390 387 kind, pattern, matcher = _substringmatcher(n)
391 388 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
392 389
393 390 def only(repo, subset, x):
394 391 """``only(set, [set])``
395 392 Changesets that are ancestors of the first set that are not ancestors
396 393 of any other head in the repo. If a second set is specified, the result
397 394 is ancestors of the first set that are not ancestors of the second set
398 395 (i.e. ::<set1> - ::<set2>).
399 396 """
400 397 cl = repo.changelog
401 398 args = getargs(x, 1, 2, _('only takes one or two arguments'))
402 399 include = getset(repo, spanset(repo), args[0]).set()
403 400 if len(args) == 1:
404 401 descendants = set(_revdescendants(repo, include, False))
405 402 exclude = [rev for rev in cl.headrevs()
406 403 if not rev in descendants and not rev in include]
407 404 else:
408 405 exclude = getset(repo, spanset(repo), args[1])
409 406
410 407 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
411 408 return lazyset(subset, lambda x: x in results)
412 409
413 410 def bisect(repo, subset, x):
414 411 """``bisect(string)``
415 412 Changesets marked in the specified bisect status:
416 413
417 414 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
418 415 - ``goods``, ``bads`` : csets topologically good/bad
419 416 - ``range`` : csets taking part in the bisection
420 417 - ``pruned`` : csets that are goods, bads or skipped
421 418 - ``untested`` : csets whose fate is yet unknown
422 419 - ``ignored`` : csets ignored due to DAG topology
423 420 - ``current`` : the cset currently being bisected
424 421 """
425 422 # i18n: "bisect" is a keyword
426 423 status = getstring(x, _("bisect requires a string")).lower()
427 424 state = set(hbisect.get(repo, status))
428 425 return subset.filter(lambda r: r in state)
429 426
430 427 # Backward-compatibility
431 428 # - no help entry so that we do not advertise it any more
432 429 def bisected(repo, subset, x):
433 430 return bisect(repo, subset, x)
434 431
435 432 def bookmark(repo, subset, x):
436 433 """``bookmark([name])``
437 434 The named bookmark or all bookmarks.
438 435
439 436 If `name` starts with `re:`, the remainder of the name is treated as
440 437 a regular expression. To match a bookmark that actually starts with `re:`,
441 438 use the prefix `literal:`.
442 439 """
443 440 # i18n: "bookmark" is a keyword
444 441 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
445 442 if args:
446 443 bm = getstring(args[0],
447 444 # i18n: "bookmark" is a keyword
448 445 _('the argument to bookmark must be a string'))
449 446 kind, pattern, matcher = _stringmatcher(bm)
450 447 if kind == 'literal':
451 448 bmrev = repo._bookmarks.get(bm, None)
452 449 if not bmrev:
453 450 raise util.Abort(_("bookmark '%s' does not exist") % bm)
454 451 bmrev = repo[bmrev].rev()
455 452 return subset.filter(lambda r: r == bmrev)
456 453 else:
457 454 matchrevs = set()
458 455 for name, bmrev in repo._bookmarks.iteritems():
459 456 if matcher(name):
460 457 matchrevs.add(bmrev)
461 458 if not matchrevs:
462 459 raise util.Abort(_("no bookmarks exist that match '%s'")
463 460 % pattern)
464 461 bmrevs = set()
465 462 for bmrev in matchrevs:
466 463 bmrevs.add(repo[bmrev].rev())
467 464 return subset & bmrevs
468 465
469 466 bms = set([repo[r].rev()
470 467 for r in repo._bookmarks.values()])
471 468 return subset.filter(lambda r: r in bms)
472 469
473 470 def branch(repo, subset, x):
474 471 """``branch(string or set)``
475 472 All changesets belonging to the given branch or the branches of the given
476 473 changesets.
477 474
478 475 If `string` starts with `re:`, the remainder of the name is treated as
479 476 a regular expression. To match a branch that actually starts with `re:`,
480 477 use the prefix `literal:`.
481 478 """
482 479 try:
483 480 b = getstring(x, '')
484 481 except error.ParseError:
485 482 # not a string, but another revspec, e.g. tip()
486 483 pass
487 484 else:
488 485 kind, pattern, matcher = _stringmatcher(b)
489 486 if kind == 'literal':
490 487 # note: falls through to the revspec case if no branch with
491 488 # this name exists
492 489 if pattern in repo.branchmap():
493 490 return subset.filter(lambda r: matcher(repo[r].branch()))
494 491 else:
495 492 return subset.filter(lambda r: matcher(repo[r].branch()))
496 493
497 494 s = getset(repo, spanset(repo), x)
498 495 b = set()
499 496 for r in s:
500 497 b.add(repo[r].branch())
501 498 s = s.set()
502 499 return subset.filter(lambda r: r in s or repo[r].branch() in b)
503 500
504 501 def bumped(repo, subset, x):
505 502 """``bumped()``
506 503 Mutable changesets marked as successors of public changesets.
507 504
508 505 Only non-public and non-obsolete changesets can be `bumped`.
509 506 """
510 507 # i18n: "bumped" is a keyword
511 508 getargs(x, 0, 0, _("bumped takes no arguments"))
512 509 bumped = obsmod.getrevs(repo, 'bumped')
513 510 return subset & bumped
514 511
515 512 def bundle(repo, subset, x):
516 513 """``bundle()``
517 514 Changesets in the bundle.
518 515
519 516 Bundle must be specified by the -R option."""
520 517
521 518 try:
522 519 bundlerevs = repo.changelog.bundlerevs
523 520 except AttributeError:
524 521 raise util.Abort(_("no bundle provided - specify with -R"))
525 522 return subset & bundlerevs
526 523
527 524 def checkstatus(repo, subset, pat, field):
528 525 hasset = matchmod.patkind(pat) == 'set'
529 526
530 527 def matches(x):
531 528 m = None
532 529 fname = None
533 530 c = repo[x]
534 531 if not m or hasset:
535 532 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
536 533 if not m.anypats() and len(m.files()) == 1:
537 534 fname = m.files()[0]
538 535 if fname is not None:
539 536 if fname not in c.files():
540 537 return False
541 538 else:
542 539 for f in c.files():
543 540 if m(f):
544 541 break
545 542 else:
546 543 return False
547 544 files = repo.status(c.p1().node(), c.node())[field]
548 545 if fname is not None:
549 546 if fname in files:
550 547 return True
551 548 else:
552 549 for f in files:
553 550 if m(f):
554 551 return True
555 552
556 553 return subset.filter(matches)
557 554
558 555 def _children(repo, narrow, parentset):
559 556 cs = set()
560 557 if not parentset:
561 558 return baseset(cs)
562 559 pr = repo.changelog.parentrevs
563 560 minrev = min(parentset)
564 561 for r in narrow:
565 562 if r <= minrev:
566 563 continue
567 564 for p in pr(r):
568 565 if p in parentset:
569 566 cs.add(r)
570 567 return baseset(cs)
571 568
572 569 def children(repo, subset, x):
573 570 """``children(set)``
574 571 Child changesets of changesets in set.
575 572 """
576 573 s = getset(repo, baseset(repo), x).set()
577 574 cs = _children(repo, subset, s)
578 575 return subset & cs
579 576
580 577 def closed(repo, subset, x):
581 578 """``closed()``
582 579 Changeset is closed.
583 580 """
584 581 # i18n: "closed" is a keyword
585 582 getargs(x, 0, 0, _("closed takes no arguments"))
586 583 return subset.filter(lambda r: repo[r].closesbranch())
587 584
588 585 def contains(repo, subset, x):
589 586 """``contains(pattern)``
590 587 Revision contains a file matching pattern. See :hg:`help patterns`
591 588 for information about file patterns.
592 589
593 590 The pattern without explicit kind like ``glob:`` is expected to be
594 591 relative to the current directory and match against a file exactly
595 592 for efficiency.
596 593 """
597 594 # i18n: "contains" is a keyword
598 595 pat = getstring(x, _("contains requires a pattern"))
599 596
600 597 def matches(x):
601 598 if not matchmod.patkind(pat):
602 599 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
603 600 if pats in repo[x]:
604 601 return True
605 602 else:
606 603 c = repo[x]
607 604 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
608 605 for f in c.manifest():
609 606 if m(f):
610 607 return True
611 608 return False
612 609
613 610 return subset.filter(matches)
614 611
615 612 def converted(repo, subset, x):
616 613 """``converted([id])``
617 614 Changesets converted from the given identifier in the old repository if
618 615 present, or all converted changesets if no identifier is specified.
619 616 """
620 617
621 618 # There is exactly no chance of resolving the revision, so do a simple
622 619 # string compare and hope for the best
623 620
624 621 rev = None
625 622 # i18n: "converted" is a keyword
626 623 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
627 624 if l:
628 625 # i18n: "converted" is a keyword
629 626 rev = getstring(l[0], _('converted requires a revision'))
630 627
631 628 def _matchvalue(r):
632 629 source = repo[r].extra().get('convert_revision', None)
633 630 return source is not None and (rev is None or source.startswith(rev))
634 631
635 632 return subset.filter(lambda r: _matchvalue(r))
636 633
637 634 def date(repo, subset, x):
638 635 """``date(interval)``
639 636 Changesets within the interval, see :hg:`help dates`.
640 637 """
641 638 # i18n: "date" is a keyword
642 639 ds = getstring(x, _("date requires a string"))
643 640 dm = util.matchdate(ds)
644 641 return subset.filter(lambda x: dm(repo[x].date()[0]))
645 642
646 643 def desc(repo, subset, x):
647 644 """``desc(string)``
648 645 Search commit message for string. The match is case-insensitive.
649 646 """
650 647 # i18n: "desc" is a keyword
651 648 ds = encoding.lower(getstring(x, _("desc requires a string")))
652 649
653 650 def matches(x):
654 651 c = repo[x]
655 652 return ds in encoding.lower(c.description())
656 653
657 654 return subset.filter(matches)
658 655
659 656 def _descendants(repo, subset, x, followfirst=False):
660 657 args = getset(repo, spanset(repo), x)
661 658 if not args:
662 659 return baseset([])
663 660 s = _revdescendants(repo, args, followfirst)
664 661
665 662 # Both sets need to be ascending in order to lazily return the union
666 663 # in the correct order.
667 664 args.ascending()
668 665
669 666 subsetset = subset.set()
670 667 result = (orderedlazyset(s, subsetset.__contains__, ascending=True) +
671 668 orderedlazyset(args, subsetset.__contains__, ascending=True))
672 669
673 670 # Wrap result in a lazyset since it's an _addset, which doesn't implement
674 671 # all the necessary functions to be consumed by callers.
675 672 return orderedlazyset(result, lambda r: True, ascending=True)
676 673
677 674 def descendants(repo, subset, x):
678 675 """``descendants(set)``
679 676 Changesets which are descendants of changesets in set.
680 677 """
681 678 return _descendants(repo, subset, x)
682 679
683 680 def _firstdescendants(repo, subset, x):
684 681 # ``_firstdescendants(set)``
685 682 # Like ``descendants(set)`` but follows only the first parents.
686 683 return _descendants(repo, subset, x, followfirst=True)
687 684
688 685 def destination(repo, subset, x):
689 686 """``destination([set])``
690 687 Changesets that were created by a graft, transplant or rebase operation,
691 688 with the given revisions specified as the source. Omitting the optional set
692 689 is the same as passing all().
693 690 """
694 691 if x is not None:
695 692 args = getset(repo, spanset(repo), x).set()
696 693 else:
697 694 args = getall(repo, spanset(repo), x).set()
698 695
699 696 dests = set()
700 697
701 698 # subset contains all of the possible destinations that can be returned, so
702 699 # iterate over them and see if their source(s) were provided in the args.
703 700 # Even if the immediate src of r is not in the args, src's source (or
704 701 # further back) may be. Scanning back further than the immediate src allows
705 702 # transitive transplants and rebases to yield the same results as transitive
706 703 # grafts.
707 704 for r in subset:
708 705 src = _getrevsource(repo, r)
709 706 lineage = None
710 707
711 708 while src is not None:
712 709 if lineage is None:
713 710 lineage = list()
714 711
715 712 lineage.append(r)
716 713
717 714 # The visited lineage is a match if the current source is in the arg
718 715 # set. Since every candidate dest is visited by way of iterating
719 716 # subset, any dests further back in the lineage will be tested by a
720 717 # different iteration over subset. Likewise, if the src was already
721 718 # selected, the current lineage can be selected without going back
722 719 # further.
723 720 if src in args or src in dests:
724 721 dests.update(lineage)
725 722 break
726 723
727 724 r = src
728 725 src = _getrevsource(repo, r)
729 726
730 727 return subset.filter(lambda r: r in dests)
731 728
732 729 def divergent(repo, subset, x):
733 730 """``divergent()``
734 731 Final successors of changesets with an alternative set of final successors.
735 732 """
736 733 # i18n: "divergent" is a keyword
737 734 getargs(x, 0, 0, _("divergent takes no arguments"))
738 735 divergent = obsmod.getrevs(repo, 'divergent')
739 736 return subset.filter(lambda r: r in divergent)
740 737
741 738 def draft(repo, subset, x):
742 739 """``draft()``
743 740 Changeset in draft phase."""
744 741 # i18n: "draft" is a keyword
745 742 getargs(x, 0, 0, _("draft takes no arguments"))
746 743 pc = repo._phasecache
747 744 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
748 745
749 746 def extinct(repo, subset, x):
750 747 """``extinct()``
751 748 Obsolete changesets with obsolete descendants only.
752 749 """
753 750 # i18n: "extinct" is a keyword
754 751 getargs(x, 0, 0, _("extinct takes no arguments"))
755 752 extincts = obsmod.getrevs(repo, 'extinct')
756 753 return subset & extincts
757 754
758 755 def extra(repo, subset, x):
759 756 """``extra(label, [value])``
760 757 Changesets with the given label in the extra metadata, with the given
761 758 optional value.
762 759
763 760 If `value` starts with `re:`, the remainder of the value is treated as
764 761 a regular expression. To match a value that actually starts with `re:`,
765 762 use the prefix `literal:`.
766 763 """
767 764
768 765 # i18n: "extra" is a keyword
769 766 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
770 767 # i18n: "extra" is a keyword
771 768 label = getstring(l[0], _('first argument to extra must be a string'))
772 769 value = None
773 770
774 771 if len(l) > 1:
775 772 # i18n: "extra" is a keyword
776 773 value = getstring(l[1], _('second argument to extra must be a string'))
777 774 kind, value, matcher = _stringmatcher(value)
778 775
779 776 def _matchvalue(r):
780 777 extra = repo[r].extra()
781 778 return label in extra and (value is None or matcher(extra[label]))
782 779
783 780 return subset.filter(lambda r: _matchvalue(r))
784 781
785 782 def filelog(repo, subset, x):
786 783 """``filelog(pattern)``
787 784 Changesets connected to the specified filelog.
788 785
789 786 For performance reasons, ``filelog()`` does not show every changeset
790 787 that affects the requested file(s). See :hg:`help log` for details. For
791 788 a slower, more accurate result, use ``file()``.
792 789
793 790 The pattern without explicit kind like ``glob:`` is expected to be
794 791 relative to the current directory and match against a file exactly
795 792 for efficiency.
796 793 """
797 794
798 795 # i18n: "filelog" is a keyword
799 796 pat = getstring(x, _("filelog requires a pattern"))
800 797 s = set()
801 798
802 799 if not matchmod.patkind(pat):
803 800 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
804 801 fl = repo.file(f)
805 802 for fr in fl:
806 803 s.add(fl.linkrev(fr))
807 804 else:
808 805 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
809 806 for f in repo[None]:
810 807 if m(f):
811 808 fl = repo.file(f)
812 809 for fr in fl:
813 810 s.add(fl.linkrev(fr))
814 811
815 812 return subset.filter(lambda r: r in s)
816 813
817 814 def first(repo, subset, x):
818 815 """``first(set, [n])``
819 816 An alias for limit().
820 817 """
821 818 return limit(repo, subset, x)
822 819
823 820 def _follow(repo, subset, x, name, followfirst=False):
824 821 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
825 822 c = repo['.']
826 823 if l:
827 824 x = getstring(l[0], _("%s expected a filename") % name)
828 825 if x in c:
829 826 cx = c[x]
830 827 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
831 828 # include the revision responsible for the most recent version
832 829 s.add(cx.linkrev())
833 830 else:
834 831 return baseset([])
835 832 else:
836 833 s = _revancestors(repo, baseset([c.rev()]), followfirst)
837 834
838 835 return subset.filter(lambda r: r in s)
839 836
840 837 def follow(repo, subset, x):
841 838 """``follow([file])``
842 839 An alias for ``::.`` (ancestors of the working copy's first parent).
843 840 If a filename is specified, the history of the given file is followed,
844 841 including copies.
845 842 """
846 843 return _follow(repo, subset, x, 'follow')
847 844
848 845 def _followfirst(repo, subset, x):
849 846 # ``followfirst([file])``
850 847 # Like ``follow([file])`` but follows only the first parent of
851 848 # every revision or file revision.
852 849 return _follow(repo, subset, x, '_followfirst', followfirst=True)
853 850
854 851 def getall(repo, subset, x):
855 852 """``all()``
856 853 All changesets, the same as ``0:tip``.
857 854 """
858 855 # i18n: "all" is a keyword
859 856 getargs(x, 0, 0, _("all takes no arguments"))
860 857 return subset
861 858
862 859 def grep(repo, subset, x):
863 860 """``grep(regex)``
864 861 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
865 862 to ensure special escape characters are handled correctly. Unlike
866 863 ``keyword(string)``, the match is case-sensitive.
867 864 """
868 865 try:
869 866 # i18n: "grep" is a keyword
870 867 gr = re.compile(getstring(x, _("grep requires a string")))
871 868 except re.error, e:
872 869 raise error.ParseError(_('invalid match pattern: %s') % e)
873 870
874 871 def matches(x):
875 872 c = repo[x]
876 873 for e in c.files() + [c.user(), c.description()]:
877 874 if gr.search(e):
878 875 return True
879 876 return False
880 877
881 878 return subset.filter(matches)
882 879
883 880 def _matchfiles(repo, subset, x):
884 881 # _matchfiles takes a revset list of prefixed arguments:
885 882 #
886 883 # [p:foo, i:bar, x:baz]
887 884 #
888 885 # builds a match object from them and filters subset. Allowed
889 886 # prefixes are 'p:' for regular patterns, 'i:' for include
890 887 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
891 888 # a revision identifier, or the empty string to reference the
892 889 # working directory, from which the match object is
893 890 # initialized. Use 'd:' to set the default matching mode, default
894 891 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
895 892
896 893 # i18n: "_matchfiles" is a keyword
897 894 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
898 895 pats, inc, exc = [], [], []
899 896 hasset = False
900 897 rev, default = None, None
901 898 for arg in l:
902 899 # i18n: "_matchfiles" is a keyword
903 900 s = getstring(arg, _("_matchfiles requires string arguments"))
904 901 prefix, value = s[:2], s[2:]
905 902 if prefix == 'p:':
906 903 pats.append(value)
907 904 elif prefix == 'i:':
908 905 inc.append(value)
909 906 elif prefix == 'x:':
910 907 exc.append(value)
911 908 elif prefix == 'r:':
912 909 if rev is not None:
913 910 # i18n: "_matchfiles" is a keyword
914 911 raise error.ParseError(_('_matchfiles expected at most one '
915 912 'revision'))
916 913 rev = value
917 914 elif prefix == 'd:':
918 915 if default is not None:
919 916 # i18n: "_matchfiles" is a keyword
920 917 raise error.ParseError(_('_matchfiles expected at most one '
921 918 'default mode'))
922 919 default = value
923 920 else:
924 921 # i18n: "_matchfiles" is a keyword
925 922 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
926 923 if not hasset and matchmod.patkind(value) == 'set':
927 924 hasset = True
928 925 if not default:
929 926 default = 'glob'
930 927
931 928 def matches(x):
932 929 m = None
933 930 c = repo[x]
934 931 if not m or (hasset and rev is None):
935 932 ctx = c
936 933 if rev is not None:
937 934 ctx = repo[rev or None]
938 935 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
939 936 exclude=exc, ctx=ctx, default=default)
940 937 for f in c.files():
941 938 if m(f):
942 939 return True
943 940 return False
944 941
945 942 return subset.filter(matches)
946 943
947 944 def hasfile(repo, subset, x):
948 945 """``file(pattern)``
949 946 Changesets affecting files matched by pattern.
950 947
951 948 For a faster but less accurate result, consider using ``filelog()``
952 949 instead.
953 950
954 951 This predicate uses ``glob:`` as the default kind of pattern.
955 952 """
956 953 # i18n: "file" is a keyword
957 954 pat = getstring(x, _("file requires a pattern"))
958 955 return _matchfiles(repo, subset, ('string', 'p:' + pat))
959 956
960 957 def head(repo, subset, x):
961 958 """``head()``
962 959 Changeset is a named branch head.
963 960 """
964 961 # i18n: "head" is a keyword
965 962 getargs(x, 0, 0, _("head takes no arguments"))
966 963 hs = set()
967 964 for b, ls in repo.branchmap().iteritems():
968 965 hs.update(repo[h].rev() for h in ls)
969 966 return baseset(hs).filter(subset.__contains__)
970 967
971 968 def heads(repo, subset, x):
972 969 """``heads(set)``
973 970 Members of set with no children in set.
974 971 """
975 972 s = getset(repo, subset, x)
976 973 ps = parents(repo, subset, x)
977 974 return s - ps
978 975
979 976 def hidden(repo, subset, x):
980 977 """``hidden()``
981 978 Hidden changesets.
982 979 """
983 980 # i18n: "hidden" is a keyword
984 981 getargs(x, 0, 0, _("hidden takes no arguments"))
985 982 hiddenrevs = repoview.filterrevs(repo, 'visible')
986 983 return subset & hiddenrevs
987 984
988 985 def keyword(repo, subset, x):
989 986 """``keyword(string)``
990 987 Search commit message, user name, and names of changed files for
991 988 string. The match is case-insensitive.
992 989 """
993 990 # i18n: "keyword" is a keyword
994 991 kw = encoding.lower(getstring(x, _("keyword requires a string")))
995 992
996 993 def matches(r):
997 994 c = repo[r]
998 995 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
999 996 c.description()])
1000 997
1001 998 return subset.filter(matches)
1002 999
1003 1000 def limit(repo, subset, x):
1004 1001 """``limit(set, [n])``
1005 1002 First n members of set, defaulting to 1.
1006 1003 """
1007 1004 # i18n: "limit" is a keyword
1008 1005 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1009 1006 try:
1010 1007 lim = 1
1011 1008 if len(l) == 2:
1012 1009 # i18n: "limit" is a keyword
1013 1010 lim = int(getstring(l[1], _("limit requires a number")))
1014 1011 except (TypeError, ValueError):
1015 1012 # i18n: "limit" is a keyword
1016 1013 raise error.ParseError(_("limit expects a number"))
1017 1014 ss = subset.set()
1018 1015 os = getset(repo, spanset(repo), l[0])
1019 1016 bs = baseset([])
1020 1017 it = iter(os)
1021 1018 for x in xrange(lim):
1022 1019 try:
1023 1020 y = it.next()
1024 1021 if y in ss:
1025 1022 bs.append(y)
1026 1023 except (StopIteration):
1027 1024 break
1028 1025 return bs
1029 1026
1030 1027 def last(repo, subset, x):
1031 1028 """``last(set, [n])``
1032 1029 Last n members of set, defaulting to 1.
1033 1030 """
1034 1031 # i18n: "last" is a keyword
1035 1032 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1036 1033 try:
1037 1034 lim = 1
1038 1035 if len(l) == 2:
1039 1036 # i18n: "last" is a keyword
1040 1037 lim = int(getstring(l[1], _("last requires a number")))
1041 1038 except (TypeError, ValueError):
1042 1039 # i18n: "last" is a keyword
1043 1040 raise error.ParseError(_("last expects a number"))
1044 1041 ss = subset.set()
1045 1042 os = getset(repo, spanset(repo), l[0])
1046 1043 os.reverse()
1047 1044 bs = baseset([])
1048 1045 it = iter(os)
1049 1046 for x in xrange(lim):
1050 1047 try:
1051 1048 y = it.next()
1052 1049 if y in ss:
1053 1050 bs.append(y)
1054 1051 except (StopIteration):
1055 1052 break
1056 1053 return bs
1057 1054
1058 1055 def maxrev(repo, subset, x):
1059 1056 """``max(set)``
1060 1057 Changeset with highest revision number in set.
1061 1058 """
1062 1059 os = getset(repo, spanset(repo), x)
1063 1060 if os:
1064 1061 m = os.max()
1065 1062 if m in subset:
1066 1063 return baseset([m])
1067 1064 return baseset([])
1068 1065
1069 1066 def merge(repo, subset, x):
1070 1067 """``merge()``
1071 1068 Changeset is a merge changeset.
1072 1069 """
1073 1070 # i18n: "merge" is a keyword
1074 1071 getargs(x, 0, 0, _("merge takes no arguments"))
1075 1072 cl = repo.changelog
1076 1073 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1077 1074
1078 1075 def branchpoint(repo, subset, x):
1079 1076 """``branchpoint()``
1080 1077 Changesets with more than one child.
1081 1078 """
1082 1079 # i18n: "branchpoint" is a keyword
1083 1080 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1084 1081 cl = repo.changelog
1085 1082 if not subset:
1086 1083 return baseset([])
1087 1084 baserev = min(subset)
1088 1085 parentscount = [0]*(len(repo) - baserev)
1089 1086 for r in cl.revs(start=baserev + 1):
1090 1087 for p in cl.parentrevs(r):
1091 1088 if p >= baserev:
1092 1089 parentscount[p - baserev] += 1
1093 1090 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1094 1091
1095 1092 def minrev(repo, subset, x):
1096 1093 """``min(set)``
1097 1094 Changeset with lowest revision number in set.
1098 1095 """
1099 1096 os = getset(repo, spanset(repo), x)
1100 1097 if os:
1101 1098 m = os.min()
1102 1099 if m in subset:
1103 1100 return baseset([m])
1104 1101 return baseset([])
1105 1102
1106 1103 def _missingancestors(repo, subset, x):
1107 1104 # i18n: "_missingancestors" is a keyword
1108 1105 revs, bases = getargs(x, 2, 2,
1109 1106 _("_missingancestors requires two arguments"))
1110 1107 rs = baseset(repo)
1111 1108 revs = getset(repo, rs, revs)
1112 1109 bases = getset(repo, rs, bases)
1113 1110 missing = set(repo.changelog.findmissingrevs(bases, revs))
1114 1111 return baseset([r for r in subset if r in missing])
1115 1112
1116 1113 def modifies(repo, subset, x):
1117 1114 """``modifies(pattern)``
1118 1115 Changesets modifying files matched by pattern.
1119 1116
1120 1117 The pattern without explicit kind like ``glob:`` is expected to be
1121 1118 relative to the current directory and match against a file or a
1122 1119 directory.
1123 1120 """
1124 1121 # i18n: "modifies" is a keyword
1125 1122 pat = getstring(x, _("modifies requires a pattern"))
1126 1123 return checkstatus(repo, subset, pat, 0)
1127 1124
1128 1125 def node_(repo, subset, x):
1129 1126 """``id(string)``
1130 1127 Revision non-ambiguously specified by the given hex string prefix.
1131 1128 """
1132 1129 # i18n: "id" is a keyword
1133 1130 l = getargs(x, 1, 1, _("id requires one argument"))
1134 1131 # i18n: "id" is a keyword
1135 1132 n = getstring(l[0], _("id requires a string"))
1136 1133 if len(n) == 40:
1137 1134 rn = repo[n].rev()
1138 1135 else:
1139 1136 rn = None
1140 1137 pm = repo.changelog._partialmatch(n)
1141 1138 if pm is not None:
1142 1139 rn = repo.changelog.rev(pm)
1143 1140
1144 1141 return subset.filter(lambda r: r == rn)
1145 1142
1146 1143 def obsolete(repo, subset, x):
1147 1144 """``obsolete()``
1148 1145 Mutable changeset with a newer version."""
1149 1146 # i18n: "obsolete" is a keyword
1150 1147 getargs(x, 0, 0, _("obsolete takes no arguments"))
1151 1148 obsoletes = obsmod.getrevs(repo, 'obsolete')
1152 1149 return subset & obsoletes
1153 1150
1154 1151 def origin(repo, subset, x):
1155 1152 """``origin([set])``
1156 1153 Changesets that were specified as a source for the grafts, transplants or
1157 1154 rebases that created the given revisions. Omitting the optional set is the
1158 1155 same as passing all(). If a changeset created by these operations is itself
1159 1156 specified as a source for one of these operations, only the source changeset
1160 1157 for the first operation is selected.
1161 1158 """
1162 1159 if x is not None:
1163 1160 args = getset(repo, spanset(repo), x).set()
1164 1161 else:
1165 1162 args = getall(repo, spanset(repo), x).set()
1166 1163
1167 1164 def _firstsrc(rev):
1168 1165 src = _getrevsource(repo, rev)
1169 1166 if src is None:
1170 1167 return None
1171 1168
1172 1169 while True:
1173 1170 prev = _getrevsource(repo, src)
1174 1171
1175 1172 if prev is None:
1176 1173 return src
1177 1174 src = prev
1178 1175
1179 1176 o = set([_firstsrc(r) for r in args])
1180 1177 return subset.filter(lambda r: r in o)
1181 1178
1182 1179 def outgoing(repo, subset, x):
1183 1180 """``outgoing([path])``
1184 1181 Changesets not found in the specified destination repository, or the
1185 1182 default push location.
1186 1183 """
1187 1184 import hg # avoid start-up nasties
1188 1185 # i18n: "outgoing" is a keyword
1189 1186 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1190 1187 # i18n: "outgoing" is a keyword
1191 1188 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1192 1189 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1193 1190 dest, branches = hg.parseurl(dest)
1194 1191 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1195 1192 if revs:
1196 1193 revs = [repo.lookup(rev) for rev in revs]
1197 1194 other = hg.peer(repo, {}, dest)
1198 1195 repo.ui.pushbuffer()
1199 1196 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1200 1197 repo.ui.popbuffer()
1201 1198 cl = repo.changelog
1202 1199 o = set([cl.rev(r) for r in outgoing.missing])
1203 1200 return subset.filter(lambda r: r in o)
1204 1201
1205 1202 def p1(repo, subset, x):
1206 1203 """``p1([set])``
1207 1204 First parent of changesets in set, or the working directory.
1208 1205 """
1209 1206 if x is None:
1210 1207 p = repo[x].p1().rev()
1211 1208 return subset.filter(lambda r: r == p)
1212 1209
1213 1210 ps = set()
1214 1211 cl = repo.changelog
1215 1212 for r in getset(repo, spanset(repo), x):
1216 1213 ps.add(cl.parentrevs(r)[0])
1217 1214 return subset & ps
1218 1215
1219 1216 def p2(repo, subset, x):
1220 1217 """``p2([set])``
1221 1218 Second parent of changesets in set, or the working directory.
1222 1219 """
1223 1220 if x is None:
1224 1221 ps = repo[x].parents()
1225 1222 try:
1226 1223 p = ps[1].rev()
1227 1224 return subset.filter(lambda r: r == p)
1228 1225 except IndexError:
1229 1226 return baseset([])
1230 1227
1231 1228 ps = set()
1232 1229 cl = repo.changelog
1233 1230 for r in getset(repo, spanset(repo), x):
1234 1231 ps.add(cl.parentrevs(r)[1])
1235 1232 return subset & ps
1236 1233
1237 1234 def parents(repo, subset, x):
1238 1235 """``parents([set])``
1239 1236 The set of all parents for all changesets in set, or the working directory.
1240 1237 """
1241 1238 if x is None:
1242 1239 ps = tuple(p.rev() for p in repo[x].parents())
1243 1240 return subset & ps
1244 1241
1245 1242 ps = set()
1246 1243 cl = repo.changelog
1247 1244 for r in getset(repo, spanset(repo), x):
1248 1245 ps.update(cl.parentrevs(r))
1249 1246 return subset & ps
1250 1247
1251 1248 def parentspec(repo, subset, x, n):
1252 1249 """``set^0``
1253 1250 The set.
1254 1251 ``set^1`` (or ``set^``), ``set^2``
1255 1252 First or second parent, respectively, of all changesets in set.
1256 1253 """
1257 1254 try:
1258 1255 n = int(n[1])
1259 1256 if n not in (0, 1, 2):
1260 1257 raise ValueError
1261 1258 except (TypeError, ValueError):
1262 1259 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1263 1260 ps = set()
1264 1261 cl = repo.changelog
1265 1262 for r in getset(repo, baseset(cl), x):
1266 1263 if n == 0:
1267 1264 ps.add(r)
1268 1265 elif n == 1:
1269 1266 ps.add(cl.parentrevs(r)[0])
1270 1267 elif n == 2:
1271 1268 parents = cl.parentrevs(r)
1272 1269 if len(parents) > 1:
1273 1270 ps.add(parents[1])
1274 1271 return subset & ps
1275 1272
1276 1273 def present(repo, subset, x):
1277 1274 """``present(set)``
1278 1275 An empty set, if any revision in set isn't found; otherwise,
1279 1276 all revisions in set.
1280 1277
1281 1278 If any of specified revisions is not present in the local repository,
1282 1279 the query is normally aborted. But this predicate allows the query
1283 1280 to continue even in such cases.
1284 1281 """
1285 1282 try:
1286 1283 return getset(repo, subset, x)
1287 1284 except error.RepoLookupError:
1288 1285 return baseset([])
1289 1286
1290 1287 def public(repo, subset, x):
1291 1288 """``public()``
1292 1289 Changeset in public phase."""
1293 1290 # i18n: "public" is a keyword
1294 1291 getargs(x, 0, 0, _("public takes no arguments"))
1295 1292 pc = repo._phasecache
1296 1293 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1297 1294
1298 1295 def remote(repo, subset, x):
1299 1296 """``remote([id [,path]])``
1300 1297 Local revision that corresponds to the given identifier in a
1301 1298 remote repository, if present. Here, the '.' identifier is a
1302 1299 synonym for the current local branch.
1303 1300 """
1304 1301
1305 1302 import hg # avoid start-up nasties
1306 1303 # i18n: "remote" is a keyword
1307 1304 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1308 1305
1309 1306 q = '.'
1310 1307 if len(l) > 0:
1311 1308 # i18n: "remote" is a keyword
1312 1309 q = getstring(l[0], _("remote requires a string id"))
1313 1310 if q == '.':
1314 1311 q = repo['.'].branch()
1315 1312
1316 1313 dest = ''
1317 1314 if len(l) > 1:
1318 1315 # i18n: "remote" is a keyword
1319 1316 dest = getstring(l[1], _("remote requires a repository path"))
1320 1317 dest = repo.ui.expandpath(dest or 'default')
1321 1318 dest, branches = hg.parseurl(dest)
1322 1319 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1323 1320 if revs:
1324 1321 revs = [repo.lookup(rev) for rev in revs]
1325 1322 other = hg.peer(repo, {}, dest)
1326 1323 n = other.lookup(q)
1327 1324 if n in repo:
1328 1325 r = repo[n].rev()
1329 1326 if r in subset:
1330 1327 return baseset([r])
1331 1328 return baseset([])
1332 1329
1333 1330 def removes(repo, subset, x):
1334 1331 """``removes(pattern)``
1335 1332 Changesets which remove files matching pattern.
1336 1333
1337 1334 The pattern without explicit kind like ``glob:`` is expected to be
1338 1335 relative to the current directory and match against a file or a
1339 1336 directory.
1340 1337 """
1341 1338 # i18n: "removes" is a keyword
1342 1339 pat = getstring(x, _("removes requires a pattern"))
1343 1340 return checkstatus(repo, subset, pat, 2)
1344 1341
1345 1342 def rev(repo, subset, x):
1346 1343 """``rev(number)``
1347 1344 Revision with the given numeric identifier.
1348 1345 """
1349 1346 # i18n: "rev" is a keyword
1350 1347 l = getargs(x, 1, 1, _("rev requires one argument"))
1351 1348 try:
1352 1349 # i18n: "rev" is a keyword
1353 1350 l = int(getstring(l[0], _("rev requires a number")))
1354 1351 except (TypeError, ValueError):
1355 1352 # i18n: "rev" is a keyword
1356 1353 raise error.ParseError(_("rev expects a number"))
1357 1354 return subset.filter(lambda r: r == l)
1358 1355
1359 1356 def matching(repo, subset, x):
1360 1357 """``matching(revision [, field])``
1361 1358 Changesets in which a given set of fields match the set of fields in the
1362 1359 selected revision or set.
1363 1360
1364 1361 To match more than one field pass the list of fields to match separated
1365 1362 by spaces (e.g. ``author description``).
1366 1363
1367 1364 Valid fields are most regular revision fields and some special fields.
1368 1365
1369 1366 Regular revision fields are ``description``, ``author``, ``branch``,
1370 1367 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1371 1368 and ``diff``.
1372 1369 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1373 1370 contents of the revision. Two revisions matching their ``diff`` will
1374 1371 also match their ``files``.
1375 1372
1376 1373 Special fields are ``summary`` and ``metadata``:
1377 1374 ``summary`` matches the first line of the description.
1378 1375 ``metadata`` is equivalent to matching ``description user date``
1379 1376 (i.e. it matches the main metadata fields).
1380 1377
1381 1378 ``metadata`` is the default field which is used when no fields are
1382 1379 specified. You can match more than one field at a time.
1383 1380 """
1384 1381 # i18n: "matching" is a keyword
1385 1382 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1386 1383
1387 1384 revs = getset(repo, baseset(repo.changelog), l[0])
1388 1385
1389 1386 fieldlist = ['metadata']
1390 1387 if len(l) > 1:
1391 1388 fieldlist = getstring(l[1],
1392 1389 # i18n: "matching" is a keyword
1393 1390 _("matching requires a string "
1394 1391 "as its second argument")).split()
1395 1392
1396 1393 # Make sure that there are no repeated fields,
1397 1394 # expand the 'special' 'metadata' field type
1398 1395 # and check the 'files' whenever we check the 'diff'
1399 1396 fields = []
1400 1397 for field in fieldlist:
1401 1398 if field == 'metadata':
1402 1399 fields += ['user', 'description', 'date']
1403 1400 elif field == 'diff':
1404 1401 # a revision matching the diff must also match the files
1405 1402 # since matching the diff is very costly, make sure to
1406 1403 # also match the files first
1407 1404 fields += ['files', 'diff']
1408 1405 else:
1409 1406 if field == 'author':
1410 1407 field = 'user'
1411 1408 fields.append(field)
1412 1409 fields = set(fields)
1413 1410 if 'summary' in fields and 'description' in fields:
1414 1411 # If a revision matches its description it also matches its summary
1415 1412 fields.discard('summary')
1416 1413
1417 1414 # We may want to match more than one field
1418 1415 # Not all fields take the same amount of time to be matched
1419 1416 # Sort the selected fields in order of increasing matching cost
1420 1417 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1421 1418 'files', 'description', 'substate', 'diff']
1422 1419 def fieldkeyfunc(f):
1423 1420 try:
1424 1421 return fieldorder.index(f)
1425 1422 except ValueError:
1426 1423 # assume an unknown field is very costly
1427 1424 return len(fieldorder)
1428 1425 fields = list(fields)
1429 1426 fields.sort(key=fieldkeyfunc)
1430 1427
1431 1428 # Each field will be matched with its own "getfield" function
1432 1429 # which will be added to the getfieldfuncs array of functions
1433 1430 getfieldfuncs = []
1434 1431 _funcs = {
1435 1432 'user': lambda r: repo[r].user(),
1436 1433 'branch': lambda r: repo[r].branch(),
1437 1434 'date': lambda r: repo[r].date(),
1438 1435 'description': lambda r: repo[r].description(),
1439 1436 'files': lambda r: repo[r].files(),
1440 1437 'parents': lambda r: repo[r].parents(),
1441 1438 'phase': lambda r: repo[r].phase(),
1442 1439 'substate': lambda r: repo[r].substate,
1443 1440 'summary': lambda r: repo[r].description().splitlines()[0],
1444 1441 'diff': lambda r: list(repo[r].diff(git=True),)
1445 1442 }
1446 1443 for info in fields:
1447 1444 getfield = _funcs.get(info, None)
1448 1445 if getfield is None:
1449 1446 raise error.ParseError(
1450 1447 # i18n: "matching" is a keyword
1451 1448 _("unexpected field name passed to matching: %s") % info)
1452 1449 getfieldfuncs.append(getfield)
1453 1450 # convert the getfield array of functions into a "getinfo" function
1454 1451 # which returns an array of field values (or a single value if there
1455 1452 # is only one field to match)
1456 1453 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1457 1454
1458 1455 def matches(x):
1459 1456 for rev in revs:
1460 1457 target = getinfo(rev)
1461 1458 match = True
1462 1459 for n, f in enumerate(getfieldfuncs):
1463 1460 if target[n] != f(x):
1464 1461 match = False
1465 1462 if match:
1466 1463 return True
1467 1464 return False
1468 1465
1469 1466 return subset.filter(matches)
1470 1467
1471 1468 def reverse(repo, subset, x):
1472 1469 """``reverse(set)``
1473 1470 Reverse order of set.
1474 1471 """
1475 1472 l = getset(repo, subset, x)
1476 1473 l.reverse()
1477 1474 return l
1478 1475
1479 1476 def roots(repo, subset, x):
1480 1477 """``roots(set)``
1481 1478 Changesets in set with no parent changeset in set.
1482 1479 """
1483 1480 s = getset(repo, spanset(repo), x).set()
1484 1481 subset = baseset([r for r in s if r in subset.set()])
1485 1482 cs = _children(repo, subset, s)
1486 1483 return subset - cs
1487 1484
1488 1485 def secret(repo, subset, x):
1489 1486 """``secret()``
1490 1487 Changeset in secret phase."""
1491 1488 # i18n: "secret" is a keyword
1492 1489 getargs(x, 0, 0, _("secret takes no arguments"))
1493 1490 pc = repo._phasecache
1494 1491 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1495 1492
1496 1493 def sort(repo, subset, x):
1497 1494 """``sort(set[, [-]key...])``
1498 1495 Sort set by keys. The default sort order is ascending, specify a key
1499 1496 as ``-key`` to sort in descending order.
1500 1497
1501 1498 The keys can be:
1502 1499
1503 1500 - ``rev`` for the revision number,
1504 1501 - ``branch`` for the branch name,
1505 1502 - ``desc`` for the commit message (description),
1506 1503 - ``user`` for user name (``author`` can be used as an alias),
1507 1504 - ``date`` for the commit date
1508 1505 """
1509 1506 # i18n: "sort" is a keyword
1510 1507 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1511 1508 keys = "rev"
1512 1509 if len(l) == 2:
1513 1510 # i18n: "sort" is a keyword
1514 1511 keys = getstring(l[1], _("sort spec must be a string"))
1515 1512
1516 1513 s = l[0]
1517 1514 keys = keys.split()
1518 1515 l = []
1519 1516 def invert(s):
1520 1517 return "".join(chr(255 - ord(c)) for c in s)
1521 1518 revs = getset(repo, subset, s)
1522 1519 if keys == ["rev"]:
1523 1520 revs.sort()
1524 1521 return revs
1525 1522 elif keys == ["-rev"]:
1526 1523 revs.sort(reverse=True)
1527 1524 return revs
1528 1525 for r in revs:
1529 1526 c = repo[r]
1530 1527 e = []
1531 1528 for k in keys:
1532 1529 if k == 'rev':
1533 1530 e.append(r)
1534 1531 elif k == '-rev':
1535 1532 e.append(-r)
1536 1533 elif k == 'branch':
1537 1534 e.append(c.branch())
1538 1535 elif k == '-branch':
1539 1536 e.append(invert(c.branch()))
1540 1537 elif k == 'desc':
1541 1538 e.append(c.description())
1542 1539 elif k == '-desc':
1543 1540 e.append(invert(c.description()))
1544 1541 elif k in 'user author':
1545 1542 e.append(c.user())
1546 1543 elif k in '-user -author':
1547 1544 e.append(invert(c.user()))
1548 1545 elif k == 'date':
1549 1546 e.append(c.date()[0])
1550 1547 elif k == '-date':
1551 1548 e.append(-c.date()[0])
1552 1549 else:
1553 1550 raise error.ParseError(_("unknown sort key %r") % k)
1554 1551 e.append(r)
1555 1552 l.append(e)
1556 1553 l.sort()
1557 1554 return baseset([e[-1] for e in l])
1558 1555
1559 1556 def _stringmatcher(pattern):
1560 1557 """
1561 1558 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1562 1559 returns the matcher name, pattern, and matcher function.
1563 1560 missing or unknown prefixes are treated as literal matches.
1564 1561
1565 1562 helper for tests:
1566 1563 >>> def test(pattern, *tests):
1567 1564 ... kind, pattern, matcher = _stringmatcher(pattern)
1568 1565 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1569 1566
1570 1567 exact matching (no prefix):
1571 1568 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1572 1569 ('literal', 'abcdefg', [False, False, True])
1573 1570
1574 1571 regex matching ('re:' prefix)
1575 1572 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1576 1573 ('re', 'a.+b', [False, False, True])
1577 1574
1578 1575 force exact matches ('literal:' prefix)
1579 1576 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1580 1577 ('literal', 're:foobar', [False, True])
1581 1578
1582 1579 unknown prefixes are ignored and treated as literals
1583 1580 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1584 1581 ('literal', 'foo:bar', [False, False, True])
1585 1582 """
1586 1583 if pattern.startswith('re:'):
1587 1584 pattern = pattern[3:]
1588 1585 try:
1589 1586 regex = re.compile(pattern)
1590 1587 except re.error, e:
1591 1588 raise error.ParseError(_('invalid regular expression: %s')
1592 1589 % e)
1593 1590 return 're', pattern, regex.search
1594 1591 elif pattern.startswith('literal:'):
1595 1592 pattern = pattern[8:]
1596 1593 return 'literal', pattern, pattern.__eq__
1597 1594
1598 1595 def _substringmatcher(pattern):
1599 1596 kind, pattern, matcher = _stringmatcher(pattern)
1600 1597 if kind == 'literal':
1601 1598 matcher = lambda s: pattern in s
1602 1599 return kind, pattern, matcher
1603 1600
1604 1601 def tag(repo, subset, x):
1605 1602 """``tag([name])``
1606 1603 The specified tag by name, or all tagged revisions if no name is given.
1607 1604
1608 1605 If `name` starts with `re:`, the remainder of the name is treated as
1609 1606 a regular expression. To match a tag that actually starts with `re:`,
1610 1607 use the prefix `literal:`.
1611 1608 """
1612 1609 # i18n: "tag" is a keyword
1613 1610 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1614 1611 cl = repo.changelog
1615 1612 if args:
1616 1613 pattern = getstring(args[0],
1617 1614 # i18n: "tag" is a keyword
1618 1615 _('the argument to tag must be a string'))
1619 1616 kind, pattern, matcher = _stringmatcher(pattern)
1620 1617 if kind == 'literal':
1621 1618 # avoid resolving all tags
1622 1619 tn = repo._tagscache.tags.get(pattern, None)
1623 1620 if tn is None:
1624 1621 raise util.Abort(_("tag '%s' does not exist") % pattern)
1625 1622 s = set([repo[tn].rev()])
1626 1623 else:
1627 1624 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1628 1625 else:
1629 1626 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1630 1627 return subset & s
1631 1628
1632 1629 def tagged(repo, subset, x):
1633 1630 return tag(repo, subset, x)
1634 1631
1635 1632 def unstable(repo, subset, x):
1636 1633 """``unstable()``
1637 1634 Non-obsolete changesets with obsolete ancestors.
1638 1635 """
1639 1636 # i18n: "unstable" is a keyword
1640 1637 getargs(x, 0, 0, _("unstable takes no arguments"))
1641 1638 unstables = obsmod.getrevs(repo, 'unstable')
1642 1639 return subset & unstables
1643 1640
1644 1641
1645 1642 def user(repo, subset, x):
1646 1643 """``user(string)``
1647 1644 User name contains string. The match is case-insensitive.
1648 1645
1649 1646 If `string` starts with `re:`, the remainder of the string is treated as
1650 1647 a regular expression. To match a user that actually contains `re:`, use
1651 1648 the prefix `literal:`.
1652 1649 """
1653 1650 return author(repo, subset, x)
1654 1651
1655 1652 # for internal use
1656 1653 def _list(repo, subset, x):
1657 1654 s = getstring(x, "internal error")
1658 1655 if not s:
1659 1656 return baseset([])
1660 1657 ls = [repo[r].rev() for r in s.split('\0')]
1661 1658 s = subset.set()
1662 1659 return baseset([r for r in ls if r in s])
1663 1660
1664 1661 # for internal use
1665 1662 def _intlist(repo, subset, x):
1666 1663 s = getstring(x, "internal error")
1667 1664 if not s:
1668 1665 return baseset([])
1669 1666 ls = [int(r) for r in s.split('\0')]
1670 1667 s = subset.set()
1671 1668 return baseset([r for r in ls if r in s])
1672 1669
1673 1670 # for internal use
1674 1671 def _hexlist(repo, subset, x):
1675 1672 s = getstring(x, "internal error")
1676 1673 if not s:
1677 1674 return baseset([])
1678 1675 cl = repo.changelog
1679 1676 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1680 1677 s = subset.set()
1681 1678 return baseset([r for r in ls if r in s])
1682 1679
1683 1680 symbols = {
1684 1681 "adds": adds,
1685 1682 "all": getall,
1686 1683 "ancestor": ancestor,
1687 1684 "ancestors": ancestors,
1688 1685 "_firstancestors": _firstancestors,
1689 1686 "author": author,
1690 1687 "only": only,
1691 1688 "bisect": bisect,
1692 1689 "bisected": bisected,
1693 1690 "bookmark": bookmark,
1694 1691 "branch": branch,
1695 1692 "branchpoint": branchpoint,
1696 1693 "bumped": bumped,
1697 1694 "bundle": bundle,
1698 1695 "children": children,
1699 1696 "closed": closed,
1700 1697 "contains": contains,
1701 1698 "converted": converted,
1702 1699 "date": date,
1703 1700 "desc": desc,
1704 1701 "descendants": descendants,
1705 1702 "_firstdescendants": _firstdescendants,
1706 1703 "destination": destination,
1707 1704 "divergent": divergent,
1708 1705 "draft": draft,
1709 1706 "extinct": extinct,
1710 1707 "extra": extra,
1711 1708 "file": hasfile,
1712 1709 "filelog": filelog,
1713 1710 "first": first,
1714 1711 "follow": follow,
1715 1712 "_followfirst": _followfirst,
1716 1713 "grep": grep,
1717 1714 "head": head,
1718 1715 "heads": heads,
1719 1716 "hidden": hidden,
1720 1717 "id": node_,
1721 1718 "keyword": keyword,
1722 1719 "last": last,
1723 1720 "limit": limit,
1724 1721 "_matchfiles": _matchfiles,
1725 1722 "max": maxrev,
1726 1723 "merge": merge,
1727 1724 "min": minrev,
1728 1725 "_missingancestors": _missingancestors,
1729 1726 "modifies": modifies,
1730 1727 "obsolete": obsolete,
1731 1728 "origin": origin,
1732 1729 "outgoing": outgoing,
1733 1730 "p1": p1,
1734 1731 "p2": p2,
1735 1732 "parents": parents,
1736 1733 "present": present,
1737 1734 "public": public,
1738 1735 "remote": remote,
1739 1736 "removes": removes,
1740 1737 "rev": rev,
1741 1738 "reverse": reverse,
1742 1739 "roots": roots,
1743 1740 "sort": sort,
1744 1741 "secret": secret,
1745 1742 "matching": matching,
1746 1743 "tag": tag,
1747 1744 "tagged": tagged,
1748 1745 "user": user,
1749 1746 "unstable": unstable,
1750 1747 "_list": _list,
1751 1748 "_intlist": _intlist,
1752 1749 "_hexlist": _hexlist,
1753 1750 }
1754 1751
1755 1752 # symbols which can't be used for a DoS attack for any given input
1756 1753 # (e.g. those which accept regexes as plain strings shouldn't be included)
1757 1754 # functions that just return a lot of changesets (like all) don't count here
1758 1755 safesymbols = set([
1759 1756 "adds",
1760 1757 "all",
1761 1758 "ancestor",
1762 1759 "ancestors",
1763 1760 "_firstancestors",
1764 1761 "author",
1765 1762 "bisect",
1766 1763 "bisected",
1767 1764 "bookmark",
1768 1765 "branch",
1769 1766 "branchpoint",
1770 1767 "bumped",
1771 1768 "bundle",
1772 1769 "children",
1773 1770 "closed",
1774 1771 "converted",
1775 1772 "date",
1776 1773 "desc",
1777 1774 "descendants",
1778 1775 "_firstdescendants",
1779 1776 "destination",
1780 1777 "divergent",
1781 1778 "draft",
1782 1779 "extinct",
1783 1780 "extra",
1784 1781 "file",
1785 1782 "filelog",
1786 1783 "first",
1787 1784 "follow",
1788 1785 "_followfirst",
1789 1786 "head",
1790 1787 "heads",
1791 1788 "hidden",
1792 1789 "id",
1793 1790 "keyword",
1794 1791 "last",
1795 1792 "limit",
1796 1793 "_matchfiles",
1797 1794 "max",
1798 1795 "merge",
1799 1796 "min",
1800 1797 "_missingancestors",
1801 1798 "modifies",
1802 1799 "obsolete",
1803 1800 "origin",
1804 1801 "outgoing",
1805 1802 "p1",
1806 1803 "p2",
1807 1804 "parents",
1808 1805 "present",
1809 1806 "public",
1810 1807 "remote",
1811 1808 "removes",
1812 1809 "rev",
1813 1810 "reverse",
1814 1811 "roots",
1815 1812 "sort",
1816 1813 "secret",
1817 1814 "matching",
1818 1815 "tag",
1819 1816 "tagged",
1820 1817 "user",
1821 1818 "unstable",
1822 1819 "_list",
1823 1820 "_intlist",
1824 1821 "_hexlist",
1825 1822 ])
1826 1823
1827 1824 methods = {
1828 1825 "range": rangeset,
1829 1826 "dagrange": dagrange,
1830 1827 "string": stringset,
1831 1828 "symbol": symbolset,
1832 1829 "and": andset,
1833 1830 "or": orset,
1834 1831 "not": notset,
1835 1832 "list": listset,
1836 1833 "func": func,
1837 1834 "ancestor": ancestorspec,
1838 1835 "parent": parentspec,
1839 1836 "parentpost": p1,
1840 1837 }
1841 1838
1842 1839 def optimize(x, small):
1843 1840 if x is None:
1844 1841 return 0, x
1845 1842
1846 1843 smallbonus = 1
1847 1844 if small:
1848 1845 smallbonus = .5
1849 1846
1850 1847 op = x[0]
1851 1848 if op == 'minus':
1852 1849 return optimize(('and', x[1], ('not', x[2])), small)
1853 1850 elif op == 'dagrangepre':
1854 1851 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1855 1852 elif op == 'dagrangepost':
1856 1853 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1857 1854 elif op == 'rangepre':
1858 1855 return optimize(('range', ('string', '0'), x[1]), small)
1859 1856 elif op == 'rangepost':
1860 1857 return optimize(('range', x[1], ('string', 'tip')), small)
1861 1858 elif op == 'negate':
1862 1859 return optimize(('string',
1863 1860 '-' + getstring(x[1], _("can't negate that"))), small)
1864 1861 elif op in 'string symbol negate':
1865 1862 return smallbonus, x # single revisions are small
1866 1863 elif op == 'and':
1867 1864 wa, ta = optimize(x[1], True)
1868 1865 wb, tb = optimize(x[2], True)
1869 1866
1870 1867 # (::x and not ::y)/(not ::y and ::x) have a fast path
1871 1868 def ismissingancestors(revs, bases):
1872 1869 return (
1873 1870 revs[0] == 'func'
1874 1871 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1875 1872 and bases[0] == 'not'
1876 1873 and bases[1][0] == 'func'
1877 1874 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1878 1875
1879 1876 w = min(wa, wb)
1880 1877 if ismissingancestors(ta, tb):
1881 1878 return w, ('func', ('symbol', '_missingancestors'),
1882 1879 ('list', ta[2], tb[1][2]))
1883 1880 if ismissingancestors(tb, ta):
1884 1881 return w, ('func', ('symbol', '_missingancestors'),
1885 1882 ('list', tb[2], ta[1][2]))
1886 1883
1887 1884 if wa > wb:
1888 1885 return w, (op, tb, ta)
1889 1886 return w, (op, ta, tb)
1890 1887 elif op == 'or':
1891 1888 wa, ta = optimize(x[1], False)
1892 1889 wb, tb = optimize(x[2], False)
1893 1890 if wb < wa:
1894 1891 wb, wa = wa, wb
1895 1892 return max(wa, wb), (op, ta, tb)
1896 1893 elif op == 'not':
1897 1894 o = optimize(x[1], not small)
1898 1895 return o[0], (op, o[1])
1899 1896 elif op == 'parentpost':
1900 1897 o = optimize(x[1], small)
1901 1898 return o[0], (op, o[1])
1902 1899 elif op == 'group':
1903 1900 return optimize(x[1], small)
1904 1901 elif op in 'dagrange range list parent ancestorspec':
1905 1902 if op == 'parent':
1906 1903 # x^:y means (x^) : y, not x ^ (:y)
1907 1904 post = ('parentpost', x[1])
1908 1905 if x[2][0] == 'dagrangepre':
1909 1906 return optimize(('dagrange', post, x[2][1]), small)
1910 1907 elif x[2][0] == 'rangepre':
1911 1908 return optimize(('range', post, x[2][1]), small)
1912 1909
1913 1910 wa, ta = optimize(x[1], small)
1914 1911 wb, tb = optimize(x[2], small)
1915 1912 return wa + wb, (op, ta, tb)
1916 1913 elif op == 'func':
1917 1914 f = getstring(x[1], _("not a symbol"))
1918 1915 wa, ta = optimize(x[2], small)
1919 1916 if f in ("author branch closed date desc file grep keyword "
1920 1917 "outgoing user"):
1921 1918 w = 10 # slow
1922 1919 elif f in "modifies adds removes":
1923 1920 w = 30 # slower
1924 1921 elif f == "contains":
1925 1922 w = 100 # very slow
1926 1923 elif f == "ancestor":
1927 1924 w = 1 * smallbonus
1928 1925 elif f in "reverse limit first":
1929 1926 w = 0
1930 1927 elif f in "sort":
1931 1928 w = 10 # assume most sorts look at changelog
1932 1929 else:
1933 1930 w = 1
1934 1931 return w + wa, (op, x[1], ta)
1935 1932 return 1, x
1936 1933
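# Illustrative sketch (not part of the original file) of two rewrites done by
# optimize() above, assuming the trees come straight from parse():
#
#   "a - b"  parses to  ('minus', ('symbol', 'a'), ('symbol', 'b'))
#   and is rewritten to ('and', ('symbol', 'a'), ('not', ('symbol', 'b')))
#
#   "::a and not ::b" matches the _missingancestors fast path and becomes
#   ('func', ('symbol', '_missingancestors'),
#            ('list', ('symbol', 'a'), ('symbol', 'b')))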
1937 1934 _aliasarg = ('func', ('symbol', '_aliasarg'))
1938 1935 def _getaliasarg(tree):
1939 1936 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1940 1937 return X, None otherwise.
1941 1938 """
1942 1939 if (len(tree) == 3 and tree[:2] == _aliasarg
1943 1940 and tree[2][0] == 'string'):
1944 1941 return tree[2][1]
1945 1942 return None
1946 1943
1947 1944 def _checkaliasarg(tree, known=None):
1948 1945 """Check tree contains no _aliasarg construct or only ones which
1949 1946 value is in known. Used to avoid alias placeholders injection.
1950 1947 """
1951 1948 if isinstance(tree, tuple):
1952 1949 arg = _getaliasarg(tree)
1953 1950 if arg is not None and (not known or arg not in known):
1954 1951 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1955 1952 for t in tree:
1956 1953 _checkaliasarg(t, known)
1957 1954
1958 1955 class revsetalias(object):
1959 1956 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1960 1957 args = None
1961 1958
1962 1959 def __init__(self, name, value):
1963 1960 '''Aliases like:
1964 1961
1965 1962 h = heads(default)
1966 1963 b($1) = ancestors($1) - ancestors(default)
1967 1964 '''
1968 1965 m = self.funcre.search(name)
1969 1966 if m:
1970 1967 self.name = m.group(1)
1971 1968 self.tree = ('func', ('symbol', m.group(1)))
1972 1969 self.args = [x.strip() for x in m.group(2).split(',')]
1973 1970 for arg in self.args:
1974 1971 # _aliasarg() is an unknown symbol only used to separate
1975 1972 # alias argument placeholders from regular strings.
1976 1973 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1977 1974 else:
1978 1975 self.name = name
1979 1976 self.tree = ('symbol', name)
1980 1977
1981 1978 self.replacement, pos = parse(value)
1982 1979 if pos != len(value):
1983 1980 raise error.ParseError(_('invalid token'), pos)
1984 1981 # Check for placeholder injection
1985 1982 _checkaliasarg(self.replacement, self.args)
1986 1983
1987 1984 def _getalias(aliases, tree):
1988 1985 """If tree looks like an unexpanded alias, return it. Return None
1989 1986 otherwise.
1990 1987 """
1991 1988 if isinstance(tree, tuple) and tree:
1992 1989 if tree[0] == 'symbol' and len(tree) == 2:
1993 1990 name = tree[1]
1994 1991 alias = aliases.get(name)
1995 1992 if alias and alias.args is None and alias.tree == tree:
1996 1993 return alias
1997 1994 if tree[0] == 'func' and len(tree) > 1:
1998 1995 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1999 1996 name = tree[1][1]
2000 1997 alias = aliases.get(name)
2001 1998 if alias and alias.args is not None and alias.tree == tree[:2]:
2002 1999 return alias
2003 2000 return None
2004 2001
2005 2002 def _expandargs(tree, args):
2006 2003 """Replace _aliasarg instances with the substitution value of the
2007 2004 same name in args, recursively.
2008 2005 """
2009 2006 if not tree or not isinstance(tree, tuple):
2010 2007 return tree
2011 2008 arg = _getaliasarg(tree)
2012 2009 if arg is not None:
2013 2010 return args[arg]
2014 2011 return tuple(_expandargs(t, args) for t in tree)
2015 2012
2016 2013 def _expandaliases(aliases, tree, expanding, cache):
2017 2014 """Expand aliases in tree, recursively.
2018 2015
2019 2016 'aliases' is a dictionary mapping user defined aliases to
2020 2017 revsetalias objects.
2021 2018 """
2022 2019 if not isinstance(tree, tuple):
2023 2020 # Do not expand raw strings
2024 2021 return tree
2025 2022 alias = _getalias(aliases, tree)
2026 2023 if alias is not None:
2027 2024 if alias in expanding:
2028 2025 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2029 2026 'detected') % alias.name)
2030 2027 expanding.append(alias)
2031 2028 if alias.name not in cache:
2032 2029 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2033 2030 expanding, cache)
2034 2031 result = cache[alias.name]
2035 2032 expanding.pop()
2036 2033 if alias.args is not None:
2037 2034 l = getlist(tree[2])
2038 2035 if len(l) != len(alias.args):
2039 2036 raise error.ParseError(
2040 2037 _('invalid number of arguments: %s') % len(l))
2041 2038 l = [_expandaliases(aliases, a, [], cache) for a in l]
2042 2039 result = _expandargs(result, dict(zip(alias.args, l)))
2043 2040 else:
2044 2041 result = tuple(_expandaliases(aliases, t, expanding, cache)
2045 2042 for t in tree)
2046 2043 return result
2047 2044
2048 2045 def findaliases(ui, tree):
2049 2046 _checkaliasarg(tree)
2050 2047 aliases = {}
2051 2048 for k, v in ui.configitems('revsetalias'):
2052 2049 alias = revsetalias(k, v)
2053 2050 aliases[alias.name] = alias
2054 2051 return _expandaliases(aliases, tree, [], {})
2055 2052
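# Illustrative sketch (not part of the original file): findaliases() reads the
# [revsetalias] config section, so aliases are typically defined in hgrc as:
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# after which "b(1.5)" in a user revset expands to
# "ancestors(1.5) - ancestors(default)".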
2056 2053 def parse(spec, lookup=None):
2057 2054 p = parser.parser(tokenize, elements)
2058 2055 return p.parse(spec, lookup=lookup)
2059 2056
2060 2057 def match(ui, spec, repo=None):
2061 2058 if not spec:
2062 2059 raise error.ParseError(_("empty query"))
2063 2060 lookup = None
2064 2061 if repo:
2065 2062 lookup = repo.__contains__
2066 2063 tree, pos = parse(spec, lookup)
2067 2064 if (pos != len(spec)):
2068 2065 raise error.ParseError(_("invalid token"), pos)
2069 2066 if ui:
2070 2067 tree = findaliases(ui, tree)
2071 2068 weight, tree = optimize(tree, True)
2072 2069 def mfunc(repo, subset):
2073 2070 if util.safehasattr(subset, 'set'):
2074 2071 return getset(repo, subset, tree)
2075 2072 return getset(repo, baseset(subset), tree)
2076 2073 return mfunc
2077 2074
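# Illustrative sketch (not part of the original file): the matcher returned by
# match() is typically applied to a full spanset of the repository, e.g.
# (assuming `ui` and `repo` are existing ui and localrepository objects):
#
#   m = match(ui, 'head() and not closed()')
#   revs = m(repo, spanset(repo))    # a smartset of matching revision numbers
#   for r in revs:
#       print repo[r]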
2078 2075 def formatspec(expr, *args):
2079 2076 '''
2080 2077 This is a convenience function for using revsets internally, and
2081 2078 escapes arguments appropriately. Aliases are intentionally ignored
2082 2079 so that intended expression behavior isn't accidentally subverted.
2083 2080
2084 2081 Supported arguments:
2085 2082
2086 2083 %r = revset expression, parenthesized
2087 2084 %d = int(arg), no quoting
2088 2085 %s = string(arg), escaped and single-quoted
2089 2086 %b = arg.branch(), escaped and single-quoted
2090 2087 %n = hex(arg), single-quoted
2091 2088 %% = a literal '%'
2092 2089
2093 2090 Prefixing the type with 'l' specifies a parenthesized list of that type.
2094 2091
2095 2092 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2096 2093 '(10 or 11):: and ((this()) or (that()))'
2097 2094 >>> formatspec('%d:: and not %d::', 10, 20)
2098 2095 '10:: and not 20::'
2099 2096 >>> formatspec('%ld or %ld', [], [1])
2100 2097 "_list('') or 1"
2101 2098 >>> formatspec('keyword(%s)', 'foo\\xe9')
2102 2099 "keyword('foo\\\\xe9')"
2103 2100 >>> b = lambda: 'default'
2104 2101 >>> b.branch = b
2105 2102 >>> formatspec('branch(%b)', b)
2106 2103 "branch('default')"
2107 2104 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2108 2105 "root(_list('a\\x00b\\x00c\\x00d'))"
2109 2106 '''
2110 2107
2111 2108 def quote(s):
2112 2109 return repr(str(s))
2113 2110
2114 2111 def argtype(c, arg):
2115 2112 if c == 'd':
2116 2113 return str(int(arg))
2117 2114 elif c == 's':
2118 2115 return quote(arg)
2119 2116 elif c == 'r':
2120 2117 parse(arg) # make sure syntax errors are confined
2121 2118 return '(%s)' % arg
2122 2119 elif c == 'n':
2123 2120 return quote(node.hex(arg))
2124 2121 elif c == 'b':
2125 2122 return quote(arg.branch())
2126 2123
2127 2124 def listexp(s, t):
2128 2125 l = len(s)
2129 2126 if l == 0:
2130 2127 return "_list('')"
2131 2128 elif l == 1:
2132 2129 return argtype(t, s[0])
2133 2130 elif t == 'd':
2134 2131 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2135 2132 elif t == 's':
2136 2133 return "_list('%s')" % "\0".join(s)
2137 2134 elif t == 'n':
2138 2135 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2139 2136 elif t == 'b':
2140 2137 return "_list('%s')" % "\0".join(a.branch() for a in s)
2141 2138
2142 2139 m = l // 2
2143 2140 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2144 2141
2145 2142 ret = ''
2146 2143 pos = 0
2147 2144 arg = 0
2148 2145 while pos < len(expr):
2149 2146 c = expr[pos]
2150 2147 if c == '%':
2151 2148 pos += 1
2152 2149 d = expr[pos]
2153 2150 if d == '%':
2154 2151 ret += d
2155 2152 elif d in 'dsnbr':
2156 2153 ret += argtype(d, args[arg])
2157 2154 arg += 1
2158 2155 elif d == 'l':
2159 2156 # a list of some type
2160 2157 pos += 1
2161 2158 d = expr[pos]
2162 2159 ret += listexp(list(args[arg]), d)
2163 2160 arg += 1
2164 2161 else:
2165 2162 raise util.Abort('unexpected revspec format character %s' % d)
2166 2163 else:
2167 2164 ret += c
2168 2165 pos += 1
2169 2166
2170 2167 return ret
2171 2168
2172 2169 def prettyformat(tree):
2173 2170 def _prettyformat(tree, level, lines):
2174 2171 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2175 2172 lines.append((level, str(tree)))
2176 2173 else:
2177 2174 lines.append((level, '(%s' % tree[0]))
2178 2175 for s in tree[1:]:
2179 2176 _prettyformat(s, level + 1, lines)
2180 2177 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2181 2178
2182 2179 lines = []
2183 2180 _prettyformat(tree, 0, lines)
2184 2181 output = '\n'.join((' '*l + s) for l, s in lines)
2185 2182 return output
2186 2183
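# Illustrative example (not part of the original file) of prettyformat()
# output for a small parse tree:
#
#   >>> print prettyformat(('func', ('symbol', 'ancestors'), ('symbol', 'tip')))
#   (func
#     ('symbol', 'ancestors')
#     ('symbol', 'tip'))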
2187 2184 def depth(tree):
2188 2185 if isinstance(tree, tuple):
2189 2186 return max(map(depth, tree)) + 1
2190 2187 else:
2191 2188 return 0
2192 2189
2193 2190 def funcsused(tree):
2194 2191 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2195 2192 return set()
2196 2193 else:
2197 2194 funcs = set()
2198 2195 for s in tree[1:]:
2199 2196 funcs |= funcsused(s)
2200 2197 if tree[0] == 'func':
2201 2198 funcs.add(tree[1][1])
2202 2199 return funcs
2203 2200
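# Illustrative examples (not part of the original file) for the two tree
# helpers above, using the parse tree of "ancestors(heads('default'))":
#
#   tree = ('func', ('symbol', 'ancestors'),
#           ('func', ('symbol', 'heads'), ('string', 'default')))
#   depth(tree)     -> 3
#   funcsused(tree) -> set(['ancestors', 'heads'])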
2204 2201 class baseset(list):
2205 2202 """Basic data structure that represents a revset and contains the basic
2206 2203 operations that it should be able to perform.
2207 2204
2208 2205 Every method in this class should be implemented by any smartset class.
2209 2206 """
2210 2207 def __init__(self, data=()):
2211 2208 super(baseset, self).__init__(data)
2212 2209 self._set = None
2213 2210
2214 2211 def ascending(self):
2215 2212 """Sorts the set in ascending order (in place).
2216 2213
2217 2214 This is part of the mandatory API for smartset."""
2218 2215 self.sort()
2219 2216
2220 2217 def descending(self):
2221 2218 """Sorts the set in descending order (in place).
2222 2219
2223 2220 This is part of the mandatory API for smartset."""
2224 2221 self.sort(reverse=True)
2225 2222
2226 2223 def min(self):
2227 2224 return min(self)
2228 2225
2229 2226 def max(self):
2230 2227 return max(self)
2231 2228
2232 2229 def set(self):
2233 2230 """Returns a set or a smartset containing all the elements.
2234 2231
2235 2232 The returned structure should be the fastest option for membership
2236 2233 testing.
2237 2234
2238 2235 This is part of the mandatory API for smartset."""
2239 2236 if not self._set:
2240 2237 self._set = set(self)
2241 2238 return self._set
2242 2239
2243 2240 def __sub__(self, other):
2244 2241 """Returns a new object with the substraction of the two collections.
2245 2242
2246 2243 This is part of the mandatory API for smartset."""
2247 2244 if isinstance(other, baseset):
2248 2245 s = other.set()
2249 2246 else:
2250 2247 s = set(other)
2251 2248 return baseset(self.set() - s)
2252 2249
2253 2250 def __and__(self, other):
2254 2251 """Returns a new object with the intersection of the two collections.
2255 2252
2256 2253 This is part of the mandatory API for smartset."""
2257 2254 if isinstance(other, baseset):
2258 2255 other = other.set()
2259 2256 return baseset([y for y in self if y in other])
2260 2257
2261 2258 def __add__(self, other):
2262 2259 """Returns a new object with the union of the two collections.
2263 2260
2264 2261 This is part of the mandatory API for smartset."""
2265 2262 s = self.set()
2266 2263 l = [r for r in other if r not in s]
2267 2264 return baseset(list(self) + l)
2268 2265
2269 2266 def isascending(self):
2270 2267 """Returns True if the collection is ascending order, False if not.
2271 2268
2272 2269 This is part of the mandatory API for smartset."""
2273 2270 return False
2274 2271
2275 2272 def isdescending(self):
2276 2273 """Returns True if the collection is descending order, False if not.
2277 2274
2278 2275 This is part of the mandatory API for smartset."""
2279 2276 return False
2280 2277
2281 2278 def filter(self, condition):
2282 2279 """Returns this smartset filtered by condition as a new smartset.
2283 2280
2284 2281 `condition` is a callable which takes a revision number and returns a
2285 2282 boolean.
2286 2283
2287 2284 This is part of the mandatory API for smartset."""
2288 2285 return lazyset(self, condition)
2289 2286
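# Illustrative sketch (not part of the original file): baseset is just a list
# subclass with a cached set() for membership tests, so the operators behave
# like this:
#
#   a = baseset([0, 1, 2, 3])
#   b = baseset([2, 3, 4])
#   list(a & b)    -> [2, 3]          # keeps the order of `a`
#   sorted(a - b)  -> [0, 1]          # built from a.set() - b.set()
#   list(a + b)    -> [0, 1, 2, 3, 4] # `a` followed by revs missing from it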
2290 2287 class _orderedsetmixin(object):
2291 2288 """Mixin class with utility methods for smartsets
2292 2289
2293 2290 This should be extended by smartsets which have the isascending(),
2294 2291 isdescending() and reverse() methods"""
2295 2292
2296 2293 def _first(self):
2297 2294 """return the first revision in the set"""
2298 2295 for r in self:
2299 2296 return r
2300 2297 raise ValueError('arg is an empty sequence')
2301 2298
2302 2299 def _last(self):
2303 2300 """return the last revision in the set"""
2304 2301 self.reverse()
2305 2302 m = self._first()
2306 2303 self.reverse()
2307 2304 return m
2308 2305
2309 2306 def min(self):
2310 2307 """return the smallest element in the set"""
2311 2308 if self.isascending():
2312 2309 return self._first()
2313 2310 return self._last()
2314 2311
2315 2312 def max(self):
2316 2313 """return the largest element in the set"""
2317 2314 if self.isascending():
2318 2315 return self._last()
2319 2316 return self._first()
2320 2317
2321 2318 class lazyset(object):
2322 2319 """Duck type for baseset class which iterates lazily over the revisions in
2323 2320 the subset and contains a function which tests for membership in the
2324 2321 revset
2325 2322 """
2326 2323 def __init__(self, subset, condition=lambda x: True):
2327 2324 """
2328 2325 condition: a function that decides whether a revision in the subset
2329 2326 belongs to the revset or not.
2330 2327 """
2331 2328 self._subset = subset
2332 2329 self._condition = condition
2333 2330 self._cache = {}
2334 2331
2335 2332 def ascending(self):
2336 2333 self._subset.sort()
2337 2334
2338 2335 def descending(self):
2339 2336 self._subset.sort(reverse=True)
2340 2337
2341 2338 def min(self):
2342 2339 return min(self)
2343 2340
2344 2341 def max(self):
2345 2342 return max(self)
2346 2343
2347 2344 def __contains__(self, x):
2348 2345 c = self._cache
2349 2346 if x not in c:
2350 2347 c[x] = x in self._subset and self._condition(x)
2351 2348 return c[x]
2352 2349
2353 2350 def __iter__(self):
2354 2351 cond = self._condition
2355 2352 for x in self._subset:
2356 2353 if cond(x):
2357 2354 yield x
2358 2355
2359 2356 def __and__(self, x):
2360 2357 return lazyset(self, lambda r: r in x)
2361 2358
2362 2359 def __sub__(self, x):
2363 2360 return lazyset(self, lambda r: r not in x)
2364 2361
2365 2362 def __add__(self, x):
2366 2363 return _addset(self, x)
2367 2364
2368 2365 def __nonzero__(self):
2369 2366 for r in self:
2370 2367 return True
2371 2368 return False
2372 2369
2373 2370 def __len__(self):
2374 2371 # Basic implementation to be changed in future patches.
2375 2372 l = baseset([r for r in self])
2376 2373 return len(l)
2377 2374
2378 2375 def __getitem__(self, x):
2379 2376 # Basic implementation to be changed in future patches.
2380 2377 l = baseset([r for r in self])
2381 2378 return l[x]
2382 2379
2383 2380 def sort(self, reverse=False):
2384 2381 if not util.safehasattr(self._subset, 'sort'):
2385 2382 self._subset = baseset(self._subset)
2386 2383 self._subset.sort(reverse=reverse)
2387 2384
2388 2385 def reverse(self):
2389 2386 self._subset.reverse()
2390 2387
2391 2388 def set(self):
2392 2389 return set([r for r in self])
2393 2390
2394 2391 def isascending(self):
2395 2392 return False
2396 2393
2397 2394 def isdescending(self):
2398 2395 return False
2399 2396
2400 2397 def filter(self, l):
2401 2398 return lazyset(self, l)
2402 2399
2403 2400 class orderedlazyset(_orderedsetmixin, lazyset):
2404 2401 """Subclass of lazyset which subset can be ordered either ascending or
2405 2402 descendingly
2406 2403 """
2407 2404 def __init__(self, subset, condition, ascending=True):
2408 2405 super(orderedlazyset, self).__init__(subset, condition)
2409 2406 self._ascending = ascending
2410 2407
2411 2408 def filter(self, l):
2412 2409 return orderedlazyset(self, l, ascending=self._ascending)
2413 2410
2414 2411 def ascending(self):
2415 2412 if not self._ascending:
2416 2413 self.reverse()
2417 2414
2418 2415 def descending(self):
2419 2416 if self._ascending:
2420 2417 self.reverse()
2421 2418
2422 2419 def __and__(self, x):
2423 2420 return orderedlazyset(self, lambda r: r in x,
2424 2421 ascending=self._ascending)
2425 2422
2426 2423 def __sub__(self, x):
2427 2424 return orderedlazyset(self, lambda r: r not in x,
2428 2425 ascending=self._ascending)
2429 2426
2430 2427 def __add__(self, x):
2431 2428 kwargs = {}
2432 2429 if self.isascending() and x.isascending():
2433 2430 kwargs['ascending'] = True
2434 2431 if self.isdescending() and x.isdescending():
2435 2432 kwargs['ascending'] = False
2436 2433 return _addset(self, x, **kwargs)
2437 2434
2438 2435 def sort(self, reverse=False):
2439 2436 if reverse:
2440 2437 if self._ascending:
2441 2438 self._subset.sort(reverse=reverse)
2442 2439 else:
2443 2440 if not self._ascending:
2444 2441 self._subset.sort(reverse=reverse)
2445 2442 self._ascending = not reverse
2446 2443
2447 2444 def isascending(self):
2448 2445 return self._ascending
2449 2446
2450 2447 def isdescending(self):
2451 2448 return not self._ascending
2452 2449
2453 2450 def reverse(self):
2454 2451 self._subset.reverse()
2455 2452 self._ascending = not self._ascending
2456 2453
2457 2454 class _addset(_orderedsetmixin):
2458 2455 """Represent the addition of two sets
2459 2456
2460 2457 Wrapper structure for lazily adding two structures without losing much
2461 2458 performance on the __contains__ method
2462 2459
2463 2460 If the ascending attribute is set, that means the two structures are
2464 2461 ordered in either an ascending or descending way. Therefore, we can add
2465 2462 them maintaining the order by iterating over both at the same time
2466 2463
2467 2464 This class does not duck-type baseset and it's only supposed to be used
2468 2465 internally
2469 2466 """
2470 2467 def __init__(self, revs1, revs2, ascending=None):
2471 2468 self._r1 = revs1
2472 2469 self._r2 = revs2
2473 2470 self._iter = None
2474 2471 self._ascending = ascending
2475 2472 self._genlist = None
2476 2473
2477 2474 def __len__(self):
2478 2475 return len(self._list)
2479 2476
2480 2477 @util.propertycache
2481 2478 def _list(self):
2482 2479 if not self._genlist:
2483 2480 self._genlist = baseset(self._iterator())
2484 2481 return self._genlist
2485 2482
2486 2483 def filter(self, condition):
2487 2484 if self._ascending is not None:
2488 2485 return orderedlazyset(self, condition, ascending=self._ascending)
2489 2486 return lazyset(self, condition)
2490 2487
2491 2488 def ascending(self):
2492 2489 if self._ascending is None:
2493 2490 self.sort()
2494 2491 self._ascending = True
2495 2492 else:
2496 2493 if not self._ascending:
2497 2494 self.reverse()
2498 2495
2499 2496 def descending(self):
2500 2497 if self._ascending is None:
2501 2498 self.sort(reverse=True)
2502 2499 self._ascending = False
2503 2500 else:
2504 2501 if self._ascending:
2505 2502 self.reverse()
2506 2503
2507 2504 def __and__(self, other):
2508 2505 filterfunc = other.__contains__
2509 2506 if self._ascending is not None:
2510 2507 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2511 2508 return lazyset(self, filterfunc)
2512 2509
2513 2510 def __sub__(self, other):
2514 2511 filterfunc = lambda r: r not in other
2515 2512 if self._ascending is not None:
2516 2513 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2517 2514 return lazyset(self, filterfunc)
2518 2515
2519 2516 def __add__(self, other):
2520 2517 """When both collections are ascending or descending, preserve the order
2521 2518 """
2522 2519 kwargs = {}
2523 2520 if self._ascending is not None:
2524 2521 if self.isascending() and other.isascending():
2525 2522 kwargs['ascending'] = True
2526 2523 if self.isdescending() and other.isdescending():
2527 2524 kwargs['ascending'] = False
2528 2525 return _addset(self, other, **kwargs)
2529 2526
2530 2527 def _iterator(self):
2531 2528 """Iterate over both collections without repeating elements
2532 2529
2533 2530 If the ascending attribute is not set, iterate over the first one and
2534 2531 then over the second one checking for membership on the first one so we
2535 2532 don't yield any duplicates.
2536 2533
2537 2534 If the ascending attribute is set, iterate over both collections at the
2538 2535 same time, yielding only one value at a time in the given order.
2539 2536 """
2540 2537 if not self._iter:
2541 2538 def gen():
2542 2539 if self._ascending is None:
2543 2540 for r in self._r1:
2544 2541 yield r
2545 2542 s = self._r1.set()
2546 2543 for r in self._r2:
2547 2544 if r not in s:
2548 2545 yield r
2549 2546 else:
2550 2547 iter1 = iter(self._r1)
2551 2548 iter2 = iter(self._r2)
2552 2549
2553 2550 val1 = None
2554 2551 val2 = None
2555 2552
2556 2553 choice = max
2557 2554 if self._ascending:
2558 2555 choice = min
2559 2556 try:
2560 2557 # Consume both iterators in an ordered way until one is
2561 2558 # empty
2562 2559 while True:
2563 2560 if val1 is None:
2564 2561 val1 = iter1.next()
2565 2562 if val2 is None:
2566 2563 val2 = iter2.next()
2567 2564 next = choice(val1, val2)
2568 2565 yield next
2569 2566 if val1 == next:
2570 2567 val1 = None
2571 2568 if val2 == next:
2572 2569 val2 = None
2573 2570 except StopIteration:
2574 2571 # Flush any remaining values and consume the other one
2575 2572 it = iter2
2576 2573 if val1 is not None:
2577 2574 yield val1
2578 2575 it = iter1
2579 2576 elif val2 is not None:
2580 2577 # might have been equality and both are empty
2581 2578 yield val2
2582 2579 for val in it:
2583 2580 yield val
2584 2581
2585 2582 self._iter = _generatorset(gen())
2586 2583
2587 2584 return self._iter
2588 2585
2589 2586 def __iter__(self):
2590 2587 if self._genlist:
2591 2588 return iter(self._genlist)
2592 2589 return iter(self._iterator())
2593 2590
2594 2591 def __contains__(self, x):
2595 2592 return x in self._r1 or x in self._r2
2596 2593
2597 2594 def set(self):
2598 2595 return self
2599 2596
2600 2597 def sort(self, reverse=False):
2601 2598 """Sort the added set
2602 2599
2603 2600 For this we use the cached list with all the generated values and if we
2604 2601 know they are ascending or descending we can sort them in a smart way.
2605 2602 """
2606 2603 if self._ascending is None:
2607 2604 self._list.sort(reverse=reverse)
2608 2605 self._ascending = not reverse
2609 2606 else:
2610 2607 if bool(self._ascending) == bool(reverse):
2611 2608 self.reverse()
2612 2609
2613 2610 def isascending(self):
2614 2611 return self._ascending is not None and self._ascending
2615 2612
2616 2613 def isdescending(self):
2617 2614 return self._ascending is not None and not self._ascending
2618 2615
2619 2616 def reverse(self):
2620 2617 self._list.reverse()
2621 2618 if self._ascending is not None:
2622 2619 self._ascending = not self._ascending
2623 2620
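# Illustrative sketch (not part of the original file): when both sides are
# known to be ascending, _addset merges them lazily without duplicates:
#
#   s = _addset(baseset([1, 3, 5]), baseset([2, 3, 4]), ascending=True)
#   list(s)  -> [1, 2, 3, 4, 5]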
2624 2621 class _generatorset(object):
2625 2622 """Wrap a generator for lazy iteration
2626 2623
2627 2624 Wrapper structure for generators that provides lazy membership and can
2628 2625 be iterated more than once.
2629 2626 When asked for membership it generates values until either it finds the
2630 2627 requested one or has gone through all the elements in the generator
2631 2628
2632 2629 This class does not duck-type baseset and it's only supposed to be used
2633 2630 internally
2634 2631 """
2635 2632 def __init__(self, gen):
2636 2633 """
2637 2634 gen: a generator producing the values for the generatorset.
2638 2635 """
2639 2636 self._gen = gen
2640 2637 self._cache = {}
2641 2638 self._genlist = baseset([])
2642 2639 self._finished = False
2643 2640
2644 2641 def __contains__(self, x):
2645 2642 if x in self._cache:
2646 2643 return self._cache[x]
2647 2644
2648 2645 # Use new values only, as existing values would be cached.
2649 2646 for l in self._consumegen():
2650 2647 if l == x:
2651 2648 return True
2652 2649
2653 2650 self._cache[x] = False
2654 2651 return False
2655 2652
2656 2653 def __iter__(self):
2657 2654 if self._finished:
2658 2655 for x in self._genlist:
2659 2656 yield x
2660 2657 return
2661 2658
2662 2659 i = 0
2663 2660 genlist = self._genlist
2664 2661 consume = self._consumegen()
2665 2662 while True:
2666 2663 if i < len(genlist):
2667 2664 yield genlist[i]
2668 2665 else:
2669 2666 yield consume.next()
2670 2667 i += 1
2671 2668
2672 2669 def _consumegen(self):
2673 2670 for item in self._gen:
2674 2671 self._cache[item] = True
2675 2672 self._genlist.append(item)
2676 2673 yield item
2677 2674 self._finished = True
2678 2675
2679 2676 def set(self):
2680 2677 return self
2681 2678
2682 2679 def sort(self, reverse=False):
2683 2680 if not self._finished:
2684 2681 for i in self:
2685 2682 continue
2686 2683 self._genlist.sort(reverse=reverse)
2687 2684
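# Illustrative sketch (not part of the original file): membership tests on a
# _generatorset only consume the wrapped generator as far as needed, and the
# values produced so far are remembered so the set can be iterated again:
#
#   gs = _generatorset(iter([4, 2, 7]))
#   2 in gs     -> True   # generator consumed up to and including 2
#   list(gs)    -> [4, 2, 7]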
2688 2685 class _ascgeneratorset(_generatorset):
2689 2686 """Wrap a generator of ascending elements for lazy iteration
2690 2687
2691 2688 Same structure as _generatorset but, when asked for membership, it stops
2692 2689 iterating as soon as it goes past the requested value without finding it
2693 2690
2694 2691 This class does not duck-type baseset and it's only supposed to be used
2695 2692 internally
2696 2693 """
2697 2694 def __contains__(self, x):
2698 2695 if x in self._cache:
2699 2696 return self._cache[x]
2700 2697
2701 2698 # Use new values only, as existing values would be cached.
2702 2699 for l in self._consumegen():
2703 2700 if l == x:
2704 2701 return True
2705 2702 if l > x:
2706 2703 break
2707 2704
2708 2705 self._cache[x] = False
2709 2706 return False
2710 2707
2711 2708 class _descgeneratorset(_generatorset):
2712 2709 """Wrap a generator of descending elements for lazy iteration
2713 2710
2714 2711 Same structure as _generatorset but, when asked for membership, it stops
2715 2712 iterating as soon as it goes past the requested value without finding it
2716 2713
2717 2714 This class does not duck-type baseset and it's only supposed to be used
2718 2715 internally
2719 2716 """
2720 2717 def __contains__(self, x):
2721 2718 if x in self._cache:
2722 2719 return self._cache[x]
2723 2720
2724 2721 # Use new values only, as existing values would be cached.
2725 2722 for l in self._consumegen():
2726 2723 if l == x:
2727 2724 return True
2728 2725 if l < x:
2729 2726 break
2730 2727
2731 2728 self._cache[x] = False
2732 2729 return False
2733 2730
2734 2731 class spanset(_orderedsetmixin):
2735 2732 """Duck type for baseset class which represents a range of revisions and
2736 2733 can work lazily and without having all the range in memory
2737 2734
2738 2735 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2739 2736 notable points:
2740 2737 - when x > y it will be automatically descending,
2741 2738 - revisions filtered by this repoview will be skipped.
2742 2739
2743 2740 """
2744 2741 def __init__(self, repo, start=0, end=None):
2745 2742 """
2746 2743 start: first revision included in the set
2747 2744 (defaults to 0)
2748 2745 end: first revision excluded (last+1)
2749 2746 (defaults to len(repo))
2750 2747
2751 2748 Spanset will be descending if `end` < `start`.
2752 2749 """
2753 2750 self._start = start
2754 2751 if end is not None:
2755 2752 self._end = end
2756 2753 else:
2757 2754 self._end = len(repo)
2758 2755 self._hiddenrevs = repo.changelog.filteredrevs
2759 2756
2760 2757 def ascending(self):
2761 2758 if self._start > self._end:
2762 2759 self.reverse()
2763 2760
2764 2761 def descending(self):
2765 2762 if self._start < self._end:
2766 2763 self.reverse()
2767 2764
2768 2765 def _contained(self, rev):
2769 2766 return (rev <= self._start and rev > self._end) or (rev >= self._start
2770 2767 and rev < self._end)
2771 2768
2772 2769 def __iter__(self):
2773 2770 if self._start <= self._end:
2774 2771 iterrange = xrange(self._start, self._end)
2775 2772 else:
2776 2773 iterrange = xrange(self._start, self._end, -1)
2777 2774
2778 2775 if self._hiddenrevs:
2779 2776 s = self._hiddenrevs
2780 2777 for r in iterrange:
2781 2778 if r not in s:
2782 2779 yield r
2783 2780 else:
2784 2781 for r in iterrange:
2785 2782 yield r
2786 2783
2787 2784 def __contains__(self, x):
2788 2785 return self._contained(x) and not (self._hiddenrevs and x in
2789 2786 self._hiddenrevs)
2790 2787
2791 2788 def __nonzero__(self):
2792 2789 for r in self:
2793 2790 return True
2794 2791 return False
2795 2792
2796 2793 def __and__(self, x):
2797 2794 if isinstance(x, baseset):
2798 2795 x = x.set()
2799 2796 if self._start <= self._end:
2800 2797 return orderedlazyset(self, lambda r: r in x)
2801 2798 else:
2802 2799 return orderedlazyset(self, lambda r: r in x, ascending=False)
2803 2800
2804 2801 def __sub__(self, x):
2805 2802 if isinstance(x, baseset):
2806 2803 x = x.set()
2807 2804 if self._start <= self._end:
2808 2805 return orderedlazyset(self, lambda r: r not in x)
2809 2806 else:
2810 2807 return orderedlazyset(self, lambda r: r not in x, ascending=False)
2811 2808
2812 2809 def __add__(self, x):
2813 2810 kwargs = {}
2814 2811 if self.isascending() and x.isascending():
2815 2812 kwargs['ascending'] = True
2816 2813 if self.isdescending() and x.isdescending():
2817 2814 kwargs['ascending'] = False
2818 2815 return _addset(self, x, **kwargs)
2819 2816
2820 2817 def __len__(self):
2821 2818 if not self._hiddenrevs:
2822 2819 return abs(self._end - self._start)
2823 2820 else:
2824 2821 count = 0
2825 2822 for rev in self._hiddenrevs:
2826 2823 if self._contained(rev):
2827 2824 count += 1
2828 2825 return abs(self._end - self._start) - count
2829 2826
2830 2827 def __getitem__(self, x):
2831 2828 # Basic implementation to be changed in future patches.
2832 2829 l = baseset([r for r in self])
2833 2830 return l[x]
2834 2831
2835 2832 def sort(self, reverse=False):
2836 2833 if bool(reverse) != (self._start > self._end):
2837 2834 self.reverse()
2838 2835
2839 2836 def reverse(self):
2840 2837 # Just switch the _start and _end parameters
2841 2838 if self._start <= self._end:
2842 2839 self._start, self._end = self._end - 1, self._start - 1
2843 2840 else:
2844 2841 self._start, self._end = self._end + 1, self._start + 1
2845 2842
2846 2843 def set(self):
2847 2844 return self
2848 2845
2849 2846 def isascending(self):
2850 2847 return self._start < self._end
2851 2848
2852 2849 def isdescending(self):
2853 2850 return self._start > self._end
2854 2851
2855 2852 def filter(self, l):
2856 2853 if self._start <= self._end:
2857 2854 return orderedlazyset(self, l)
2858 2855 else:
2859 2856 return orderedlazyset(self, l, ascending=False)
2860 2857
2861 2858 # tell hggettext to extract docstrings from these functions:
2862 2859 i18nfunctions = symbols.values()