revset, i18n: add translator comment to "only"
Wagner Bruna
r21173:d4daebb2 stable
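
This change adds a translator comment ahead of the "only" error message so that the string extractor records it next to the message. As context, a minimal, self-contained sketch of the convention (using plain gettext rather than Mercurial's i18n module; the function name is made up for illustration):

    # sketch.py - illustrative only; the real code imports _ from mercurial's i18n
    import gettext

    _ = gettext.gettext

    def only_usage_error():
        # i18n: "only" is a keyword
        return _('only takes one or two arguments')

Comments tagged "i18n:" that sit directly above a _() call are copied into the message catalog as translator comments when extraction is run with a comment tag (for example, xgettext --add-comments=i18n; the exact command used by Mercurial's build is not shown here), telling translators that the word "only" must be left untranslated.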
@@ -1,2859 +1,2860 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return _descgeneratorset(iterate())
50 50
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return _ascgeneratorset(iterate())
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset([])
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = baseset(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset([])
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
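# A rough reading of the table above, assuming the generic Pratt-style parser
# in parser.py: each token maps to (binding strength, prefix action,
# infix action[, suffix action]).  For example, "-" binds at 5 and carries
# both a prefix form ("negate", binding 19) and an infix form ("minus",
# binding 5), so "-set" parses as negation while "a - b" parses as set
# difference.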
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
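# One more worked example of the token stream, in the same format as the
# doctest above (traced by hand from the rules in this function, so purely
# illustrative):
#
#   >>> list(tokenize("1..2"))
#   [('symbol', '1', 0), ('..', None, 1), ('symbol', '2', 3), ('end', None, 4)]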
213 213
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'set'):
239 239 return s
240 240 return baseset(s)
241 241
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset([])
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 cl = baseset(repo.changelog)
269 269 m = getset(repo, cl, x)
270 270 n = getset(repo, cl, y)
271 271
272 272 if not m or not n:
273 273 return baseset([])
274 274 m, n = m[0], n[-1]
275 275
276 276 if m < n:
277 277 r = spanset(repo, m, n + 1)
278 278 else:
279 279 r = spanset(repo, m, n - 1)
280 280 return r & subset
281 281
282 282 def dagrange(repo, subset, x, y):
283 283 r = spanset(repo)
284 284 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
285 285 s = subset.set()
286 286 return xs.filter(lambda r: r in s)
287 287
288 288 def andset(repo, subset, x, y):
289 289 return getset(repo, getset(repo, subset, x), y)
290 290
291 291 def orset(repo, subset, x, y):
292 292 xl = getset(repo, subset, x)
293 293 yl = getset(repo, subset - xl, y)
294 294 return xl + yl
295 295
296 296 def notset(repo, subset, x):
297 297 return subset - getset(repo, subset, x)
298 298
299 299 def listset(repo, subset, a, b):
300 300 raise error.ParseError(_("can't use a list in this context"))
301 301
302 302 def func(repo, subset, a, b):
303 303 if a[0] == 'symbol' and a[1] in symbols:
304 304 return symbols[a[1]](repo, subset, b)
305 305 raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
309 309 def adds(repo, subset, x):
310 310 """``adds(pattern)``
311 311 Changesets that add a file matching pattern.
312 312
313 313 The pattern without explicit kind like ``glob:`` is expected to be
314 314 relative to the current directory and match against a file or a
315 315 directory.
316 316 """
317 317 # i18n: "adds" is a keyword
318 318 pat = getstring(x, _("adds requires a pattern"))
319 319 return checkstatus(repo, subset, pat, 1)
320 320
321 321 def ancestor(repo, subset, x):
322 322 """``ancestor(*changeset)``
323 323 A greatest common ancestor of the changesets.
324 324
325 325 Accepts 0 or more changesets.
326 326 Will return an empty list when passed no args.
327 327 Greatest common ancestor of a single changeset is that changeset.
328 328 """
329 329 # i18n: "ancestor" is a keyword
330 330 l = getlist(x)
331 331 rl = spanset(repo)
332 332 anc = None
333 333
334 334 # (getset(repo, rl, i) for i in l) generates a list of lists
335 335 for revs in (getset(repo, rl, i) for i in l):
336 336 for r in revs:
337 337 if anc is None:
338 338 anc = repo[r]
339 339 else:
340 340 anc = anc.ancestor(repo[r])
341 341
342 342 if anc is not None and anc.rev() in subset:
343 343 return baseset([anc.rev()])
344 344 return baseset([])
345 345
346 346 def _ancestors(repo, subset, x, followfirst=False):
347 347 args = getset(repo, spanset(repo), x)
348 348 if not args:
349 349 return baseset([])
350 350 s = _revancestors(repo, args, followfirst)
351 351 return subset.filter(lambda r: r in s)
352 352
353 353 def ancestors(repo, subset, x):
354 354 """``ancestors(set)``
355 355 Changesets that are ancestors of a changeset in set.
356 356 """
357 357 return _ancestors(repo, subset, x)
358 358
359 359 def _firstancestors(repo, subset, x):
360 360 # ``_firstancestors(set)``
361 361 # Like ``ancestors(set)`` but follows only the first parents.
362 362 return _ancestors(repo, subset, x, followfirst=True)
363 363
364 364 def ancestorspec(repo, subset, x, n):
365 365 """``set~n``
366 366 Changesets that are the Nth ancestor (first parents only) of a changeset
367 367 in set.
368 368 """
369 369 try:
370 370 n = int(n[1])
371 371 except (TypeError, ValueError):
372 372 raise error.ParseError(_("~ expects a number"))
373 373 ps = set()
374 374 cl = repo.changelog
375 375 for r in getset(repo, baseset(cl), x):
376 376 for i in range(n):
377 377 r = cl.parentrevs(r)[0]
378 378 ps.add(r)
379 379 return subset.filter(lambda r: r in ps)
380 380
381 381 def author(repo, subset, x):
382 382 """``author(string)``
383 383 Alias for ``user(string)``.
384 384 """
385 385 # i18n: "author" is a keyword
386 386 n = encoding.lower(getstring(x, _("author requires a string")))
387 387 kind, pattern, matcher = _substringmatcher(n)
388 388 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
389 389
390 390 def only(repo, subset, x):
391 391 """``only(set, [set])``
392 392 Changesets that are ancestors of the first set that are not ancestors
393 393 of any other head in the repo. If a second set is specified, the result
394 394 is ancestors of the first set that are not ancestors of the second set
395 395 (i.e. ::<set1> - ::<set2>).
396 396 """
397 397 cl = repo.changelog
398 # i18n: "only" is a keyword
398 399 args = getargs(x, 1, 2, _('only takes one or two arguments'))
399 400 include = getset(repo, spanset(repo), args[0]).set()
400 401 if len(args) == 1:
401 402 descendants = set(_revdescendants(repo, include, False))
402 403 exclude = [rev for rev in cl.headrevs()
403 404 if not rev in descendants and not rev in include]
404 405 else:
405 406 exclude = getset(repo, spanset(repo), args[1])
406 407
407 408 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
408 409 return lazyset(subset, lambda x: x in results)
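# Illustrative usage of the predicate above (the branch and bookmark names
# are hypothetical, not part of this change):
#
#   hg log -r "only(release)"            # ancestors of release reachable from
#                                        # no other head
#   hg log -r "only(feature, default)"   # equivalent to ::feature - ::default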
409 410
410 411 def bisect(repo, subset, x):
411 412 """``bisect(string)``
412 413 Changesets marked in the specified bisect status:
413 414
414 415 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
415 416 - ``goods``, ``bads`` : csets topologically good/bad
416 417 - ``range`` : csets taking part in the bisection
417 418 - ``pruned`` : csets that are goods, bads or skipped
418 419 - ``untested`` : csets whose fate is yet unknown
419 420 - ``ignored`` : csets ignored due to DAG topology
420 421 - ``current`` : the cset currently being bisected
421 422 """
422 423 # i18n: "bisect" is a keyword
423 424 status = getstring(x, _("bisect requires a string")).lower()
424 425 state = set(hbisect.get(repo, status))
425 426 return subset.filter(lambda r: r in state)
426 427
427 428 # Backward-compatibility
428 429 # - no help entry so that we do not advertise it any more
429 430 def bisected(repo, subset, x):
430 431 return bisect(repo, subset, x)
431 432
432 433 def bookmark(repo, subset, x):
433 434 """``bookmark([name])``
434 435 The named bookmark or all bookmarks.
435 436
436 437 If `name` starts with `re:`, the remainder of the name is treated as
437 438 a regular expression. To match a bookmark that actually starts with `re:`,
438 439 use the prefix `literal:`.
439 440 """
440 441 # i18n: "bookmark" is a keyword
441 442 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
442 443 if args:
443 444 bm = getstring(args[0],
444 445 # i18n: "bookmark" is a keyword
445 446 _('the argument to bookmark must be a string'))
446 447 kind, pattern, matcher = _stringmatcher(bm)
447 448 if kind == 'literal':
448 449 bmrev = repo._bookmarks.get(bm, None)
449 450 if not bmrev:
450 451 raise util.Abort(_("bookmark '%s' does not exist") % bm)
451 452 bmrev = repo[bmrev].rev()
452 453 return subset.filter(lambda r: r == bmrev)
453 454 else:
454 455 matchrevs = set()
455 456 for name, bmrev in repo._bookmarks.iteritems():
456 457 if matcher(name):
457 458 matchrevs.add(bmrev)
458 459 if not matchrevs:
459 460 raise util.Abort(_("no bookmarks exist that match '%s'")
460 461 % pattern)
461 462 bmrevs = set()
462 463 for bmrev in matchrevs:
463 464 bmrevs.add(repo[bmrev].rev())
464 465 return subset & bmrevs
465 466
466 467 bms = set([repo[r].rev()
467 468 for r in repo._bookmarks.values()])
468 469 return subset.filter(lambda r: r in bms)
469 470
470 471 def branch(repo, subset, x):
471 472 """``branch(string or set)``
472 473 All changesets belonging to the given branch or the branches of the given
473 474 changesets.
474 475
475 476 If `string` starts with `re:`, the remainder of the name is treated as
476 477 a regular expression. To match a branch that actually starts with `re:`,
477 478 use the prefix `literal:`.
478 479 """
479 480 try:
480 481 b = getstring(x, '')
481 482 except error.ParseError:
482 483 # not a string, but another revspec, e.g. tip()
483 484 pass
484 485 else:
485 486 kind, pattern, matcher = _stringmatcher(b)
486 487 if kind == 'literal':
487 488 # note: falls through to the revspec case if no branch with
488 489 # this name exists
489 490 if pattern in repo.branchmap():
490 491 return subset.filter(lambda r: matcher(repo[r].branch()))
491 492 else:
492 493 return subset.filter(lambda r: matcher(repo[r].branch()))
493 494
494 495 s = getset(repo, spanset(repo), x)
495 496 b = set()
496 497 for r in s:
497 498 b.add(repo[r].branch())
498 499 s = s.set()
499 500 return subset.filter(lambda r: r in s or repo[r].branch() in b)
500 501
501 502 def bumped(repo, subset, x):
502 503 """``bumped()``
503 504 Mutable changesets marked as successors of public changesets.
504 505
505 506 Only non-public and non-obsolete changesets can be `bumped`.
506 507 """
507 508 # i18n: "bumped" is a keyword
508 509 getargs(x, 0, 0, _("bumped takes no arguments"))
509 510 bumped = obsmod.getrevs(repo, 'bumped')
510 511 return subset & bumped
511 512
512 513 def bundle(repo, subset, x):
513 514 """``bundle()``
514 515 Changesets in the bundle.
515 516
516 517 Bundle must be specified by the -R option."""
517 518
518 519 try:
519 520 bundlerevs = repo.changelog.bundlerevs
520 521 except AttributeError:
521 522 raise util.Abort(_("no bundle provided - specify with -R"))
522 523 return subset & bundlerevs
523 524
524 525 def checkstatus(repo, subset, pat, field):
525 526 hasset = matchmod.patkind(pat) == 'set'
526 527
527 528 def matches(x):
528 529 m = None
529 530 fname = None
530 531 c = repo[x]
531 532 if not m or hasset:
532 533 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
533 534 if not m.anypats() and len(m.files()) == 1:
534 535 fname = m.files()[0]
535 536 if fname is not None:
536 537 if fname not in c.files():
537 538 return False
538 539 else:
539 540 for f in c.files():
540 541 if m(f):
541 542 break
542 543 else:
543 544 return False
544 545 files = repo.status(c.p1().node(), c.node())[field]
545 546 if fname is not None:
546 547 if fname in files:
547 548 return True
548 549 else:
549 550 for f in files:
550 551 if m(f):
551 552 return True
552 553
553 554 return subset.filter(matches)
554 555
555 556 def _children(repo, narrow, parentset):
556 557 cs = set()
557 558 if not parentset:
558 559 return baseset(cs)
559 560 pr = repo.changelog.parentrevs
560 561 minrev = min(parentset)
561 562 for r in narrow:
562 563 if r <= minrev:
563 564 continue
564 565 for p in pr(r):
565 566 if p in parentset:
566 567 cs.add(r)
567 568 return baseset(cs)
568 569
569 570 def children(repo, subset, x):
570 571 """``children(set)``
571 572 Child changesets of changesets in set.
572 573 """
573 574 s = getset(repo, baseset(repo), x).set()
574 575 cs = _children(repo, subset, s)
575 576 return subset & cs
576 577
577 578 def closed(repo, subset, x):
578 579 """``closed()``
579 580 Changeset is closed.
580 581 """
581 582 # i18n: "closed" is a keyword
582 583 getargs(x, 0, 0, _("closed takes no arguments"))
583 584 return subset.filter(lambda r: repo[r].closesbranch())
584 585
585 586 def contains(repo, subset, x):
586 587 """``contains(pattern)``
587 588 Revision contains a file matching pattern. See :hg:`help patterns`
588 589 for information about file patterns.
589 590
590 591 The pattern without explicit kind like ``glob:`` is expected to be
591 592 relative to the current directory and match against a file exactly
592 593 for efficiency.
593 594 """
594 595 # i18n: "contains" is a keyword
595 596 pat = getstring(x, _("contains requires a pattern"))
596 597
597 598 def matches(x):
598 599 if not matchmod.patkind(pat):
599 600 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
600 601 if pats in repo[x]:
601 602 return True
602 603 else:
603 604 c = repo[x]
604 605 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
605 606 for f in c.manifest():
606 607 if m(f):
607 608 return True
608 609 return False
609 610
610 611 return subset.filter(matches)
611 612
612 613 def converted(repo, subset, x):
613 614 """``converted([id])``
614 615 Changesets converted from the given identifier in the old repository if
615 616 present, or all converted changesets if no identifier is specified.
616 617 """
617 618
618 619 # There is exactly no chance of resolving the revision, so do a simple
619 620 # string compare and hope for the best
620 621
621 622 rev = None
622 623 # i18n: "converted" is a keyword
623 624 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
624 625 if l:
625 626 # i18n: "converted" is a keyword
626 627 rev = getstring(l[0], _('converted requires a revision'))
627 628
628 629 def _matchvalue(r):
629 630 source = repo[r].extra().get('convert_revision', None)
630 631 return source is not None and (rev is None or source.startswith(rev))
631 632
632 633 return subset.filter(lambda r: _matchvalue(r))
633 634
634 635 def date(repo, subset, x):
635 636 """``date(interval)``
636 637 Changesets within the interval, see :hg:`help dates`.
637 638 """
638 639 # i18n: "date" is a keyword
639 640 ds = getstring(x, _("date requires a string"))
640 641 dm = util.matchdate(ds)
641 642 return subset.filter(lambda x: dm(repo[x].date()[0]))
642 643
643 644 def desc(repo, subset, x):
644 645 """``desc(string)``
645 646 Search commit message for string. The match is case-insensitive.
646 647 """
647 648 # i18n: "desc" is a keyword
648 649 ds = encoding.lower(getstring(x, _("desc requires a string")))
649 650
650 651 def matches(x):
651 652 c = repo[x]
652 653 return ds in encoding.lower(c.description())
653 654
654 655 return subset.filter(matches)
655 656
656 657 def _descendants(repo, subset, x, followfirst=False):
657 658 args = getset(repo, spanset(repo), x)
658 659 if not args:
659 660 return baseset([])
660 661 s = _revdescendants(repo, args, followfirst)
661 662
662 663 # Both sets need to be ascending in order to lazily return the union
663 664 # in the correct order.
664 665 args.ascending()
665 666
666 667 subsetset = subset.set()
667 668 result = (orderedlazyset(s, subsetset.__contains__, ascending=True) +
668 669 orderedlazyset(args, subsetset.__contains__, ascending=True))
669 670
670 671 # Wrap result in a lazyset since it's an _addset, which doesn't implement
671 672 # all the necessary functions to be consumed by callers.
672 673 return orderedlazyset(result, lambda r: True, ascending=True)
673 674
674 675 def descendants(repo, subset, x):
675 676 """``descendants(set)``
676 677 Changesets which are descendants of changesets in set.
677 678 """
678 679 return _descendants(repo, subset, x)
679 680
680 681 def _firstdescendants(repo, subset, x):
681 682 # ``_firstdescendants(set)``
682 683 # Like ``descendants(set)`` but follows only the first parents.
683 684 return _descendants(repo, subset, x, followfirst=True)
684 685
685 686 def destination(repo, subset, x):
686 687 """``destination([set])``
687 688 Changesets that were created by a graft, transplant or rebase operation,
688 689 with the given revisions specified as the source. Omitting the optional set
689 690 is the same as passing all().
690 691 """
691 692 if x is not None:
692 693 args = getset(repo, spanset(repo), x).set()
693 694 else:
694 695 args = getall(repo, spanset(repo), x).set()
695 696
696 697 dests = set()
697 698
698 699 # subset contains all of the possible destinations that can be returned, so
699 700 # iterate over them and see if their source(s) were provided in the args.
700 701 # Even if the immediate src of r is not in the args, src's source (or
701 702 # further back) may be. Scanning back further than the immediate src allows
702 703 # transitive transplants and rebases to yield the same results as transitive
703 704 # grafts.
704 705 for r in subset:
705 706 src = _getrevsource(repo, r)
706 707 lineage = None
707 708
708 709 while src is not None:
709 710 if lineage is None:
710 711 lineage = list()
711 712
712 713 lineage.append(r)
713 714
714 715 # The visited lineage is a match if the current source is in the arg
715 716 # set. Since every candidate dest is visited by way of iterating
716 717 # subset, any dests further back in the lineage will be tested by a
717 718 # different iteration over subset. Likewise, if the src was already
718 719 # selected, the current lineage can be selected without going back
719 720 # further.
720 721 if src in args or src in dests:
721 722 dests.update(lineage)
722 723 break
723 724
724 725 r = src
725 726 src = _getrevsource(repo, r)
726 727
727 728 return subset.filter(lambda r: r in dests)
728 729
729 730 def divergent(repo, subset, x):
730 731 """``divergent()``
731 732 Final successors of changesets with an alternative set of final successors.
732 733 """
733 734 # i18n: "divergent" is a keyword
734 735 getargs(x, 0, 0, _("divergent takes no arguments"))
735 736 divergent = obsmod.getrevs(repo, 'divergent')
736 737 return subset.filter(lambda r: r in divergent)
737 738
738 739 def draft(repo, subset, x):
739 740 """``draft()``
740 741 Changeset in draft phase."""
741 742 # i18n: "draft" is a keyword
742 743 getargs(x, 0, 0, _("draft takes no arguments"))
743 744 pc = repo._phasecache
744 745 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
745 746
746 747 def extinct(repo, subset, x):
747 748 """``extinct()``
748 749 Obsolete changesets with obsolete descendants only.
749 750 """
750 751 # i18n: "extinct" is a keyword
751 752 getargs(x, 0, 0, _("extinct takes no arguments"))
752 753 extincts = obsmod.getrevs(repo, 'extinct')
753 754 return subset & extincts
754 755
755 756 def extra(repo, subset, x):
756 757 """``extra(label, [value])``
757 758 Changesets with the given label in the extra metadata, with the given
758 759 optional value.
759 760
760 761 If `value` starts with `re:`, the remainder of the value is treated as
761 762 a regular expression. To match a value that actually starts with `re:`,
762 763 use the prefix `literal:`.
763 764 """
764 765
765 766 # i18n: "extra" is a keyword
766 767 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
767 768 # i18n: "extra" is a keyword
768 769 label = getstring(l[0], _('first argument to extra must be a string'))
769 770 value = None
770 771
771 772 if len(l) > 1:
772 773 # i18n: "extra" is a keyword
773 774 value = getstring(l[1], _('second argument to extra must be a string'))
774 775 kind, value, matcher = _stringmatcher(value)
775 776
776 777 def _matchvalue(r):
777 778 extra = repo[r].extra()
778 779 return label in extra and (value is None or matcher(extra[label]))
779 780
780 781 return subset.filter(lambda r: _matchvalue(r))
781 782
782 783 def filelog(repo, subset, x):
783 784 """``filelog(pattern)``
784 785 Changesets connected to the specified filelog.
785 786
786 787 For performance reasons, ``filelog()`` does not show every changeset
787 788 that affects the requested file(s). See :hg:`help log` for details. For
788 789 a slower, more accurate result, use ``file()``.
789 790
790 791 The pattern without explicit kind like ``glob:`` is expected to be
791 792 relative to the current directory and match against a file exactly
792 793 for efficiency.
793 794 """
794 795
795 796 # i18n: "filelog" is a keyword
796 797 pat = getstring(x, _("filelog requires a pattern"))
797 798 s = set()
798 799
799 800 if not matchmod.patkind(pat):
800 801 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
801 802 fl = repo.file(f)
802 803 for fr in fl:
803 804 s.add(fl.linkrev(fr))
804 805 else:
805 806 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
806 807 for f in repo[None]:
807 808 if m(f):
808 809 fl = repo.file(f)
809 810 for fr in fl:
810 811 s.add(fl.linkrev(fr))
811 812
812 813 return subset.filter(lambda r: r in s)
813 814
814 815 def first(repo, subset, x):
815 816 """``first(set, [n])``
816 817 An alias for limit().
817 818 """
818 819 return limit(repo, subset, x)
819 820
820 821 def _follow(repo, subset, x, name, followfirst=False):
821 822 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
822 823 c = repo['.']
823 824 if l:
824 825 x = getstring(l[0], _("%s expected a filename") % name)
825 826 if x in c:
826 827 cx = c[x]
827 828 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
828 829 # include the revision responsible for the most recent version
829 830 s.add(cx.linkrev())
830 831 else:
831 832 return baseset([])
832 833 else:
833 834 s = _revancestors(repo, baseset([c.rev()]), followfirst)
834 835
835 836 return subset.filter(lambda r: r in s)
836 837
837 838 def follow(repo, subset, x):
838 839 """``follow([file])``
839 840 An alias for ``::.`` (ancestors of the working copy's first parent).
840 841 If a filename is specified, the history of the given file is followed,
841 842 including copies.
842 843 """
843 844 return _follow(repo, subset, x, 'follow')
844 845
845 846 def _followfirst(repo, subset, x):
846 847 # ``followfirst([file])``
847 848 # Like ``follow([file])`` but follows only the first parent of
848 849 # every revision or file revision.
849 850 return _follow(repo, subset, x, '_followfirst', followfirst=True)
850 851
851 852 def getall(repo, subset, x):
852 853 """``all()``
853 854 All changesets, the same as ``0:tip``.
854 855 """
855 856 # i18n: "all" is a keyword
856 857 getargs(x, 0, 0, _("all takes no arguments"))
857 858 return subset
858 859
859 860 def grep(repo, subset, x):
860 861 """``grep(regex)``
861 862 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
862 863 to ensure special escape characters are handled correctly. Unlike
863 864 ``keyword(string)``, the match is case-sensitive.
864 865 """
865 866 try:
866 867 # i18n: "grep" is a keyword
867 868 gr = re.compile(getstring(x, _("grep requires a string")))
868 869 except re.error, e:
869 870 raise error.ParseError(_('invalid match pattern: %s') % e)
870 871
871 872 def matches(x):
872 873 c = repo[x]
873 874 for e in c.files() + [c.user(), c.description()]:
874 875 if gr.search(e):
875 876 return True
876 877 return False
877 878
878 879 return subset.filter(matches)
879 880
880 881 def _matchfiles(repo, subset, x):
881 882 # _matchfiles takes a revset list of prefixed arguments:
882 883 #
883 884 # [p:foo, i:bar, x:baz]
884 885 #
885 886 # builds a match object from them and filters subset. Allowed
886 887 # prefixes are 'p:' for regular patterns, 'i:' for include
887 888 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
888 889 # a revision identifier, or the empty string to reference the
889 890 # working directory, from which the match object is
890 891 # initialized. Use 'd:' to set the default matching mode, default
891 892 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
892 893
893 894 # i18n: "_matchfiles" is a keyword
894 895 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
895 896 pats, inc, exc = [], [], []
896 897 hasset = False
897 898 rev, default = None, None
898 899 for arg in l:
899 900 # i18n: "_matchfiles" is a keyword
900 901 s = getstring(arg, _("_matchfiles requires string arguments"))
901 902 prefix, value = s[:2], s[2:]
902 903 if prefix == 'p:':
903 904 pats.append(value)
904 905 elif prefix == 'i:':
905 906 inc.append(value)
906 907 elif prefix == 'x:':
907 908 exc.append(value)
908 909 elif prefix == 'r:':
909 910 if rev is not None:
910 911 # i18n: "_matchfiles" is a keyword
911 912 raise error.ParseError(_('_matchfiles expected at most one '
912 913 'revision'))
913 914 rev = value
914 915 elif prefix == 'd:':
915 916 if default is not None:
916 917 # i18n: "_matchfiles" is a keyword
917 918 raise error.ParseError(_('_matchfiles expected at most one '
918 919 'default mode'))
919 920 default = value
920 921 else:
921 922 # i18n: "_matchfiles" is a keyword
922 923 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
923 924 if not hasset and matchmod.patkind(value) == 'set':
924 925 hasset = True
925 926 if not default:
926 927 default = 'glob'
927 928
928 929 def matches(x):
929 930 m = None
930 931 c = repo[x]
931 932 if not m or (hasset and rev is None):
932 933 ctx = c
933 934 if rev is not None:
934 935 ctx = repo[rev or None]
935 936 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
936 937 exclude=exc, ctx=ctx, default=default)
937 938 for f in c.files():
938 939 if m(f):
939 940 return True
940 941 return False
941 942
942 943 return subset.filter(matches)
943 944
944 945 def hasfile(repo, subset, x):
945 946 """``file(pattern)``
946 947 Changesets affecting files matched by pattern.
947 948
948 949 For a faster but less accurate result, consider using ``filelog()``
949 950 instead.
950 951
951 952 This predicate uses ``glob:`` as the default kind of pattern.
952 953 """
953 954 # i18n: "file" is a keyword
954 955 pat = getstring(x, _("file requires a pattern"))
955 956 return _matchfiles(repo, subset, ('string', 'p:' + pat))
956 957
957 958 def head(repo, subset, x):
958 959 """``head()``
959 960 Changeset is a named branch head.
960 961 """
961 962 # i18n: "head" is a keyword
962 963 getargs(x, 0, 0, _("head takes no arguments"))
963 964 hs = set()
964 965 for b, ls in repo.branchmap().iteritems():
965 966 hs.update(repo[h].rev() for h in ls)
966 967 return baseset(hs).filter(subset.__contains__)
967 968
968 969 def heads(repo, subset, x):
969 970 """``heads(set)``
970 971 Members of set with no children in set.
971 972 """
972 973 s = getset(repo, subset, x)
973 974 ps = parents(repo, subset, x)
974 975 return s - ps
975 976
976 977 def hidden(repo, subset, x):
977 978 """``hidden()``
978 979 Hidden changesets.
979 980 """
980 981 # i18n: "hidden" is a keyword
981 982 getargs(x, 0, 0, _("hidden takes no arguments"))
982 983 hiddenrevs = repoview.filterrevs(repo, 'visible')
983 984 return subset & hiddenrevs
984 985
985 986 def keyword(repo, subset, x):
986 987 """``keyword(string)``
987 988 Search commit message, user name, and names of changed files for
988 989 string. The match is case-insensitive.
989 990 """
990 991 # i18n: "keyword" is a keyword
991 992 kw = encoding.lower(getstring(x, _("keyword requires a string")))
992 993
993 994 def matches(r):
994 995 c = repo[r]
995 996 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
996 997 c.description()])
997 998
998 999 return subset.filter(matches)
999 1000
1000 1001 def limit(repo, subset, x):
1001 1002 """``limit(set, [n])``
1002 1003 First n members of set, defaulting to 1.
1003 1004 """
1004 1005 # i18n: "limit" is a keyword
1005 1006 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1006 1007 try:
1007 1008 lim = 1
1008 1009 if len(l) == 2:
1009 1010 # i18n: "limit" is a keyword
1010 1011 lim = int(getstring(l[1], _("limit requires a number")))
1011 1012 except (TypeError, ValueError):
1012 1013 # i18n: "limit" is a keyword
1013 1014 raise error.ParseError(_("limit expects a number"))
1014 1015 ss = subset.set()
1015 1016 os = getset(repo, spanset(repo), l[0])
1016 1017 bs = baseset([])
1017 1018 it = iter(os)
1018 1019 for x in xrange(lim):
1019 1020 try:
1020 1021 y = it.next()
1021 1022 if y in ss:
1022 1023 bs.append(y)
1023 1024 except (StopIteration):
1024 1025 break
1025 1026 return bs
1026 1027
1027 1028 def last(repo, subset, x):
1028 1029 """``last(set, [n])``
1029 1030 Last n members of set, defaulting to 1.
1030 1031 """
1031 1032 # i18n: "last" is a keyword
1032 1033 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1033 1034 try:
1034 1035 lim = 1
1035 1036 if len(l) == 2:
1036 1037 # i18n: "last" is a keyword
1037 1038 lim = int(getstring(l[1], _("last requires a number")))
1038 1039 except (TypeError, ValueError):
1039 1040 # i18n: "last" is a keyword
1040 1041 raise error.ParseError(_("last expects a number"))
1041 1042 ss = subset.set()
1042 1043 os = getset(repo, spanset(repo), l[0])
1043 1044 os.reverse()
1044 1045 bs = baseset([])
1045 1046 it = iter(os)
1046 1047 for x in xrange(lim):
1047 1048 try:
1048 1049 y = it.next()
1049 1050 if y in ss:
1050 1051 bs.append(y)
1051 1052 except (StopIteration):
1052 1053 break
1053 1054 return bs
1054 1055
1055 1056 def maxrev(repo, subset, x):
1056 1057 """``max(set)``
1057 1058 Changeset with highest revision number in set.
1058 1059 """
1059 1060 os = getset(repo, spanset(repo), x)
1060 1061 if os:
1061 1062 m = os.max()
1062 1063 if m in subset:
1063 1064 return baseset([m])
1064 1065 return baseset([])
1065 1066
1066 1067 def merge(repo, subset, x):
1067 1068 """``merge()``
1068 1069 Changeset is a merge changeset.
1069 1070 """
1070 1071 # i18n: "merge" is a keyword
1071 1072 getargs(x, 0, 0, _("merge takes no arguments"))
1072 1073 cl = repo.changelog
1073 1074 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1074 1075
1075 1076 def branchpoint(repo, subset, x):
1076 1077 """``branchpoint()``
1077 1078 Changesets with more than one child.
1078 1079 """
1079 1080 # i18n: "branchpoint" is a keyword
1080 1081 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1081 1082 cl = repo.changelog
1082 1083 if not subset:
1083 1084 return baseset([])
1084 1085 baserev = min(subset)
1085 1086 parentscount = [0]*(len(repo) - baserev)
1086 1087 for r in cl.revs(start=baserev + 1):
1087 1088 for p in cl.parentrevs(r):
1088 1089 if p >= baserev:
1089 1090 parentscount[p - baserev] += 1
1090 1091 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1091 1092
1092 1093 def minrev(repo, subset, x):
1093 1094 """``min(set)``
1094 1095 Changeset with lowest revision number in set.
1095 1096 """
1096 1097 os = getset(repo, spanset(repo), x)
1097 1098 if os:
1098 1099 m = os.min()
1099 1100 if m in subset:
1100 1101 return baseset([m])
1101 1102 return baseset([])
1102 1103
1103 1104 def _missingancestors(repo, subset, x):
1104 1105 # i18n: "_missingancestors" is a keyword
1105 1106 revs, bases = getargs(x, 2, 2,
1106 1107 _("_missingancestors requires two arguments"))
1107 1108 rs = baseset(repo)
1108 1109 revs = getset(repo, rs, revs)
1109 1110 bases = getset(repo, rs, bases)
1110 1111 missing = set(repo.changelog.findmissingrevs(bases, revs))
1111 1112 return baseset([r for r in subset if r in missing])
1112 1113
1113 1114 def modifies(repo, subset, x):
1114 1115 """``modifies(pattern)``
1115 1116 Changesets modifying files matched by pattern.
1116 1117
1117 1118 The pattern without explicit kind like ``glob:`` is expected to be
1118 1119 relative to the current directory and match against a file or a
1119 1120 directory.
1120 1121 """
1121 1122 # i18n: "modifies" is a keyword
1122 1123 pat = getstring(x, _("modifies requires a pattern"))
1123 1124 return checkstatus(repo, subset, pat, 0)
1124 1125
1125 1126 def node_(repo, subset, x):
1126 1127 """``id(string)``
1127 1128 Revision non-ambiguously specified by the given hex string prefix.
1128 1129 """
1129 1130 # i18n: "id" is a keyword
1130 1131 l = getargs(x, 1, 1, _("id requires one argument"))
1131 1132 # i18n: "id" is a keyword
1132 1133 n = getstring(l[0], _("id requires a string"))
1133 1134 if len(n) == 40:
1134 1135 rn = repo[n].rev()
1135 1136 else:
1136 1137 rn = None
1137 1138 pm = repo.changelog._partialmatch(n)
1138 1139 if pm is not None:
1139 1140 rn = repo.changelog.rev(pm)
1140 1141
1141 1142 return subset.filter(lambda r: r == rn)
1142 1143
1143 1144 def obsolete(repo, subset, x):
1144 1145 """``obsolete()``
1145 1146 Mutable changeset with a newer version."""
1146 1147 # i18n: "obsolete" is a keyword
1147 1148 getargs(x, 0, 0, _("obsolete takes no arguments"))
1148 1149 obsoletes = obsmod.getrevs(repo, 'obsolete')
1149 1150 return subset & obsoletes
1150 1151
1151 1152 def origin(repo, subset, x):
1152 1153 """``origin([set])``
1153 1154 Changesets that were specified as a source for the grafts, transplants or
1154 1155 rebases that created the given revisions. Omitting the optional set is the
1155 1156 same as passing all(). If a changeset created by these operations is itself
1156 1157 specified as a source for one of these operations, only the source changeset
1157 1158 for the first operation is selected.
1158 1159 """
1159 1160 if x is not None:
1160 1161 args = getset(repo, spanset(repo), x).set()
1161 1162 else:
1162 1163 args = getall(repo, spanset(repo), x).set()
1163 1164
1164 1165 def _firstsrc(rev):
1165 1166 src = _getrevsource(repo, rev)
1166 1167 if src is None:
1167 1168 return None
1168 1169
1169 1170 while True:
1170 1171 prev = _getrevsource(repo, src)
1171 1172
1172 1173 if prev is None:
1173 1174 return src
1174 1175 src = prev
1175 1176
1176 1177 o = set([_firstsrc(r) for r in args])
1177 1178 return subset.filter(lambda r: r in o)
1178 1179
1179 1180 def outgoing(repo, subset, x):
1180 1181 """``outgoing([path])``
1181 1182 Changesets not found in the specified destination repository, or the
1182 1183 default push location.
1183 1184 """
1184 1185 import hg # avoid start-up nasties
1185 1186 # i18n: "outgoing" is a keyword
1186 1187 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1187 1188 # i18n: "outgoing" is a keyword
1188 1189 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1189 1190 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1190 1191 dest, branches = hg.parseurl(dest)
1191 1192 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1192 1193 if revs:
1193 1194 revs = [repo.lookup(rev) for rev in revs]
1194 1195 other = hg.peer(repo, {}, dest)
1195 1196 repo.ui.pushbuffer()
1196 1197 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1197 1198 repo.ui.popbuffer()
1198 1199 cl = repo.changelog
1199 1200 o = set([cl.rev(r) for r in outgoing.missing])
1200 1201 return subset.filter(lambda r: r in o)
1201 1202
1202 1203 def p1(repo, subset, x):
1203 1204 """``p1([set])``
1204 1205 First parent of changesets in set, or the working directory.
1205 1206 """
1206 1207 if x is None:
1207 1208 p = repo[x].p1().rev()
1208 1209 return subset.filter(lambda r: r == p)
1209 1210
1210 1211 ps = set()
1211 1212 cl = repo.changelog
1212 1213 for r in getset(repo, spanset(repo), x):
1213 1214 ps.add(cl.parentrevs(r)[0])
1214 1215 return subset & ps
1215 1216
1216 1217 def p2(repo, subset, x):
1217 1218 """``p2([set])``
1218 1219 Second parent of changesets in set, or the working directory.
1219 1220 """
1220 1221 if x is None:
1221 1222 ps = repo[x].parents()
1222 1223 try:
1223 1224 p = ps[1].rev()
1224 1225 return subset.filter(lambda r: r == p)
1225 1226 except IndexError:
1226 1227 return baseset([])
1227 1228
1228 1229 ps = set()
1229 1230 cl = repo.changelog
1230 1231 for r in getset(repo, spanset(repo), x):
1231 1232 ps.add(cl.parentrevs(r)[1])
1232 1233 return subset & ps
1233 1234
1234 1235 def parents(repo, subset, x):
1235 1236 """``parents([set])``
1236 1237 The set of all parents for all changesets in set, or the working directory.
1237 1238 """
1238 1239 if x is None:
1239 1240 ps = tuple(p.rev() for p in repo[x].parents())
1240 1241 return subset & ps
1241 1242
1242 1243 ps = set()
1243 1244 cl = repo.changelog
1244 1245 for r in getset(repo, spanset(repo), x):
1245 1246 ps.update(cl.parentrevs(r))
1246 1247 return subset & ps
1247 1248
1248 1249 def parentspec(repo, subset, x, n):
1249 1250 """``set^0``
1250 1251 The set.
1251 1252 ``set^1`` (or ``set^``), ``set^2``
1252 1253 First or second parent, respectively, of all changesets in set.
1253 1254 """
1254 1255 try:
1255 1256 n = int(n[1])
1256 1257 if n not in (0, 1, 2):
1257 1258 raise ValueError
1258 1259 except (TypeError, ValueError):
1259 1260 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1260 1261 ps = set()
1261 1262 cl = repo.changelog
1262 1263 for r in getset(repo, baseset(cl), x):
1263 1264 if n == 0:
1264 1265 ps.add(r)
1265 1266 elif n == 1:
1266 1267 ps.add(cl.parentrevs(r)[0])
1267 1268 elif n == 2:
1268 1269 parents = cl.parentrevs(r)
1269 1270 if len(parents) > 1:
1270 1271 ps.add(parents[1])
1271 1272 return subset & ps
1272 1273
1273 1274 def present(repo, subset, x):
1274 1275 """``present(set)``
1275 1276 An empty set, if any revision in set isn't found; otherwise,
1276 1277 all revisions in set.
1277 1278
1278 1279 If any of the specified revisions is not present in the local repository,
1279 1280 the query is normally aborted. But this predicate allows the query
1280 1281 to continue even in such cases.
1281 1282 """
1282 1283 try:
1283 1284 return getset(repo, subset, x)
1284 1285 except error.RepoLookupError:
1285 1286 return baseset([])
1286 1287
1287 1288 def public(repo, subset, x):
1288 1289 """``public()``
1289 1290 Changeset in public phase."""
1290 1291 # i18n: "public" is a keyword
1291 1292 getargs(x, 0, 0, _("public takes no arguments"))
1292 1293 pc = repo._phasecache
1293 1294 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1294 1295
1295 1296 def remote(repo, subset, x):
1296 1297 """``remote([id [,path]])``
1297 1298 Local revision that corresponds to the given identifier in a
1298 1299 remote repository, if present. Here, the '.' identifier is a
1299 1300 synonym for the current local branch.
1300 1301 """
1301 1302
1302 1303 import hg # avoid start-up nasties
1303 1304 # i18n: "remote" is a keyword
1304 1305 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1305 1306
1306 1307 q = '.'
1307 1308 if len(l) > 0:
1308 1309 # i18n: "remote" is a keyword
1309 1310 q = getstring(l[0], _("remote requires a string id"))
1310 1311 if q == '.':
1311 1312 q = repo['.'].branch()
1312 1313
1313 1314 dest = ''
1314 1315 if len(l) > 1:
1315 1316 # i18n: "remote" is a keyword
1316 1317 dest = getstring(l[1], _("remote requires a repository path"))
1317 1318 dest = repo.ui.expandpath(dest or 'default')
1318 1319 dest, branches = hg.parseurl(dest)
1319 1320 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1320 1321 if revs:
1321 1322 revs = [repo.lookup(rev) for rev in revs]
1322 1323 other = hg.peer(repo, {}, dest)
1323 1324 n = other.lookup(q)
1324 1325 if n in repo:
1325 1326 r = repo[n].rev()
1326 1327 if r in subset:
1327 1328 return baseset([r])
1328 1329 return baseset([])
1329 1330
1330 1331 def removes(repo, subset, x):
1331 1332 """``removes(pattern)``
1332 1333 Changesets which remove files matching pattern.
1333 1334
1334 1335 The pattern without explicit kind like ``glob:`` is expected to be
1335 1336 relative to the current directory and match against a file or a
1336 1337 directory.
1337 1338 """
1338 1339 # i18n: "removes" is a keyword
1339 1340 pat = getstring(x, _("removes requires a pattern"))
1340 1341 return checkstatus(repo, subset, pat, 2)
1341 1342
1342 1343 def rev(repo, subset, x):
1343 1344 """``rev(number)``
1344 1345 Revision with the given numeric identifier.
1345 1346 """
1346 1347 # i18n: "rev" is a keyword
1347 1348 l = getargs(x, 1, 1, _("rev requires one argument"))
1348 1349 try:
1349 1350 # i18n: "rev" is a keyword
1350 1351 l = int(getstring(l[0], _("rev requires a number")))
1351 1352 except (TypeError, ValueError):
1352 1353 # i18n: "rev" is a keyword
1353 1354 raise error.ParseError(_("rev expects a number"))
1354 1355 return subset.filter(lambda r: r == l)
1355 1356
1356 1357 def matching(repo, subset, x):
1357 1358 """``matching(revision [, field])``
1358 1359 Changesets in which a given set of fields match the set of fields in the
1359 1360 selected revision or set.
1360 1361
1361 1362 To match more than one field pass the list of fields to match separated
1362 1363 by spaces (e.g. ``author description``).
1363 1364
1364 1365 Valid fields are most regular revision fields and some special fields.
1365 1366
1366 1367 Regular revision fields are ``description``, ``author``, ``branch``,
1367 1368 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1368 1369 and ``diff``.
1369 1370 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1370 1371 contents of the revision. Two revisions matching their ``diff`` will
1371 1372 also match their ``files``.
1372 1373
1373 1374 Special fields are ``summary`` and ``metadata``:
1374 1375 ``summary`` matches the first line of the description.
1375 1376 ``metadata`` is equivalent to matching ``description user date``
1376 1377 (i.e. it matches the main metadata fields).
1377 1378
1378 1379 ``metadata`` is the default field which is used when no fields are
1379 1380 specified. You can match more than one field at a time.
1380 1381 """
1381 1382 # i18n: "matching" is a keyword
1382 1383 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1383 1384
1384 1385 revs = getset(repo, baseset(repo.changelog), l[0])
1385 1386
1386 1387 fieldlist = ['metadata']
1387 1388 if len(l) > 1:
1388 1389 fieldlist = getstring(l[1],
1389 1390 # i18n: "matching" is a keyword
1390 1391 _("matching requires a string "
1391 1392 "as its second argument")).split()
1392 1393
1393 1394 # Make sure that there are no repeated fields,
1394 1395 # expand the 'special' 'metadata' field type
1395 1396 # and check the 'files' whenever we check the 'diff'
1396 1397 fields = []
1397 1398 for field in fieldlist:
1398 1399 if field == 'metadata':
1399 1400 fields += ['user', 'description', 'date']
1400 1401 elif field == 'diff':
1401 1402 # a revision matching the diff must also match the files
1402 1403 # since matching the diff is very costly, make sure to
1403 1404 # also match the files first
1404 1405 fields += ['files', 'diff']
1405 1406 else:
1406 1407 if field == 'author':
1407 1408 field = 'user'
1408 1409 fields.append(field)
1409 1410 fields = set(fields)
1410 1411 if 'summary' in fields and 'description' in fields:
1411 1412 # If a revision matches its description it also matches its summary
1412 1413 fields.discard('summary')
1413 1414
1414 1415 # We may want to match more than one field
1415 1416 # Not all fields take the same amount of time to be matched
1416 1417 # Sort the selected fields in order of increasing matching cost
1417 1418 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1418 1419 'files', 'description', 'substate', 'diff']
1419 1420 def fieldkeyfunc(f):
1420 1421 try:
1421 1422 return fieldorder.index(f)
1422 1423 except ValueError:
1423 1424 # assume an unknown field is very costly
1424 1425 return len(fieldorder)
1425 1426 fields = list(fields)
1426 1427 fields.sort(key=fieldkeyfunc)
1427 1428
1428 1429 # Each field will be matched with its own "getfield" function
1429 1430 # which will be added to the getfieldfuncs array of functions
1430 1431 getfieldfuncs = []
1431 1432 _funcs = {
1432 1433 'user': lambda r: repo[r].user(),
1433 1434 'branch': lambda r: repo[r].branch(),
1434 1435 'date': lambda r: repo[r].date(),
1435 1436 'description': lambda r: repo[r].description(),
1436 1437 'files': lambda r: repo[r].files(),
1437 1438 'parents': lambda r: repo[r].parents(),
1438 1439 'phase': lambda r: repo[r].phase(),
1439 1440 'substate': lambda r: repo[r].substate,
1440 1441 'summary': lambda r: repo[r].description().splitlines()[0],
1441 1442 'diff': lambda r: list(repo[r].diff(git=True),)
1442 1443 }
1443 1444 for info in fields:
1444 1445 getfield = _funcs.get(info, None)
1445 1446 if getfield is None:
1446 1447 raise error.ParseError(
1447 1448 # i18n: "matching" is a keyword
1448 1449 _("unexpected field name passed to matching: %s") % info)
1449 1450 getfieldfuncs.append(getfield)
1450 1451 # convert the getfield array of functions into a "getinfo" function
1451 1452 # which returns an array of field values (or a single value if there
1452 1453 # is only one field to match)
1453 1454 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1454 1455
1455 1456 def matches(x):
1456 1457 for rev in revs:
1457 1458 target = getinfo(rev)
1458 1459 match = True
1459 1460 for n, f in enumerate(getfieldfuncs):
1460 1461 if target[n] != f(x):
1461 1462 match = False
1462 1463 if match:
1463 1464 return True
1464 1465 return False
1465 1466
1466 1467 return subset.filter(matches)
1467 1468
1468 1469 def reverse(repo, subset, x):
1469 1470 """``reverse(set)``
1470 1471 Reverse order of set.
1471 1472 """
1472 1473 l = getset(repo, subset, x)
1473 1474 l.reverse()
1474 1475 return l
1475 1476
1476 1477 def roots(repo, subset, x):
1477 1478 """``roots(set)``
1478 1479 Changesets in set with no parent changeset in set.
1479 1480 """
1480 1481 s = getset(repo, spanset(repo), x).set()
1481 1482 subset = baseset([r for r in s if r in subset.set()])
1482 1483 cs = _children(repo, subset, s)
1483 1484 return subset - cs
1484 1485
1485 1486 def secret(repo, subset, x):
1486 1487 """``secret()``
1487 1488 Changeset in secret phase."""
1488 1489 # i18n: "secret" is a keyword
1489 1490 getargs(x, 0, 0, _("secret takes no arguments"))
1490 1491 pc = repo._phasecache
1491 1492 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1492 1493
1493 1494 def sort(repo, subset, x):
1494 1495 """``sort(set[, [-]key...])``
1495 1496 Sort set by keys. The default sort order is ascending, specify a key
1496 1497 as ``-key`` to sort in descending order.
1497 1498
1498 1499 The keys can be:
1499 1500
1500 1501 - ``rev`` for the revision number,
1501 1502 - ``branch`` for the branch name,
1502 1503 - ``desc`` for the commit message (description),
1503 1504 - ``user`` for user name (``author`` can be used as an alias),
1504 1505 - ``date`` for the commit date
1505 1506 """
1506 1507 # i18n: "sort" is a keyword
1507 1508 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1508 1509 keys = "rev"
1509 1510 if len(l) == 2:
1510 1511 # i18n: "sort" is a keyword
1511 1512 keys = getstring(l[1], _("sort spec must be a string"))
1512 1513
1513 1514 s = l[0]
1514 1515 keys = keys.split()
1515 1516 l = []
1516 1517 def invert(s):
1517 1518 return "".join(chr(255 - ord(c)) for c in s)
1518 1519 revs = getset(repo, subset, s)
1519 1520 if keys == ["rev"]:
1520 1521 revs.sort()
1521 1522 return revs
1522 1523 elif keys == ["-rev"]:
1523 1524 revs.sort(reverse=True)
1524 1525 return revs
1525 1526 for r in revs:
1526 1527 c = repo[r]
1527 1528 e = []
1528 1529 for k in keys:
1529 1530 if k == 'rev':
1530 1531 e.append(r)
1531 1532 elif k == '-rev':
1532 1533 e.append(-r)
1533 1534 elif k == 'branch':
1534 1535 e.append(c.branch())
1535 1536 elif k == '-branch':
1536 1537 e.append(invert(c.branch()))
1537 1538 elif k == 'desc':
1538 1539 e.append(c.description())
1539 1540 elif k == '-desc':
1540 1541 e.append(invert(c.description()))
1541 1542 elif k in 'user author':
1542 1543 e.append(c.user())
1543 1544 elif k in '-user -author':
1544 1545 e.append(invert(c.user()))
1545 1546 elif k == 'date':
1546 1547 e.append(c.date()[0])
1547 1548 elif k == '-date':
1548 1549 e.append(-c.date()[0])
1549 1550 else:
1550 1551 raise error.ParseError(_("unknown sort key %r") % k)
1551 1552 e.append(r)
1552 1553 l.append(e)
1553 1554 l.sort()
1554 1555 return baseset([e[-1] for e in l])
1555 1556
1556 1557 def _stringmatcher(pattern):
1557 1558 """
1558 1559 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1559 1560 returns the matcher name, pattern, and matcher function.
1560 1561 missing or unknown prefixes are treated as literal matches.
1561 1562
1562 1563 helper for tests:
1563 1564 >>> def test(pattern, *tests):
1564 1565 ... kind, pattern, matcher = _stringmatcher(pattern)
1565 1566 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1566 1567
1567 1568 exact matching (no prefix):
1568 1569 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1569 1570 ('literal', 'abcdefg', [False, False, True])
1570 1571
1571 1572 regex matching ('re:' prefix)
1572 1573 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1573 1574 ('re', 'a.+b', [False, False, True])
1574 1575
1575 1576 force exact matches ('literal:' prefix)
1576 1577 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1577 1578 ('literal', 're:foobar', [False, True])
1578 1579
1579 1580 unknown prefixes are ignored and treated as literals
1580 1581 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1581 1582 ('literal', 'foo:bar', [False, False, True])
1582 1583 """
1583 1584 if pattern.startswith('re:'):
1584 1585 pattern = pattern[3:]
1585 1586 try:
1586 1587 regex = re.compile(pattern)
1587 1588 except re.error, e:
1588 1589 raise error.ParseError(_('invalid regular expression: %s')
1589 1590 % e)
1590 1591 return 're', pattern, regex.search
1591 1592 elif pattern.startswith('literal:'):
1592 1593 pattern = pattern[8:]
1593 1594 return 'literal', pattern, pattern.__eq__
1594 1595
1595 1596 def _substringmatcher(pattern):
1596 1597 kind, pattern, matcher = _stringmatcher(pattern)
1597 1598 if kind == 'literal':
1598 1599 matcher = lambda s: pattern in s
1599 1600 return kind, pattern, matcher
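# Illustrative doctest-style sketch (using only the helpers above): for
# literal patterns _substringmatcher degrades to a substring test, while
# 're:' patterns keep regex search semantics.
# >>> _substringmatcher('lit')[2]('some literal text')
# True
# >>> bool(_substringmatcher('re:^lit')[2]('some literal text'))
# False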
1600 1601
1601 1602 def tag(repo, subset, x):
1602 1603 """``tag([name])``
1603 1604 The specified tag by name, or all tagged revisions if no name is given.
1604 1605
1605 1606 If `name` starts with `re:`, the remainder of the name is treated as
1606 1607 a regular expression. To match a tag that actually starts with `re:`,
1607 1608 use the prefix `literal:`.
1608 1609 """
1609 1610 # i18n: "tag" is a keyword
1610 1611 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1611 1612 cl = repo.changelog
1612 1613 if args:
1613 1614 pattern = getstring(args[0],
1614 1615 # i18n: "tag" is a keyword
1615 1616 _('the argument to tag must be a string'))
1616 1617 kind, pattern, matcher = _stringmatcher(pattern)
1617 1618 if kind == 'literal':
1618 1619 # avoid resolving all tags
1619 1620 tn = repo._tagscache.tags.get(pattern, None)
1620 1621 if tn is None:
1621 1622 raise util.Abort(_("tag '%s' does not exist") % pattern)
1622 1623 s = set([repo[tn].rev()])
1623 1624 else:
1624 1625 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1625 1626 else:
1626 1627 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1627 1628 return subset & s
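# Example queries handled by tag() above (illustrative):
#   tag()               - all tagged revisions except 'tip'
#   tag('1.0')          - the single revision carrying tag '1.0' (aborts if missing)
#   tag('re:^v[12]\.')  - tags matched as a regular expression
#   tag('literal:re:x') - a tag literally named 're:x'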
1628 1629
1629 1630 def tagged(repo, subset, x):
1630 1631 return tag(repo, subset, x)
1631 1632
1632 1633 def unstable(repo, subset, x):
1633 1634 """``unstable()``
1634 1635 Non-obsolete changesets with obsolete ancestors.
1635 1636 """
1636 1637 # i18n: "unstable" is a keyword
1637 1638 getargs(x, 0, 0, _("unstable takes no arguments"))
1638 1639 unstables = obsmod.getrevs(repo, 'unstable')
1639 1640 return subset & unstables
1640 1641
1641 1642
1642 1643 def user(repo, subset, x):
1643 1644 """``user(string)``
1644 1645 User name contains string. The match is case-insensitive.
1645 1646
1646 1647 If `string` starts with `re:`, the remainder of the string is treated as
1647 1648 a regular expression. To match a user that actually contains `re:`, use
1648 1649 the prefix `literal:`.
1649 1650 """
1650 1651 return author(repo, subset, x)
1651 1652
1652 1653 # for internal use
1653 1654 def _list(repo, subset, x):
1654 1655 s = getstring(x, "internal error")
1655 1656 if not s:
1656 1657 return baseset([])
1657 1658 ls = [repo[r].rev() for r in s.split('\0')]
1658 1659 s = subset.set()
1659 1660 return baseset([r for r in ls if r in s])
1660 1661
1661 1662 # for internal use
1662 1663 def _intlist(repo, subset, x):
1663 1664 s = getstring(x, "internal error")
1664 1665 if not s:
1665 1666 return baseset([])
1666 1667 ls = [int(r) for r in s.split('\0')]
1667 1668 s = subset.set()
1668 1669 return baseset([r for r in ls if r in s])
1669 1670
1670 1671 # for internal use
1671 1672 def _hexlist(repo, subset, x):
1672 1673 s = getstring(x, "internal error")
1673 1674 if not s:
1674 1675 return baseset([])
1675 1676 cl = repo.changelog
1676 1677 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1677 1678 s = subset.set()
1678 1679 return baseset([r for r in ls if r in s])
1679 1680
1680 1681 symbols = {
1681 1682 "adds": adds,
1682 1683 "all": getall,
1683 1684 "ancestor": ancestor,
1684 1685 "ancestors": ancestors,
1685 1686 "_firstancestors": _firstancestors,
1686 1687 "author": author,
1687 1688 "only": only,
1688 1689 "bisect": bisect,
1689 1690 "bisected": bisected,
1690 1691 "bookmark": bookmark,
1691 1692 "branch": branch,
1692 1693 "branchpoint": branchpoint,
1693 1694 "bumped": bumped,
1694 1695 "bundle": bundle,
1695 1696 "children": children,
1696 1697 "closed": closed,
1697 1698 "contains": contains,
1698 1699 "converted": converted,
1699 1700 "date": date,
1700 1701 "desc": desc,
1701 1702 "descendants": descendants,
1702 1703 "_firstdescendants": _firstdescendants,
1703 1704 "destination": destination,
1704 1705 "divergent": divergent,
1705 1706 "draft": draft,
1706 1707 "extinct": extinct,
1707 1708 "extra": extra,
1708 1709 "file": hasfile,
1709 1710 "filelog": filelog,
1710 1711 "first": first,
1711 1712 "follow": follow,
1712 1713 "_followfirst": _followfirst,
1713 1714 "grep": grep,
1714 1715 "head": head,
1715 1716 "heads": heads,
1716 1717 "hidden": hidden,
1717 1718 "id": node_,
1718 1719 "keyword": keyword,
1719 1720 "last": last,
1720 1721 "limit": limit,
1721 1722 "_matchfiles": _matchfiles,
1722 1723 "max": maxrev,
1723 1724 "merge": merge,
1724 1725 "min": minrev,
1725 1726 "_missingancestors": _missingancestors,
1726 1727 "modifies": modifies,
1727 1728 "obsolete": obsolete,
1728 1729 "origin": origin,
1729 1730 "outgoing": outgoing,
1730 1731 "p1": p1,
1731 1732 "p2": p2,
1732 1733 "parents": parents,
1733 1734 "present": present,
1734 1735 "public": public,
1735 1736 "remote": remote,
1736 1737 "removes": removes,
1737 1738 "rev": rev,
1738 1739 "reverse": reverse,
1739 1740 "roots": roots,
1740 1741 "sort": sort,
1741 1742 "secret": secret,
1742 1743 "matching": matching,
1743 1744 "tag": tag,
1744 1745 "tagged": tagged,
1745 1746 "user": user,
1746 1747 "unstable": unstable,
1747 1748 "_list": _list,
1748 1749 "_intlist": _intlist,
1749 1750 "_hexlist": _hexlist,
1750 1751 }
1751 1752
1752 1753 # symbols which can't be used for a DoS attack for any given input
1753 1754 # (e.g. those which accept regexes as plain strings shouldn't be included)
1754 1755 # functions that just return a lot of changesets (like all) don't count here
1755 1756 safesymbols = set([
1756 1757 "adds",
1757 1758 "all",
1758 1759 "ancestor",
1759 1760 "ancestors",
1760 1761 "_firstancestors",
1761 1762 "author",
1762 1763 "bisect",
1763 1764 "bisected",
1764 1765 "bookmark",
1765 1766 "branch",
1766 1767 "branchpoint",
1767 1768 "bumped",
1768 1769 "bundle",
1769 1770 "children",
1770 1771 "closed",
1771 1772 "converted",
1772 1773 "date",
1773 1774 "desc",
1774 1775 "descendants",
1775 1776 "_firstdescendants",
1776 1777 "destination",
1777 1778 "divergent",
1778 1779 "draft",
1779 1780 "extinct",
1780 1781 "extra",
1781 1782 "file",
1782 1783 "filelog",
1783 1784 "first",
1784 1785 "follow",
1785 1786 "_followfirst",
1786 1787 "head",
1787 1788 "heads",
1788 1789 "hidden",
1789 1790 "id",
1790 1791 "keyword",
1791 1792 "last",
1792 1793 "limit",
1793 1794 "_matchfiles",
1794 1795 "max",
1795 1796 "merge",
1796 1797 "min",
1797 1798 "_missingancestors",
1798 1799 "modifies",
1799 1800 "obsolete",
1800 1801 "origin",
1801 1802 "outgoing",
1802 1803 "p1",
1803 1804 "p2",
1804 1805 "parents",
1805 1806 "present",
1806 1807 "public",
1807 1808 "remote",
1808 1809 "removes",
1809 1810 "rev",
1810 1811 "reverse",
1811 1812 "roots",
1812 1813 "sort",
1813 1814 "secret",
1814 1815 "matching",
1815 1816 "tag",
1816 1817 "tagged",
1817 1818 "user",
1818 1819 "unstable",
1819 1820 "_list",
1820 1821 "_intlist",
1821 1822 "_hexlist",
1822 1823 ])
1823 1824
1824 1825 methods = {
1825 1826 "range": rangeset,
1826 1827 "dagrange": dagrange,
1827 1828 "string": stringset,
1828 1829 "symbol": symbolset,
1829 1830 "and": andset,
1830 1831 "or": orset,
1831 1832 "not": notset,
1832 1833 "list": listset,
1833 1834 "func": func,
1834 1835 "ancestor": ancestorspec,
1835 1836 "parent": parentspec,
1836 1837 "parentpost": p1,
1837 1838 }
1838 1839
1839 1840 def optimize(x, small):
1840 1841 if x is None:
1841 1842 return 0, x
1842 1843
1843 1844 smallbonus = 1
1844 1845 if small:
1845 1846 smallbonus = .5
1846 1847
1847 1848 op = x[0]
1848 1849 if op == 'minus':
1849 1850 return optimize(('and', x[1], ('not', x[2])), small)
1850 1851 elif op == 'dagrangepre':
1851 1852 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1852 1853 elif op == 'dagrangepost':
1853 1854 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1854 1855 elif op == 'rangepre':
1855 1856 return optimize(('range', ('string', '0'), x[1]), small)
1856 1857 elif op == 'rangepost':
1857 1858 return optimize(('range', x[1], ('string', 'tip')), small)
1858 1859 elif op == 'negate':
1859 1860 return optimize(('string',
1860 1861 '-' + getstring(x[1], _("can't negate that"))), small)
1861 1862 elif op in 'string symbol negate':
1862 1863 return smallbonus, x # single revisions are small
1863 1864 elif op == 'and':
1864 1865 wa, ta = optimize(x[1], True)
1865 1866 wb, tb = optimize(x[2], True)
1866 1867
1867 1868 # (::x and not ::y)/(not ::y and ::x) have a fast path
1868 1869 def ismissingancestors(revs, bases):
1869 1870 return (
1870 1871 revs[0] == 'func'
1871 1872 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1872 1873 and bases[0] == 'not'
1873 1874 and bases[1][0] == 'func'
1874 1875 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1875 1876
1876 1877 w = min(wa, wb)
1877 1878 if ismissingancestors(ta, tb):
1878 1879 return w, ('func', ('symbol', '_missingancestors'),
1879 1880 ('list', ta[2], tb[1][2]))
1880 1881 if ismissingancestors(tb, ta):
1881 1882 return w, ('func', ('symbol', '_missingancestors'),
1882 1883 ('list', tb[2], ta[1][2]))
1883 1884
1884 1885 if wa > wb:
1885 1886 return w, (op, tb, ta)
1886 1887 return w, (op, ta, tb)
1887 1888 elif op == 'or':
1888 1889 wa, ta = optimize(x[1], False)
1889 1890 wb, tb = optimize(x[2], False)
1890 1891 if wb < wa:
1891 1892 wb, wa = wa, wb
1892 1893 return max(wa, wb), (op, ta, tb)
1893 1894 elif op == 'not':
1894 1895 o = optimize(x[1], not small)
1895 1896 return o[0], (op, o[1])
1896 1897 elif op == 'parentpost':
1897 1898 o = optimize(x[1], small)
1898 1899 return o[0], (op, o[1])
1899 1900 elif op == 'group':
1900 1901 return optimize(x[1], small)
1901 1902 elif op in 'dagrange range list parent ancestorspec':
1902 1903 if op == 'parent':
1903 1904 # x^:y means (x^) : y, not x ^ (:y)
1904 1905 post = ('parentpost', x[1])
1905 1906 if x[2][0] == 'dagrangepre':
1906 1907 return optimize(('dagrange', post, x[2][1]), small)
1907 1908 elif x[2][0] == 'rangepre':
1908 1909 return optimize(('range', post, x[2][1]), small)
1909 1910
1910 1911 wa, ta = optimize(x[1], small)
1911 1912 wb, tb = optimize(x[2], small)
1912 1913 return wa + wb, (op, ta, tb)
1913 1914 elif op == 'func':
1914 1915 f = getstring(x[1], _("not a symbol"))
1915 1916 wa, ta = optimize(x[2], small)
1916 1917 if f in ("author branch closed date desc file grep keyword "
1917 1918 "outgoing user"):
1918 1919 w = 10 # slow
1919 1920 elif f in "modifies adds removes":
1920 1921 w = 30 # slower
1921 1922 elif f == "contains":
1922 1923 w = 100 # very slow
1923 1924 elif f == "ancestor":
1924 1925 w = 1 * smallbonus
1925 1926 elif f in "reverse limit first":
1926 1927 w = 0
1927 1928 elif f in "sort":
1928 1929 w = 10 # assume most sorts look at changelog
1929 1930 else:
1930 1931 w = 1
1931 1932 return w + wa, (op, x[1], ta)
1932 1933 return 1, x
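# Rough illustration of the tree rewriting done by optimize() (using the
# parse-tree shapes of this module; not part of the original file): a set
# difference is turned into 'and'/'not' so the 'and' fast paths can apply.
# >>> optimize(('minus', ('symbol', '2'), ('symbol', '1')), False)[1][0]
# 'and'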
1933 1934
1934 1935 _aliasarg = ('func', ('symbol', '_aliasarg'))
1935 1936 def _getaliasarg(tree):
1936 1937 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1937 1938 return X, None otherwise.
1938 1939 """
1939 1940 if (len(tree) == 3 and tree[:2] == _aliasarg
1940 1941 and tree[2][0] == 'string'):
1941 1942 return tree[2][1]
1942 1943 return None
1943 1944
1944 1945 def _checkaliasarg(tree, known=None):
1945 1946 """Check tree contains no _aliasarg construct or only ones which
1946 1947 value is in known. Used to avoid alias placeholders injection.
1947 1948 """
1948 1949 if isinstance(tree, tuple):
1949 1950 arg = _getaliasarg(tree)
1950 1951 if arg is not None and (not known or arg not in known):
1951 1952 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1952 1953 for t in tree:
1953 1954 _checkaliasarg(t, known)
1954 1955
1955 1956 class revsetalias(object):
1956 1957 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1957 1958 args = None
1958 1959
1959 1960 def __init__(self, name, value):
1960 1961 '''Aliases like:
1961 1962
1962 1963 h = heads(default)
1963 1964 b($1) = ancestors($1) - ancestors(default)
1964 1965 '''
1965 1966 m = self.funcre.search(name)
1966 1967 if m:
1967 1968 self.name = m.group(1)
1968 1969 self.tree = ('func', ('symbol', m.group(1)))
1969 1970 self.args = [x.strip() for x in m.group(2).split(',')]
1970 1971 for arg in self.args:
1971 1972 # _aliasarg() is an unknown symbol only used to separate
1972 1973 # alias argument placeholders from regular strings.
1973 1974 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1974 1975 else:
1975 1976 self.name = name
1976 1977 self.tree = ('symbol', name)
1977 1978
1978 1979 self.replacement, pos = parse(value)
1979 1980 if pos != len(value):
1980 1981 raise error.ParseError(_('invalid token'), pos)
1981 1982 # Check for placeholder injection
1982 1983 _checkaliasarg(self.replacement, self.args)
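# Small sketch of the alias machinery above (illustrative): a symbolic alias
# simply records its name, while a parameterized one records _aliasarg
# placeholders that _expandargs later substitutes.
# >>> revsetalias('h', 'heads(default)').tree
# ('symbol', 'h')
# >>> revsetalias('b($1)', 'ancestors($1) - ancestors(default)').args
# ['$1']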
1983 1984
1984 1985 def _getalias(aliases, tree):
1985 1986 """If tree looks like an unexpanded alias, return it. Return None
1986 1987 otherwise.
1987 1988 """
1988 1989 if isinstance(tree, tuple) and tree:
1989 1990 if tree[0] == 'symbol' and len(tree) == 2:
1990 1991 name = tree[1]
1991 1992 alias = aliases.get(name)
1992 1993 if alias and alias.args is None and alias.tree == tree:
1993 1994 return alias
1994 1995 if tree[0] == 'func' and len(tree) > 1:
1995 1996 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1996 1997 name = tree[1][1]
1997 1998 alias = aliases.get(name)
1998 1999 if alias and alias.args is not None and alias.tree == tree[:2]:
1999 2000 return alias
2000 2001 return None
2001 2002
2002 2003 def _expandargs(tree, args):
2003 2004 """Replace _aliasarg instances with the substitution value of the
2004 2005 same name in args, recursively.
2005 2006 """
2006 2007 if not tree or not isinstance(tree, tuple):
2007 2008 return tree
2008 2009 arg = _getaliasarg(tree)
2009 2010 if arg is not None:
2010 2011 return args[arg]
2011 2012 return tuple(_expandargs(t, args) for t in tree)
2012 2013
2013 2014 def _expandaliases(aliases, tree, expanding, cache):
2014 2015 """Expand aliases in tree, recursively.
2015 2016
2016 2017 'aliases' is a dictionary mapping user defined aliases to
2017 2018 revsetalias objects.
2018 2019 """
2019 2020 if not isinstance(tree, tuple):
2020 2021 # Do not expand raw strings
2021 2022 return tree
2022 2023 alias = _getalias(aliases, tree)
2023 2024 if alias is not None:
2024 2025 if alias in expanding:
2025 2026 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2026 2027 'detected') % alias.name)
2027 2028 expanding.append(alias)
2028 2029 if alias.name not in cache:
2029 2030 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2030 2031 expanding, cache)
2031 2032 result = cache[alias.name]
2032 2033 expanding.pop()
2033 2034 if alias.args is not None:
2034 2035 l = getlist(tree[2])
2035 2036 if len(l) != len(alias.args):
2036 2037 raise error.ParseError(
2037 2038 _('invalid number of arguments: %s') % len(l))
2038 2039 l = [_expandaliases(aliases, a, [], cache) for a in l]
2039 2040 result = _expandargs(result, dict(zip(alias.args, l)))
2040 2041 else:
2041 2042 result = tuple(_expandaliases(aliases, t, expanding, cache)
2042 2043 for t in tree)
2043 2044 return result
2044 2045
2045 2046 def findaliases(ui, tree):
2046 2047 _checkaliasarg(tree)
2047 2048 aliases = {}
2048 2049 for k, v in ui.configitems('revsetalias'):
2049 2050 alias = revsetalias(k, v)
2050 2051 aliases[alias.name] = alias
2051 2052 return _expandaliases(aliases, tree, [], {})
2052 2053
2053 2054 def parse(spec, lookup=None):
2054 2055 p = parser.parser(tokenize, elements)
2055 2056 return p.parse(spec, lookup=lookup)
2056 2057
2057 2058 def match(ui, spec, repo=None):
2058 2059 if not spec:
2059 2060 raise error.ParseError(_("empty query"))
2060 2061 lookup = None
2061 2062 if repo:
2062 2063 lookup = repo.__contains__
2063 2064 tree, pos = parse(spec, lookup)
2064 2065 if (pos != len(spec)):
2065 2066 raise error.ParseError(_("invalid token"), pos)
2066 2067 if ui:
2067 2068 tree = findaliases(ui, tree)
2068 2069 weight, tree = optimize(tree, True)
2069 2070 def mfunc(repo, subset):
2070 2071 if util.safehasattr(subset, 'set'):
2071 2072 return getset(repo, subset, tree)
2072 2073 return getset(repo, baseset(subset), tree)
2073 2074 return mfunc
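# Typical internal usage of the compiled matcher (sketch; 'ui' and 'repo' are
# the usual Mercurial objects and are not defined here):
#   m = match(ui, 'heads(default) and not closed()')
#   revs = m(repo, spanset(repo))   # smartset of matching revisions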
2074 2075
2075 2076 def formatspec(expr, *args):
2076 2077 '''
2077 2078 This is a convenience function for using revsets internally, and
2078 2079 escapes arguments appropriately. Aliases are intentionally ignored
2079 2080 so that intended expression behavior isn't accidentally subverted.
2080 2081
2081 2082 Supported arguments:
2082 2083
2083 2084 %r = revset expression, parenthesized
2084 2085 %d = int(arg), no quoting
2085 2086 %s = string(arg), escaped and single-quoted
2086 2087 %b = arg.branch(), escaped and single-quoted
2087 2088 %n = hex(arg), single-quoted
2088 2089 %% = a literal '%'
2089 2090
2090 2091 Prefixing the type with 'l' specifies a parenthesized list of that type.
2091 2092
2092 2093 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2093 2094 '(10 or 11):: and ((this()) or (that()))'
2094 2095 >>> formatspec('%d:: and not %d::', 10, 20)
2095 2096 '10:: and not 20::'
2096 2097 >>> formatspec('%ld or %ld', [], [1])
2097 2098 "_list('') or 1"
2098 2099 >>> formatspec('keyword(%s)', 'foo\\xe9')
2099 2100 "keyword('foo\\\\xe9')"
2100 2101 >>> b = lambda: 'default'
2101 2102 >>> b.branch = b
2102 2103 >>> formatspec('branch(%b)', b)
2103 2104 "branch('default')"
2104 2105 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2105 2106 "root(_list('a\\x00b\\x00c\\x00d'))"
2106 2107 '''
2107 2108
2108 2109 def quote(s):
2109 2110 return repr(str(s))
2110 2111
2111 2112 def argtype(c, arg):
2112 2113 if c == 'd':
2113 2114 return str(int(arg))
2114 2115 elif c == 's':
2115 2116 return quote(arg)
2116 2117 elif c == 'r':
2117 2118 parse(arg) # make sure syntax errors are confined
2118 2119 return '(%s)' % arg
2119 2120 elif c == 'n':
2120 2121 return quote(node.hex(arg))
2121 2122 elif c == 'b':
2122 2123 return quote(arg.branch())
2123 2124
2124 2125 def listexp(s, t):
2125 2126 l = len(s)
2126 2127 if l == 0:
2127 2128 return "_list('')"
2128 2129 elif l == 1:
2129 2130 return argtype(t, s[0])
2130 2131 elif t == 'd':
2131 2132 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2132 2133 elif t == 's':
2133 2134 return "_list('%s')" % "\0".join(s)
2134 2135 elif t == 'n':
2135 2136 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2136 2137 elif t == 'b':
2137 2138 return "_list('%s')" % "\0".join(a.branch() for a in s)
2138 2139
2139 2140 m = l // 2
2140 2141 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2141 2142
2142 2143 ret = ''
2143 2144 pos = 0
2144 2145 arg = 0
2145 2146 while pos < len(expr):
2146 2147 c = expr[pos]
2147 2148 if c == '%':
2148 2149 pos += 1
2149 2150 d = expr[pos]
2150 2151 if d == '%':
2151 2152 ret += d
2152 2153 elif d in 'dsnbr':
2153 2154 ret += argtype(d, args[arg])
2154 2155 arg += 1
2155 2156 elif d == 'l':
2156 2157 # a list of some type
2157 2158 pos += 1
2158 2159 d = expr[pos]
2159 2160 ret += listexp(list(args[arg]), d)
2160 2161 arg += 1
2161 2162 else:
2162 2163 raise util.Abort('unexpected revspec format character %s' % d)
2163 2164 else:
2164 2165 ret += c
2165 2166 pos += 1
2166 2167
2167 2168 return ret
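# One more illustrative combination of the escapes above (example values
# chosen here, not taken from the original docstring):
# >>> formatspec('%ld and keyword(%s)', [1, 2, 3], 'fix')
# "_intlist('1\x002\x003') and keyword('fix')"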
2168 2169
2169 2170 def prettyformat(tree):
2170 2171 def _prettyformat(tree, level, lines):
2171 2172 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2172 2173 lines.append((level, str(tree)))
2173 2174 else:
2174 2175 lines.append((level, '(%s' % tree[0]))
2175 2176 for s in tree[1:]:
2176 2177 _prettyformat(s, level + 1, lines)
2177 2178 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2178 2179
2179 2180 lines = []
2180 2181 _prettyformat(tree, 0, lines)
2181 2182 output = '\n'.join((' '*l + s) for l, s in lines)
2182 2183 return output
2183 2184
2184 2185 def depth(tree):
2185 2186 if isinstance(tree, tuple):
2186 2187 return max(map(depth, tree)) + 1
2187 2188 else:
2188 2189 return 0
2189 2190
2190 2191 def funcsused(tree):
2191 2192 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2192 2193 return set()
2193 2194 else:
2194 2195 funcs = set()
2195 2196 for s in tree[1:]:
2196 2197 funcs |= funcsused(s)
2197 2198 if tree[0] == 'func':
2198 2199 funcs.add(tree[1][1])
2199 2200 return funcs
2200 2201
2201 2202 class baseset(list):
2202 2203 """Basic data structure that represents a revset and contains the basic
2203 2204 operation that it should be able to perform.
2204 2205
2205 2206 Every method in this class should be implemented by any smartset class.
2206 2207 """
2207 2208 def __init__(self, data=()):
2208 2209 super(baseset, self).__init__(data)
2209 2210 self._set = None
2210 2211
2211 2212 def ascending(self):
2212 2213 """Sorts the set in ascending order (in place).
2213 2214
2214 2215 This is part of the mandatory API for smartset."""
2215 2216 self.sort()
2216 2217
2217 2218 def descending(self):
2218 2219 """Sorts the set in descending order (in place).
2219 2220
2220 2221 This is part of the mandatory API for smartset."""
2221 2222 self.sort(reverse=True)
2222 2223
2223 2224 def min(self):
2224 2225 return min(self)
2225 2226
2226 2227 def max(self):
2227 2228 return max(self)
2228 2229
2229 2230 def set(self):
2230 2231 """Returns a set or a smartset containing all the elements.
2231 2232
2232 2233 The returned structure should be the fastest option for membership
2233 2234 testing.
2234 2235
2235 2236 This is part of the mandatory API for smartset."""
2236 2237 if not self._set:
2237 2238 self._set = set(self)
2238 2239 return self._set
2239 2240
2240 2241 def __sub__(self, other):
2241 2242 """Returns a new object with the substraction of the two collections.
2242 2243
2243 2244 This is part of the mandatory API for smartset."""
2244 2245 if isinstance(other, baseset):
2245 2246 s = other.set()
2246 2247 else:
2247 2248 s = set(other)
2248 2249 return baseset(self.set() - s)
2249 2250
2250 2251 def __and__(self, other):
2251 2252 """Returns a new object with the intersection of the two collections.
2252 2253
2253 2254 This is part of the mandatory API for smartset."""
2254 2255 if isinstance(other, baseset):
2255 2256 other = other.set()
2256 2257 return baseset([y for y in self if y in other])
2257 2258
2258 2259 def __add__(self, other):
2259 2260 """Returns a new object with the union of the two collections.
2260 2261
2261 2262 This is part of the mandatory API for smartset."""
2262 2263 s = self.set()
2263 2264 l = [r for r in other if r not in s]
2264 2265 return baseset(list(self) + l)
2265 2266
2266 2267 def isascending(self):
2267 2268 """Returns True if the collection is ascending order, False if not.
2268 2269
2269 2270 This is part of the mandatory API for smartset."""
2270 2271 return False
2271 2272
2272 2273 def isdescending(self):
2273 2274 """Returns True if the collection is descending order, False if not.
2274 2275
2275 2276 This is part of the mandatory API for smartset."""
2276 2277 return False
2277 2278
2278 2279 def filter(self, condition):
2279 2280 """Returns this smartset filtered by condition as a new smartset.
2280 2281
2281 2282 `condition` is a callable which takes a revision number and returns a
2282 2283 boolean.
2283 2284
2284 2285 This is part of the mandatory API for smartset."""
2285 2286 return lazyset(self, condition)
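# Basic smartset arithmetic provided by baseset above (illustrative):
# >>> baseset([1, 2, 3]) & baseset([2, 3, 4])
# [2, 3]
# >>> baseset([1, 2]) + baseset([2, 3])
# [1, 2, 3]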
2286 2287
2287 2288 class _orderedsetmixin(object):
2288 2289 """Mixin class with utility methods for smartsets
2289 2290
2290 2291 This should be extended by smartsets which have the isascending(),
2291 2292 isdescending() and reverse() methods"""
2292 2293
2293 2294 def _first(self):
2294 2295 """return the first revision in the set"""
2295 2296 for r in self:
2296 2297 return r
2297 2298 raise ValueError('arg is an empty sequence')
2298 2299
2299 2300 def _last(self):
2300 2301 """return the last revision in the set"""
2301 2302 self.reverse()
2302 2303 m = self._first()
2303 2304 self.reverse()
2304 2305 return m
2305 2306
2306 2307 def min(self):
2307 2308 """return the smallest element in the set"""
2308 2309 if self.isascending():
2309 2310 return self._first()
2310 2311 return self._last()
2311 2312
2312 2313 def max(self):
2313 2314 """return the largest element in the set"""
2314 2315 if self.isascending():
2315 2316 return self._last()
2316 2317 return self._first()
2317 2318
2318 2319 class lazyset(object):
2319 2320 """Duck type for baseset class which iterates lazily over the revisions in
2320 2321 the subset and contains a function which tests for membership in the
2321 2322 revset
2322 2323 """
2323 2324 def __init__(self, subset, condition=lambda x: True):
2324 2325 """
2325 2326 condition: a function that decides whether a revision in the subset
2326 2327 belongs to the revset or not.
2327 2328 """
2328 2329 self._subset = subset
2329 2330 self._condition = condition
2330 2331 self._cache = {}
2331 2332
2332 2333 def ascending(self):
2333 2334 self._subset.sort()
2334 2335
2335 2336 def descending(self):
2336 2337 self._subset.sort(reverse=True)
2337 2338
2338 2339 def min(self):
2339 2340 return min(self)
2340 2341
2341 2342 def max(self):
2342 2343 return max(self)
2343 2344
2344 2345 def __contains__(self, x):
2345 2346 c = self._cache
2346 2347 if x not in c:
2347 2348 c[x] = x in self._subset and self._condition(x)
2348 2349 return c[x]
2349 2350
2350 2351 def __iter__(self):
2351 2352 cond = self._condition
2352 2353 for x in self._subset:
2353 2354 if cond(x):
2354 2355 yield x
2355 2356
2356 2357 def __and__(self, x):
2357 2358 return lazyset(self, lambda r: r in x)
2358 2359
2359 2360 def __sub__(self, x):
2360 2361 return lazyset(self, lambda r: r not in x)
2361 2362
2362 2363 def __add__(self, x):
2363 2364 return _addset(self, x)
2364 2365
2365 2366 def __nonzero__(self):
2366 2367 for r in self:
2367 2368 return True
2368 2369 return False
2369 2370
2370 2371 def __len__(self):
2371 2372 # Basic implementation to be changed in future patches.
2372 2373 l = baseset([r for r in self])
2373 2374 return len(l)
2374 2375
2375 2376 def __getitem__(self, x):
2376 2377 # Basic implementation to be changed in future patches.
2377 2378 l = baseset([r for r in self])
2378 2379 return l[x]
2379 2380
2380 2381 def sort(self, reverse=False):
2381 2382 if not util.safehasattr(self._subset, 'sort'):
2382 2383 self._subset = baseset(self._subset)
2383 2384 self._subset.sort(reverse=reverse)
2384 2385
2385 2386 def reverse(self):
2386 2387 self._subset.reverse()
2387 2388
2388 2389 def set(self):
2389 2390 return set([r for r in self])
2390 2391
2391 2392 def isascending(self):
2392 2393 return False
2393 2394
2394 2395 def isdescending(self):
2395 2396 return False
2396 2397
2397 2398 def filter(self, l):
2398 2399 return lazyset(self, l)
2399 2400
2400 2401 class orderedlazyset(_orderedsetmixin, lazyset):
2401 2402 """Subclass of lazyset which subset can be ordered either ascending or
2402 2403 descendingly
2403 2404 """
2404 2405 def __init__(self, subset, condition, ascending=True):
2405 2406 super(orderedlazyset, self).__init__(subset, condition)
2406 2407 self._ascending = ascending
2407 2408
2408 2409 def filter(self, l):
2409 2410 return orderedlazyset(self, l, ascending=self._ascending)
2410 2411
2411 2412 def ascending(self):
2412 2413 if not self._ascending:
2413 2414 self.reverse()
2414 2415
2415 2416 def descending(self):
2416 2417 if self._ascending:
2417 2418 self.reverse()
2418 2419
2419 2420 def __and__(self, x):
2420 2421 return orderedlazyset(self, lambda r: r in x,
2421 2422 ascending=self._ascending)
2422 2423
2423 2424 def __sub__(self, x):
2424 2425 return orderedlazyset(self, lambda r: r not in x,
2425 2426 ascending=self._ascending)
2426 2427
2427 2428 def __add__(self, x):
2428 2429 kwargs = {}
2429 2430 if self.isascending() and x.isascending():
2430 2431 kwargs['ascending'] = True
2431 2432 if self.isdescending() and x.isdescending():
2432 2433 kwargs['ascending'] = False
2433 2434 return _addset(self, x, **kwargs)
2434 2435
2435 2436 def sort(self, reverse=False):
2436 2437 if reverse:
2437 2438 if self._ascending:
2438 2439 self._subset.sort(reverse=reverse)
2439 2440 else:
2440 2441 if not self._ascending:
2441 2442 self._subset.sort(reverse=reverse)
2442 2443 self._ascending = not reverse
2443 2444
2444 2445 def isascending(self):
2445 2446 return self._ascending
2446 2447
2447 2448 def isdescending(self):
2448 2449 return not self._ascending
2449 2450
2450 2451 def reverse(self):
2451 2452 self._subset.reverse()
2452 2453 self._ascending = not self._ascending
2453 2454
2454 2455 class _addset(_orderedsetmixin):
2455 2456 """Represent the addition of two sets
2456 2457
2457 2458 Wrapper structure for lazily adding two structures without losing much
2458 2459 performance on the __contains__ method
2459 2460
2460 2461 If the ascending attribute is set, that means the two structures are
2461 2462 ordered in either an ascending or descending way. Therefore, we can add
2462 2463 them maintaining the order by iterating over both at the same time
2463 2464
2464 2465 This class does not duck-type baseset and it's only supposed to be used
2465 2466 internally
2466 2467 """
2467 2468 def __init__(self, revs1, revs2, ascending=None):
2468 2469 self._r1 = revs1
2469 2470 self._r2 = revs2
2470 2471 self._iter = None
2471 2472 self._ascending = ascending
2472 2473 self._genlist = None
2473 2474
2474 2475 def __len__(self):
2475 2476 return len(self._list)
2476 2477
2477 2478 @util.propertycache
2478 2479 def _list(self):
2479 2480 if not self._genlist:
2480 2481 self._genlist = baseset(self._iterator())
2481 2482 return self._genlist
2482 2483
2483 2484 def filter(self, condition):
2484 2485 if self._ascending is not None:
2485 2486 return orderedlazyset(self, condition, ascending=self._ascending)
2486 2487 return lazyset(self, condition)
2487 2488
2488 2489 def ascending(self):
2489 2490 if self._ascending is None:
2490 2491 self.sort()
2491 2492 self._ascending = True
2492 2493 else:
2493 2494 if not self._ascending:
2494 2495 self.reverse()
2495 2496
2496 2497 def descending(self):
2497 2498 if self._ascending is None:
2498 2499 self.sort(reverse=True)
2499 2500 self._ascending = False
2500 2501 else:
2501 2502 if self._ascending:
2502 2503 self.reverse()
2503 2504
2504 2505 def __and__(self, other):
2505 2506 filterfunc = other.__contains__
2506 2507 if self._ascending is not None:
2507 2508 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2508 2509 return lazyset(self, filterfunc)
2509 2510
2510 2511 def __sub__(self, other):
2511 2512 filterfunc = lambda r: r not in other
2512 2513 if self._ascending is not None:
2513 2514 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2514 2515 return lazyset(self, filterfunc)
2515 2516
2516 2517 def __add__(self, other):
2517 2518 """When both collections are ascending or descending, preserve the order
2518 2519 """
2519 2520 kwargs = {}
2520 2521 if self._ascending is not None:
2521 2522 if self.isascending() and other.isascending():
2522 2523 kwargs['ascending'] = True
2523 2524 if self.isdescending() and other.isdescending():
2524 2525 kwargs['ascending'] = False
2525 2526 return _addset(self, other, **kwargs)
2526 2527
2527 2528 def _iterator(self):
2528 2529 """Iterate over both collections without repeating elements
2529 2530
2530 2531 If the ascending attribute is not set, iterate over the first one and
2531 2532 then over the second one, checking for membership in the first one so we
2532 2533 don't yield any duplicates.
2533 2534
2534 2535 If the ascending attribute is set, iterate over both collections at the
2535 2536 same time, yielding only one value at a time in the given order.
2536 2537 """
2537 2538 if not self._iter:
2538 2539 def gen():
2539 2540 if self._ascending is None:
2540 2541 for r in self._r1:
2541 2542 yield r
2542 2543 s = self._r1.set()
2543 2544 for r in self._r2:
2544 2545 if r not in s:
2545 2546 yield r
2546 2547 else:
2547 2548 iter1 = iter(self._r1)
2548 2549 iter2 = iter(self._r2)
2549 2550
2550 2551 val1 = None
2551 2552 val2 = None
2552 2553
2553 2554 choice = max
2554 2555 if self._ascending:
2555 2556 choice = min
2556 2557 try:
2557 2558 # Consume both iterators in an ordered way until one is
2558 2559 # empty
2559 2560 while True:
2560 2561 if val1 is None:
2561 2562 val1 = iter1.next()
2562 2563 if val2 is None:
2563 2564 val2 = iter2.next()
2564 2565 next = choice(val1, val2)
2565 2566 yield next
2566 2567 if val1 == next:
2567 2568 val1 = None
2568 2569 if val2 == next:
2569 2570 val2 = None
2570 2571 except StopIteration:
2571 2572 # Flush any remaining values and consume the other one
2572 2573 it = iter2
2573 2574 if val1 is not None:
2574 2575 yield val1
2575 2576 it = iter1
2576 2577 elif val2 is not None:
2577 2578 # might have been equality and both are empty
2578 2579 yield val2
2579 2580 for val in it:
2580 2581 yield val
2581 2582
2582 2583 self._iter = _generatorset(gen())
2583 2584
2584 2585 return self._iter
2585 2586
2586 2587 def __iter__(self):
2587 2588 if self._genlist:
2588 2589 return iter(self._genlist)
2589 2590 return iter(self._iterator())
2590 2591
2591 2592 def __contains__(self, x):
2592 2593 return x in self._r1 or x in self._r2
2593 2594
2594 2595 def set(self):
2595 2596 return self
2596 2597
2597 2598 def sort(self, reverse=False):
2598 2599 """Sort the added set
2599 2600
2600 2601 For this we use the cached list with all the generated values, and if we
2601 2602 know whether they are ascending or descending we can sort them in a smart way.
2602 2603 """
2603 2604 if self._ascending is None:
2604 2605 self._list.sort(reverse=reverse)
2605 2606 self._ascending = not reverse
2606 2607 else:
2607 2608 if bool(self._ascending) == bool(reverse):
2608 2609 self.reverse()
2609 2610
2610 2611 def isascending(self):
2611 2612 return self._ascending is not None and self._ascending
2612 2613
2613 2614 def isdescending(self):
2614 2615 return self._ascending is not None and not self._ascending
2615 2616
2616 2617 def reverse(self):
2617 2618 self._list.reverse()
2618 2619 if self._ascending is not None:
2619 2620 self._ascending = not self._ascending
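# Illustration of the lazy ordered merge implemented by _addset (both inputs
# ascending; duplicates are yielded only once):
# >>> list(_addset(baseset([1, 3, 5]), baseset([2, 3, 4]), ascending=True))
# [1, 2, 3, 4, 5]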
2620 2621
2621 2622 class _generatorset(object):
2622 2623 """Wrap a generator for lazy iteration
2623 2624
2624 2625 Wrapper structure for generators that provides lazy membership and can
2625 2626 be iterated more than once.
2626 2627 When asked for membership it generates values until either it finds the
2627 2628 requested one or has gone through all the elements in the generator
2628 2629
2629 2630 This class does not duck-type baseset and it's only supposed to be used
2630 2631 internally
2631 2632 """
2632 2633 def __init__(self, gen):
2633 2634 """
2634 2635 gen: a generator producing the values for the generatorset.
2635 2636 """
2636 2637 self._gen = gen
2637 2638 self._cache = {}
2638 2639 self._genlist = baseset([])
2639 2640 self._finished = False
2640 2641
2641 2642 def __contains__(self, x):
2642 2643 if x in self._cache:
2643 2644 return self._cache[x]
2644 2645
2645 2646 # Use new values only, as existing values would be cached.
2646 2647 for l in self._consumegen():
2647 2648 if l == x:
2648 2649 return True
2649 2650
2650 2651 self._cache[x] = False
2651 2652 return False
2652 2653
2653 2654 def __iter__(self):
2654 2655 if self._finished:
2655 2656 for x in self._genlist:
2656 2657 yield x
2657 2658 return
2658 2659
2659 2660 i = 0
2660 2661 genlist = self._genlist
2661 2662 consume = self._consumegen()
2662 2663 while True:
2663 2664 if i < len(genlist):
2664 2665 yield genlist[i]
2665 2666 else:
2666 2667 yield consume.next()
2667 2668 i += 1
2668 2669
2669 2670 def _consumegen(self):
2670 2671 for item in self._gen:
2671 2672 self._cache[item] = True
2672 2673 self._genlist.append(item)
2673 2674 yield item
2674 2675 self._finished = True
2675 2676
2676 2677 def set(self):
2677 2678 return self
2678 2679
2679 2680 def sort(self, reverse=False):
2680 2681 if not self._finished:
2681 2682 for i in self:
2682 2683 continue
2683 2684 self._genlist.sort(reverse=reverse)
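# Membership tests on a _generatorset only consume the wrapped generator as
# far as needed; values already seen are cached and replayed on iteration
# (illustrative, Python 2 style to match the module):
# >>> gs = _generatorset(iter([10, 20, 30]))
# >>> 20 in gs
# True
# >>> list(gs)
# [10, 20, 30]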
2684 2685
2685 2686 class _ascgeneratorset(_generatorset):
2686 2687 """Wrap a generator of ascending elements for lazy iteration
2687 2688
2688 2689 Same structure as _generatorset, but a membership test stops iterating as
2689 2690 soon as the generator goes past the requested value without finding it
2690 2691
2691 2692 This class does not duck-type baseset and it's only supposed to be used
2692 2693 internally
2693 2694 """
2694 2695 def __contains__(self, x):
2695 2696 if x in self._cache:
2696 2697 return self._cache[x]
2697 2698
2698 2699 # Use new values only, as existing values would be cached.
2699 2700 for l in self._consumegen():
2700 2701 if l == x:
2701 2702 return True
2702 2703 if l > x:
2703 2704 break
2704 2705
2705 2706 self._cache[x] = False
2706 2707 return False
2707 2708
2708 2709 class _descgeneratorset(_generatorset):
2709 2710 """Wrap a generator of descending elements for lazy iteration
2710 2711
2711 2712 Same structure as _generatorset, but a membership test stops iterating as
2712 2713 soon as the generator goes past the requested value without finding it
2713 2714
2714 2715 This class does not duck-type baseset and it's only supposed to be used
2715 2716 internally
2716 2717 """
2717 2718 def __contains__(self, x):
2718 2719 if x in self._cache:
2719 2720 return self._cache[x]
2720 2721
2721 2722 # Use new values only, as existing values would be cached.
2722 2723 for l in self._consumegen():
2723 2724 if l == x:
2724 2725 return True
2725 2726 if l < x:
2726 2727 break
2727 2728
2728 2729 self._cache[x] = False
2729 2730 return False
2730 2731
2731 2732 class spanset(_orderedsetmixin):
2732 2733 """Duck type for baseset class which represents a range of revisions and
2733 2734 can work lazily, without having the whole range in memory
2734 2735
2735 2736 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2736 2737 notable points:
2737 2738 - when x > y it will be automatically descending,
2738 2739 - revisions filtered by this repoview will be skipped.
2739 2740
2740 2741 """
2741 2742 def __init__(self, repo, start=0, end=None):
2742 2743 """
2743 2744 start: first revision included in the set
2744 2745 (defaults to 0)
2745 2746 end: first revision excluded (last + 1)
2746 2747 (defaults to len(repo))
2747 2748
2748 2749 Spanset will be descending if `end` < `start`.
2749 2750 """
2750 2751 self._start = start
2751 2752 if end is not None:
2752 2753 self._end = end
2753 2754 else:
2754 2755 self._end = len(repo)
2755 2756 self._hiddenrevs = repo.changelog.filteredrevs
2756 2757
2757 2758 def ascending(self):
2758 2759 if self._start > self._end:
2759 2760 self.reverse()
2760 2761
2761 2762 def descending(self):
2762 2763 if self._start < self._end:
2763 2764 self.reverse()
2764 2765
2765 2766 def _contained(self, rev):
2766 2767 return (rev <= self._start and rev > self._end) or (rev >= self._start
2767 2768 and rev < self._end)
2768 2769
2769 2770 def __iter__(self):
2770 2771 if self._start <= self._end:
2771 2772 iterrange = xrange(self._start, self._end)
2772 2773 else:
2773 2774 iterrange = xrange(self._start, self._end, -1)
2774 2775
2775 2776 if self._hiddenrevs:
2776 2777 s = self._hiddenrevs
2777 2778 for r in iterrange:
2778 2779 if r not in s:
2779 2780 yield r
2780 2781 else:
2781 2782 for r in iterrange:
2782 2783 yield r
2783 2784
2784 2785 def __contains__(self, x):
2784 2785 return self._contained(x) and not (self._hiddenrevs and x in
2785 2786 self._hiddenrevs)
2787 2788
2788 2789 def __nonzero__(self):
2789 2790 for r in self:
2790 2791 return True
2791 2792 return False
2792 2793
2793 2794 def __and__(self, x):
2794 2795 if isinstance(x, baseset):
2795 2796 x = x.set()
2796 2797 if self._start <= self._end:
2797 2798 return orderedlazyset(self, lambda r: r in x)
2798 2799 else:
2799 2800 return orderedlazyset(self, lambda r: r in x, ascending=False)
2800 2801
2801 2802 def __sub__(self, x):
2802 2803 if isinstance(x, baseset):
2803 2804 x = x.set()
2804 2805 if self._start <= self._end:
2805 2806 return orderedlazyset(self, lambda r: r not in x)
2806 2807 else:
2807 2808 return orderedlazyset(self, lambda r: r not in x, ascending=False)
2808 2809
2809 2810 def __add__(self, x):
2810 2811 kwargs = {}
2811 2812 if self.isascending() and x.isascending():
2812 2813 kwargs['ascending'] = True
2813 2814 if self.isdescending() and x.isdescending():
2814 2815 kwargs['ascending'] = False
2815 2816 return _addset(self, x, **kwargs)
2816 2817
2817 2818 def __len__(self):
2818 2819 if not self._hiddenrevs:
2819 2820 return abs(self._end - self._start)
2820 2821 else:
2821 2822 count = 0
2822 2823 for rev in self._hiddenrevs:
2823 2824 if self._contained(rev):
2824 2825 count += 1
2825 2826 return abs(self._end - self._start) - count
2826 2827
2827 2828 def __getitem__(self, x):
2828 2829 # Basic implementation to be changed in future patches.
2829 2830 l = baseset([r for r in self])
2830 2831 return l[x]
2831 2832
2832 2833 def sort(self, reverse=False):
2833 2834 if bool(reverse) != (self._start > self._end):
2834 2835 self.reverse()
2835 2836
2836 2837 def reverse(self):
2837 2838 # Just switch the _start and _end parameters
2838 2839 if self._start <= self._end:
2839 2840 self._start, self._end = self._end - 1, self._start - 1
2840 2841 else:
2841 2842 self._start, self._end = self._end + 1, self._start + 1
2842 2843
2843 2844 def set(self):
2844 2845 return self
2845 2846
2846 2847 def isascending(self):
2847 2848 return self._start < self._end
2848 2849
2849 2850 def isdescending(self):
2850 2851 return self._start > self._end
2851 2852
2852 2853 def filter(self, l):
2853 2854 if self._start <= self._end:
2854 2855 return orderedlazyset(self, l)
2855 2856 else:
2856 2857 return orderedlazyset(self, l, ascending=False)
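# Sketch of spanset ordering (using a hypothetical minimal repo-like object;
# the class only needs __len__ and changelog.filteredrevs):
# >>> class fakerepo(object):
# ...     class changelog(object):
# ...         filteredrevs = frozenset()
# ...     def __len__(self):
# ...         return 5
# >>> list(spanset(fakerepo()))
# [0, 1, 2, 3, 4]
# >>> list(spanset(fakerepo(), 4, -1))
# [4, 3, 2, 1, 0]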
2857 2858
2858 2859 # tell hggettext to extract docstrings from these functions:
2859 2860 i18nfunctions = symbols.values()