revset: added set method to addset to duck type generatorset...
Lucas Moscovicz
r20711:b95490cf default
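The addset class itself sits later in revset.py than the portion of the hunk shown here. As a rough illustration of what the commit message describes, duck-typing generatorset means giving addset a set() method, so that getset() below can hand the object back unchanged after its safehasattr(s, 'set') check instead of copying it into a baseset. A minimal sketch under that assumption; the attribute and parameter names are illustrative, not the actual Mercurial implementation:

    class addset(object):
        """Sketch: lazy union of two revsets (revs1 and revs2 are assumed
        to be containers supporting iteration and membership tests)."""
        def __init__(self, revs1, revs2):
            self._r1 = revs1
            self._r2 = revs2

        def __iter__(self):
            for r in self._r1:
                yield r
            for r in self._r2:
                if r not in self._r1:
                    yield r

        def set(self):
            # Duck-type generatorset: expose a plain set of member revisions
            # so callers that test membership (e.g. getset) can use the
            # object directly, without wrapping it in a baseset.
            return set(self._r1) | set(self._r2)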
@@ -1,2540 +1,2543 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return _descgeneratorset(iterate())
50 50
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return _ascgeneratorset(iterate())
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset([])
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = baseset(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset([])
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 else:
191 191 yield ('symbol', sym, s)
192 192 pos -= 1
193 193 else:
194 194 raise error.ParseError(_("syntax error"), pos)
195 195 pos += 1
196 196 yield ('end', None, pos)
197 197
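# An additional worked example of the token stream (illustrative, following
# the doctest above): each token is a (type, value, position) tuple, with
# position being a 0-based offset into the input string.
#
#   >>> list(tokenize("1::2"))
#   [('symbol', '1', 0), ('::', None, 1), ('symbol', '2', 3), ('end', None, 4)]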
198 198 # helpers
199 199
200 200 def getstring(x, err):
201 201 if x and (x[0] == 'string' or x[0] == 'symbol'):
202 202 return x[1]
203 203 raise error.ParseError(err)
204 204
205 205 def getlist(x):
206 206 if not x:
207 207 return []
208 208 if x[0] == 'list':
209 209 return getlist(x[1]) + [x[2]]
210 210 return [x]
211 211
212 212 def getargs(x, min, max, err):
213 213 l = getlist(x)
214 214 if len(l) < min or (max >= 0 and len(l) > max):
215 215 raise error.ParseError(err)
216 216 return l
217 217
218 218 def getset(repo, subset, x):
219 219 if not x:
220 220 raise error.ParseError(_("missing argument"))
221 221 s = methods[x[0]](repo, subset, *x[1:])
222 222 if util.safehasattr(s, 'set'):
223 223 return s
224 224 return baseset(s)
225 225
226 226 def _getrevsource(repo, r):
227 227 extra = repo[r].extra()
228 228 for label in ('source', 'transplant_source', 'rebase_source'):
229 229 if label in extra:
230 230 try:
231 231 return repo[extra[label]].rev()
232 232 except error.RepoLookupError:
233 233 pass
234 234 return None
235 235
236 236 # operator methods
237 237
238 238 def stringset(repo, subset, x):
239 239 x = repo[x].rev()
240 240 if x == -1 and len(subset) == len(repo):
241 241 return baseset([-1])
242 242 if len(subset) == len(repo) or x in subset:
243 243 return baseset([x])
244 244 return baseset([])
245 245
246 246 def symbolset(repo, subset, x):
247 247 if x in symbols:
248 248 raise error.ParseError(_("can't use %s here") % x)
249 249 return stringset(repo, subset, x)
250 250
251 251 def rangeset(repo, subset, x, y):
252 252 cl = baseset(repo.changelog)
253 253 m = getset(repo, cl, x)
254 254 n = getset(repo, cl, y)
255 255
256 256 if not m or not n:
257 257 return baseset([])
258 258 m, n = m[0], n[-1]
259 259
260 260 if m < n:
261 261 r = spanset(repo, m, n + 1)
262 262 else:
263 263 r = spanset(repo, m, n - 1)
264 264 return r & subset
265 265
266 266 def dagrange(repo, subset, x, y):
267 267 r = spanset(repo)
268 268 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
269 269 s = subset.set()
270 270 return xs.filter(lambda r: r in s)
271 271
272 272 def andset(repo, subset, x, y):
273 273 return getset(repo, getset(repo, subset, x), y)
274 274
275 275 def orset(repo, subset, x, y):
276 276 xl = getset(repo, subset, x)
277 277 yl = getset(repo, subset - xl, y)
278 278 return xl + yl
279 279
280 280 def notset(repo, subset, x):
281 281 return subset - getset(repo, subset, x)
282 282
283 283 def listset(repo, subset, a, b):
284 284 raise error.ParseError(_("can't use a list in this context"))
285 285
286 286 def func(repo, subset, a, b):
287 287 if a[0] == 'symbol' and a[1] in symbols:
288 288 return symbols[a[1]](repo, subset, b)
289 289 raise error.ParseError(_("not a function: %s") % a[1])
290 290
291 291 # functions
292 292
293 293 def adds(repo, subset, x):
294 294 """``adds(pattern)``
295 295 Changesets that add a file matching pattern.
296 296
297 297 The pattern without explicit kind like ``glob:`` is expected to be
298 298 relative to the current directory and match against a file or a
299 299 directory.
300 300 """
301 301 # i18n: "adds" is a keyword
302 302 pat = getstring(x, _("adds requires a pattern"))
303 303 return checkstatus(repo, subset, pat, 1)
304 304
305 305 def ancestor(repo, subset, x):
306 306 """``ancestor(*changeset)``
307 307 Greatest common ancestor of the changesets.
308 308
309 309 Accepts 0 or more changesets.
310 310 Will return empty list when passed no args.
311 311 Greatest common ancestor of a single changeset is that changeset.
312 312 """
313 313 # i18n: "ancestor" is a keyword
314 314 l = getlist(x)
315 315 rl = spanset(repo)
316 316 anc = None
317 317
318 318 # (getset(repo, rl, i) for i in l) generates a list of lists
319 319 rev = repo.changelog.rev
320 320 ancestor = repo.changelog.ancestor
321 321 node = repo.changelog.node
322 322 for revs in (getset(repo, rl, i) for i in l):
323 323 for r in revs:
324 324 if anc is None:
325 325 anc = r
326 326 else:
327 327 anc = rev(ancestor(node(anc), node(r)))
328 328
329 329 if anc is not None and anc in subset:
330 330 return baseset([anc])
331 331 return baseset([])
332 332
333 333 def _ancestors(repo, subset, x, followfirst=False):
334 334 args = getset(repo, spanset(repo), x)
335 335 if not args:
336 336 return baseset([])
337 337 s = _revancestors(repo, args, followfirst)
338 338 return subset.filter(lambda r: r in s)
339 339
340 340 def ancestors(repo, subset, x):
341 341 """``ancestors(set)``
342 342 Changesets that are ancestors of a changeset in set.
343 343 """
344 344 return _ancestors(repo, subset, x)
345 345
346 346 def _firstancestors(repo, subset, x):
347 347 # ``_firstancestors(set)``
348 348 # Like ``ancestors(set)`` but follows only the first parents.
349 349 return _ancestors(repo, subset, x, followfirst=True)
350 350
351 351 def ancestorspec(repo, subset, x, n):
352 352 """``set~n``
353 353 Changesets that are the Nth ancestor (first parents only) of a changeset
354 354 in set.
355 355 """
356 356 try:
357 357 n = int(n[1])
358 358 except (TypeError, ValueError):
359 359 raise error.ParseError(_("~ expects a number"))
360 360 ps = set()
361 361 cl = repo.changelog
362 362 for r in getset(repo, baseset(cl), x):
363 363 for i in range(n):
364 364 r = cl.parentrevs(r)[0]
365 365 ps.add(r)
366 366 return subset.filter(lambda r: r in ps)
367 367
368 368 def author(repo, subset, x):
369 369 """``author(string)``
370 370 Alias for ``user(string)``.
371 371 """
372 372 # i18n: "author" is a keyword
373 373 n = encoding.lower(getstring(x, _("author requires a string")))
374 374 kind, pattern, matcher = _substringmatcher(n)
375 375 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
376 376
377 377 def only(repo, subset, x):
378 378 """``only(set, [set])``
379 379 Changesets that are ancestors of the first set that are not ancestors
380 380 of any other head in the repo. If a second set is specified, the result
381 381 is ancestors of the first set that are not ancestors of the second set
382 382 (i.e. ::<set1> - ::<set2>).
383 383 """
384 384 cl = repo.changelog
385 385 args = getargs(x, 1, 2, _('only takes one or two arguments'))
386 386 include = getset(repo, spanset(repo), args[0]).set()
387 387 if len(args) == 1:
388 388 descendants = set(_revdescendants(repo, include, False))
389 389 exclude = [rev for rev in cl.headrevs()
390 390 if not rev in descendants and not rev in include]
391 391 else:
392 392 exclude = getset(repo, spanset(repo), args[1])
393 393
394 394 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
395 395 return lazyset(subset, lambda x: x in results)
396 396
397 397 def bisect(repo, subset, x):
398 398 """``bisect(string)``
399 399 Changesets marked in the specified bisect status:
400 400
401 401 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
402 402 - ``goods``, ``bads`` : csets topologically good/bad
403 403 - ``range`` : csets taking part in the bisection
404 404 - ``pruned`` : csets that are goods, bads or skipped
405 405 - ``untested`` : csets whose fate is yet unknown
406 406 - ``ignored`` : csets ignored due to DAG topology
407 407 - ``current`` : the cset currently being bisected
408 408 """
409 409 # i18n: "bisect" is a keyword
410 410 status = getstring(x, _("bisect requires a string")).lower()
411 411 state = set(hbisect.get(repo, status))
412 412 return subset.filter(lambda r: r in state)
413 413
414 414 # Backward-compatibility
415 415 # - no help entry so that we do not advertise it any more
416 416 def bisected(repo, subset, x):
417 417 return bisect(repo, subset, x)
418 418
419 419 def bookmark(repo, subset, x):
420 420 """``bookmark([name])``
421 421 The named bookmark or all bookmarks.
422 422
423 423 If `name` starts with `re:`, the remainder of the name is treated as
424 424 a regular expression. To match a bookmark that actually starts with `re:`,
425 425 use the prefix `literal:`.
426 426 """
427 427 # i18n: "bookmark" is a keyword
428 428 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
429 429 if args:
430 430 bm = getstring(args[0],
431 431 # i18n: "bookmark" is a keyword
432 432 _('the argument to bookmark must be a string'))
433 433 kind, pattern, matcher = _stringmatcher(bm)
434 434 if kind == 'literal':
435 435 bmrev = repo._bookmarks.get(bm, None)
436 436 if not bmrev:
437 437 raise util.Abort(_("bookmark '%s' does not exist") % bm)
438 438 bmrev = repo[bmrev].rev()
439 439 return subset.filter(lambda r: r == bmrev)
440 440 else:
441 441 matchrevs = set()
442 442 for name, bmrev in repo._bookmarks.iteritems():
443 443 if matcher(name):
444 444 matchrevs.add(bmrev)
445 445 if not matchrevs:
446 446 raise util.Abort(_("no bookmarks exist that match '%s'")
447 447 % pattern)
448 448 bmrevs = set()
449 449 for bmrev in matchrevs:
450 450 bmrevs.add(repo[bmrev].rev())
451 451 return subset & bmrevs
452 452
453 453 bms = set([repo[r].rev()
454 454 for r in repo._bookmarks.values()])
455 455 return subset.filter(lambda r: r in bms)
456 456
457 457 def branch(repo, subset, x):
458 458 """``branch(string or set)``
459 459 All changesets belonging to the given branch or the branches of the given
460 460 changesets.
461 461
462 462 If `string` starts with `re:`, the remainder of the name is treated as
463 463 a regular expression. To match a branch that actually starts with `re:`,
464 464 use the prefix `literal:`.
465 465 """
466 466 try:
467 467 b = getstring(x, '')
468 468 except error.ParseError:
469 469 # not a string, but another revspec, e.g. tip()
470 470 pass
471 471 else:
472 472 kind, pattern, matcher = _stringmatcher(b)
473 473 if kind == 'literal':
474 474 # note: falls through to the revspec case if no branch with
475 475 # this name exists
476 476 if pattern in repo.branchmap():
477 477 return subset.filter(lambda r: matcher(repo[r].branch()))
478 478 else:
479 479 return subset.filter(lambda r: matcher(repo[r].branch()))
480 480
481 481 s = getset(repo, spanset(repo), x)
482 482 b = set()
483 483 for r in s:
484 484 b.add(repo[r].branch())
485 485 s = s.set()
486 486 return subset.filter(lambda r: r in s or repo[r].branch() in b)
487 487
488 488 def bumped(repo, subset, x):
489 489 """``bumped()``
490 490 Mutable changesets marked as successors of public changesets.
491 491
492 492 Only non-public and non-obsolete changesets can be `bumped`.
493 493 """
494 494 # i18n: "bumped" is a keyword
495 495 getargs(x, 0, 0, _("bumped takes no arguments"))
496 496 bumped = obsmod.getrevs(repo, 'bumped')
497 497 return subset & bumped
498 498
499 499 def bundle(repo, subset, x):
500 500 """``bundle()``
501 501 Changesets in the bundle.
502 502
503 503 Bundle must be specified by the -R option."""
504 504
505 505 try:
506 506 bundlerevs = repo.changelog.bundlerevs
507 507 except AttributeError:
508 508 raise util.Abort(_("no bundle provided - specify with -R"))
509 509 return subset & bundlerevs
510 510
511 511 def checkstatus(repo, subset, pat, field):
512 512 hasset = matchmod.patkind(pat) == 'set'
513 513
514 514 def matches(x):
515 515 m = None
516 516 fname = None
517 517 c = repo[x]
518 518 if not m or hasset:
519 519 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
520 520 if not m.anypats() and len(m.files()) == 1:
521 521 fname = m.files()[0]
522 522 if fname is not None:
523 523 if fname not in c.files():
524 524 return False
525 525 else:
526 526 for f in c.files():
527 527 if m(f):
528 528 break
529 529 else:
530 530 return False
531 531 files = repo.status(c.p1().node(), c.node())[field]
532 532 if fname is not None:
533 533 if fname in files:
534 534 return True
535 535 else:
536 536 for f in files:
537 537 if m(f):
538 538 return True
539 539
540 540 return subset.filter(matches)
541 541
542 542 def _children(repo, narrow, parentset):
543 543 cs = set()
544 544 if not parentset:
545 545 return baseset(cs)
546 546 pr = repo.changelog.parentrevs
547 547 minrev = min(parentset)
548 548 for r in narrow:
549 549 if r <= minrev:
550 550 continue
551 551 for p in pr(r):
552 552 if p in parentset:
553 553 cs.add(r)
554 554 return baseset(cs)
555 555
556 556 def children(repo, subset, x):
557 557 """``children(set)``
558 558 Child changesets of changesets in set.
559 559 """
560 560 s = getset(repo, baseset(repo), x).set()
561 561 cs = _children(repo, subset, s)
562 562 return subset & cs
563 563
564 564 def closed(repo, subset, x):
565 565 """``closed()``
566 566 Changeset is closed.
567 567 """
568 568 # i18n: "closed" is a keyword
569 569 getargs(x, 0, 0, _("closed takes no arguments"))
570 570 return subset.filter(lambda r: repo[r].closesbranch())
571 571
572 572 def contains(repo, subset, x):
573 573 """``contains(pattern)``
574 574 Revision contains a file matching pattern. See :hg:`help patterns`
575 575 for information about file patterns.
576 576
577 577 The pattern without explicit kind like ``glob:`` is expected to be
578 578 relative to the current directory and match against a file exactly
579 579 for efficiency.
580 580 """
581 581 # i18n: "contains" is a keyword
582 582 pat = getstring(x, _("contains requires a pattern"))
583 583
584 584 def matches(x):
585 585 if not matchmod.patkind(pat):
586 586 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
587 587 if pats in repo[x]:
588 588 return True
589 589 else:
590 590 c = repo[x]
591 591 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
592 592 for f in c.manifest():
593 593 if m(f):
594 594 return True
595 595 return False
596 596
597 597 return subset.filter(matches)
598 598
599 599 def converted(repo, subset, x):
600 600 """``converted([id])``
601 601 Changesets converted from the given identifier in the old repository if
602 602 present, or all converted changesets if no identifier is specified.
603 603 """
604 604
605 605 # There is exactly no chance of resolving the revision, so do a simple
606 606 # string compare and hope for the best
607 607
608 608 rev = None
609 609 # i18n: "converted" is a keyword
610 610 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
611 611 if l:
612 612 # i18n: "converted" is a keyword
613 613 rev = getstring(l[0], _('converted requires a revision'))
614 614
615 615 def _matchvalue(r):
616 616 source = repo[r].extra().get('convert_revision', None)
617 617 return source is not None and (rev is None or source.startswith(rev))
618 618
619 619 return subset.filter(lambda r: _matchvalue(r))
620 620
621 621 def date(repo, subset, x):
622 622 """``date(interval)``
623 623 Changesets within the interval, see :hg:`help dates`.
624 624 """
625 625 # i18n: "date" is a keyword
626 626 ds = getstring(x, _("date requires a string"))
627 627 dm = util.matchdate(ds)
628 628 return subset.filter(lambda x: dm(repo[x].date()[0]))
629 629
630 630 def desc(repo, subset, x):
631 631 """``desc(string)``
632 632 Search commit message for string. The match is case-insensitive.
633 633 """
634 634 # i18n: "desc" is a keyword
635 635 ds = encoding.lower(getstring(x, _("desc requires a string")))
636 636
637 637 def matches(x):
638 638 c = repo[x]
639 639 return ds in encoding.lower(c.description())
640 640
641 641 return subset.filter(matches)
642 642
643 643 def _descendants(repo, subset, x, followfirst=False):
644 644 args = getset(repo, spanset(repo), x)
645 645 if not args:
646 646 return baseset([])
647 647 s = _revdescendants(repo, args, followfirst)
648 648 a = set(args)
649 649 return subset.filter(lambda r: r in s or r in a)
650 650
651 651 def descendants(repo, subset, x):
652 652 """``descendants(set)``
653 653 Changesets which are descendants of changesets in set.
654 654 """
655 655 return _descendants(repo, subset, x)
656 656
657 657 def _firstdescendants(repo, subset, x):
658 658 # ``_firstdescendants(set)``
659 659 # Like ``descendants(set)`` but follows only the first parents.
660 660 return _descendants(repo, subset, x, followfirst=True)
661 661
662 662 def destination(repo, subset, x):
663 663 """``destination([set])``
664 664 Changesets that were created by a graft, transplant or rebase operation,
665 665 with the given revisions specified as the source. Omitting the optional set
666 666 is the same as passing all().
667 667 """
668 668 if x is not None:
669 669 args = getset(repo, spanset(repo), x).set()
670 670 else:
671 671 args = getall(repo, spanset(repo), x).set()
672 672
673 673 dests = set()
674 674
675 675 # subset contains all of the possible destinations that can be returned, so
676 676 # iterate over them and see if their source(s) were provided in the args.
677 677 # Even if the immediate src of r is not in the args, src's source (or
678 678 # further back) may be. Scanning back further than the immediate src allows
679 679 # transitive transplants and rebases to yield the same results as transitive
680 680 # grafts.
681 681 for r in subset:
682 682 src = _getrevsource(repo, r)
683 683 lineage = None
684 684
685 685 while src is not None:
686 686 if lineage is None:
687 687 lineage = list()
688 688
689 689 lineage.append(r)
690 690
691 691 # The visited lineage is a match if the current source is in the arg
692 692 # set. Since every candidate dest is visited by way of iterating
693 693 # subset, any dests further back in the lineage will be tested by a
694 694 # different iteration over subset. Likewise, if the src was already
695 695 # selected, the current lineage can be selected without going back
696 696 # further.
697 697 if src in args or src in dests:
698 698 dests.update(lineage)
699 699 break
700 700
701 701 r = src
702 702 src = _getrevsource(repo, r)
703 703
704 704 return subset.filter(lambda r: r in dests)
705 705
706 706 def divergent(repo, subset, x):
707 707 """``divergent()``
708 708 Final successors of changesets with an alternative set of final successors.
709 709 """
710 710 # i18n: "divergent" is a keyword
711 711 getargs(x, 0, 0, _("divergent takes no arguments"))
712 712 divergent = obsmod.getrevs(repo, 'divergent')
713 713 return subset.filter(lambda r: r in divergent)
714 714
715 715 def draft(repo, subset, x):
716 716 """``draft()``
717 717 Changeset in draft phase."""
718 718 # i18n: "draft" is a keyword
719 719 getargs(x, 0, 0, _("draft takes no arguments"))
720 720 pc = repo._phasecache
721 721 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
722 722
723 723 def extinct(repo, subset, x):
724 724 """``extinct()``
725 725 Obsolete changesets with obsolete descendants only.
726 726 """
727 727 # i18n: "extinct" is a keyword
728 728 getargs(x, 0, 0, _("extinct takes no arguments"))
729 729 extincts = obsmod.getrevs(repo, 'extinct')
730 730 return subset & extincts
731 731
732 732 def extra(repo, subset, x):
733 733 """``extra(label, [value])``
734 734 Changesets with the given label in the extra metadata, with the given
735 735 optional value.
736 736
737 737 If `value` starts with `re:`, the remainder of the value is treated as
738 738 a regular expression. To match a value that actually starts with `re:`,
739 739 use the prefix `literal:`.
740 740 """
741 741
742 742 # i18n: "extra" is a keyword
743 743 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
744 744 # i18n: "extra" is a keyword
745 745 label = getstring(l[0], _('first argument to extra must be a string'))
746 746 value = None
747 747
748 748 if len(l) > 1:
749 749 # i18n: "extra" is a keyword
750 750 value = getstring(l[1], _('second argument to extra must be a string'))
751 751 kind, value, matcher = _stringmatcher(value)
752 752
753 753 def _matchvalue(r):
754 754 extra = repo[r].extra()
755 755 return label in extra and (value is None or matcher(extra[label]))
756 756
757 757 return subset.filter(lambda r: _matchvalue(r))
758 758
759 759 def filelog(repo, subset, x):
760 760 """``filelog(pattern)``
761 761 Changesets connected to the specified filelog.
762 762
763 763 For performance reasons, ``filelog()`` does not show every changeset
764 764 that affects the requested file(s). See :hg:`help log` for details. For
765 765 a slower, more accurate result, use ``file()``.
766 766
767 767 The pattern without explicit kind like ``glob:`` is expected to be
768 768 relative to the current directory and match against a file exactly
769 769 for efficiency.
770 770 """
771 771
772 772 # i18n: "filelog" is a keyword
773 773 pat = getstring(x, _("filelog requires a pattern"))
774 774 s = set()
775 775
776 776 if not matchmod.patkind(pat):
777 777 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
778 778 fl = repo.file(f)
779 779 for fr in fl:
780 780 s.add(fl.linkrev(fr))
781 781 else:
782 782 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
783 783 for f in repo[None]:
784 784 if m(f):
785 785 fl = repo.file(f)
786 786 for fr in fl:
787 787 s.add(fl.linkrev(fr))
788 788
789 789 return subset.filter(lambda r: r in s)
790 790
791 791 def first(repo, subset, x):
792 792 """``first(set, [n])``
793 793 An alias for limit().
794 794 """
795 795 return limit(repo, subset, x)
796 796
797 797 def _follow(repo, subset, x, name, followfirst=False):
798 798 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
799 799 c = repo['.']
800 800 if l:
801 801 x = getstring(l[0], _("%s expected a filename") % name)
802 802 if x in c:
803 803 cx = c[x]
804 804 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
805 805 # include the revision responsible for the most recent version
806 806 s.add(cx.linkrev())
807 807 else:
808 808 return baseset([])
809 809 else:
810 810 s = _revancestors(repo, baseset([c.rev()]), followfirst)
811 811
812 812 return subset.filter(lambda r: r in s)
813 813
814 814 def follow(repo, subset, x):
815 815 """``follow([file])``
816 816 An alias for ``::.`` (ancestors of the working copy's first parent).
817 817 If a filename is specified, the history of the given file is followed,
818 818 including copies.
819 819 """
820 820 return _follow(repo, subset, x, 'follow')
821 821
822 822 def _followfirst(repo, subset, x):
823 823 # ``followfirst([file])``
824 824 # Like ``follow([file])`` but follows only the first parent of
825 825 # every revision or file revision.
826 826 return _follow(repo, subset, x, '_followfirst', followfirst=True)
827 827
828 828 def getall(repo, subset, x):
829 829 """``all()``
830 830 All changesets, the same as ``0:tip``.
831 831 """
832 832 # i18n: "all" is a keyword
833 833 getargs(x, 0, 0, _("all takes no arguments"))
834 834 return subset
835 835
836 836 def grep(repo, subset, x):
837 837 """``grep(regex)``
838 838 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
839 839 to ensure special escape characters are handled correctly. Unlike
840 840 ``keyword(string)``, the match is case-sensitive.
841 841 """
842 842 try:
843 843 # i18n: "grep" is a keyword
844 844 gr = re.compile(getstring(x, _("grep requires a string")))
845 845 except re.error, e:
846 846 raise error.ParseError(_('invalid match pattern: %s') % e)
847 847
848 848 def matches(x):
849 849 c = repo[x]
850 850 for e in c.files() + [c.user(), c.description()]:
851 851 if gr.search(e):
852 852 return True
853 853 return False
854 854
855 855 return subset.filter(matches)
856 856
857 857 def _matchfiles(repo, subset, x):
858 858 # _matchfiles takes a revset list of prefixed arguments:
859 859 #
860 860 # [p:foo, i:bar, x:baz]
861 861 #
862 862 # builds a match object from them and filters subset. Allowed
863 863 # prefixes are 'p:' for regular patterns, 'i:' for include
864 864 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
865 865 # a revision identifier, or the empty string to reference the
866 866 # working directory, from which the match object is
867 867 # initialized. Use 'd:' to set the default matching mode, default
868 868 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
869 869
870 870 # i18n: "_matchfiles" is a keyword
871 871 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
872 872 pats, inc, exc = [], [], []
873 873 hasset = False
874 874 rev, default = None, None
875 875 for arg in l:
876 876 # i18n: "_matchfiles" is a keyword
877 877 s = getstring(arg, _("_matchfiles requires string arguments"))
878 878 prefix, value = s[:2], s[2:]
879 879 if prefix == 'p:':
880 880 pats.append(value)
881 881 elif prefix == 'i:':
882 882 inc.append(value)
883 883 elif prefix == 'x:':
884 884 exc.append(value)
885 885 elif prefix == 'r:':
886 886 if rev is not None:
887 887 # i18n: "_matchfiles" is a keyword
888 888 raise error.ParseError(_('_matchfiles expected at most one '
889 889 'revision'))
890 890 rev = value
891 891 elif prefix == 'd:':
892 892 if default is not None:
893 893 # i18n: "_matchfiles" is a keyword
894 894 raise error.ParseError(_('_matchfiles expected at most one '
895 895 'default mode'))
896 896 default = value
897 897 else:
898 898 # i18n: "_matchfiles" is a keyword
899 899 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
900 900 if not hasset and matchmod.patkind(value) == 'set':
901 901 hasset = True
902 902 if not default:
903 903 default = 'glob'
904 904
905 905 def matches(x):
906 906 m = None
907 907 c = repo[x]
908 908 if not m or (hasset and rev is None):
909 909 ctx = c
910 910 if rev is not None:
911 911 ctx = repo[rev or None]
912 912 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
913 913 exclude=exc, ctx=ctx, default=default)
914 914 for f in c.files():
915 915 if m(f):
916 916 return True
917 917 return False
918 918
919 919 return subset.filter(matches)
920 920
921 921 def hasfile(repo, subset, x):
922 922 """``file(pattern)``
923 923 Changesets affecting files matched by pattern.
924 924
925 925 For a faster but less accurate result, consider using ``filelog()``
926 926 instead.
927 927
928 928 This predicate uses ``glob:`` as the default kind of pattern.
929 929 """
930 930 # i18n: "file" is a keyword
931 931 pat = getstring(x, _("file requires a pattern"))
932 932 return _matchfiles(repo, subset, ('string', 'p:' + pat))
933 933
934 934 def head(repo, subset, x):
935 935 """``head()``
936 936 Changeset is a named branch head.
937 937 """
938 938 # i18n: "head" is a keyword
939 939 getargs(x, 0, 0, _("head takes no arguments"))
940 940 hs = set()
941 941 for b, ls in repo.branchmap().iteritems():
942 942 hs.update(repo[h].rev() for h in ls)
943 943 return subset.filter(lambda r: r in hs)
944 944
945 945 def heads(repo, subset, x):
946 946 """``heads(set)``
947 947 Members of set with no children in set.
948 948 """
949 949 s = getset(repo, subset, x)
950 950 ps = parents(repo, subset, x)
951 951 return s - ps
952 952
953 953 def hidden(repo, subset, x):
954 954 """``hidden()``
955 955 Hidden changesets.
956 956 """
957 957 # i18n: "hidden" is a keyword
958 958 getargs(x, 0, 0, _("hidden takes no arguments"))
959 959 hiddenrevs = repoview.filterrevs(repo, 'visible')
960 960 return subset & hiddenrevs
961 961
962 962 def keyword(repo, subset, x):
963 963 """``keyword(string)``
964 964 Search commit message, user name, and names of changed files for
965 965 string. The match is case-insensitive.
966 966 """
967 967 # i18n: "keyword" is a keyword
968 968 kw = encoding.lower(getstring(x, _("keyword requires a string")))
969 969
970 970 def matches(r):
971 971 c = repo[r]
972 972 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
973 973 c.description()])
974 974
975 975 return subset.filter(matches)
976 976
977 977 def limit(repo, subset, x):
978 978 """``limit(set, [n])``
979 979 First n members of set, defaulting to 1.
980 980 """
981 981 # i18n: "limit" is a keyword
982 982 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
983 983 try:
984 984 lim = 1
985 985 if len(l) == 2:
986 986 # i18n: "limit" is a keyword
987 987 lim = int(getstring(l[1], _("limit requires a number")))
988 988 except (TypeError, ValueError):
989 989 # i18n: "limit" is a keyword
990 990 raise error.ParseError(_("limit expects a number"))
991 991 ss = subset.set()
992 992 os = getset(repo, spanset(repo), l[0])
993 993 bs = baseset([])
994 994 it = iter(os)
995 995 for x in xrange(lim):
996 996 try:
997 997 y = it.next()
998 998 if y in ss:
999 999 bs.append(y)
1000 1000 except (StopIteration):
1001 1001 break
1002 1002 return bs
1003 1003
1004 1004 def last(repo, subset, x):
1005 1005 """``last(set, [n])``
1006 1006 Last n members of set, defaulting to 1.
1007 1007 """
1008 1008 # i18n: "last" is a keyword
1009 1009 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1010 1010 try:
1011 1011 lim = 1
1012 1012 if len(l) == 2:
1013 1013 # i18n: "last" is a keyword
1014 1014 lim = int(getstring(l[1], _("last requires a number")))
1015 1015 except (TypeError, ValueError):
1016 1016 # i18n: "last" is a keyword
1017 1017 raise error.ParseError(_("last expects a number"))
1018 1018 ss = subset.set()
1019 1019 os = getset(repo, spanset(repo), l[0])
1020 1020 os.reverse()
1021 1021 bs = baseset([])
1022 1022 it = iter(os)
1023 1023 for x in xrange(lim):
1024 1024 try:
1025 1025 y = it.next()
1026 1026 if y in ss:
1027 1027 bs.append(y)
1028 1028 except (StopIteration):
1029 1029 break
1030 1030 return bs
1031 1031
1032 1032 def maxrev(repo, subset, x):
1033 1033 """``max(set)``
1034 1034 Changeset with highest revision number in set.
1035 1035 """
1036 1036 os = getset(repo, spanset(repo), x)
1037 1037 if os:
1038 1038 m = max(os)
1039 1039 if m in subset:
1040 1040 return baseset([m])
1041 1041 return baseset([])
1042 1042
1043 1043 def merge(repo, subset, x):
1044 1044 """``merge()``
1045 1045 Changeset is a merge changeset.
1046 1046 """
1047 1047 # i18n: "merge" is a keyword
1048 1048 getargs(x, 0, 0, _("merge takes no arguments"))
1049 1049 cl = repo.changelog
1050 1050 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1051 1051
1052 1052 def branchpoint(repo, subset, x):
1053 1053 """``branchpoint()``
1054 1054 Changesets with more than one child.
1055 1055 """
1056 1056 # i18n: "branchpoint" is a keyword
1057 1057 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1058 1058 cl = repo.changelog
1059 1059 if not subset:
1060 1060 return baseset([])
1061 1061 baserev = min(subset)
1062 1062 parentscount = [0]*(len(repo) - baserev)
1063 1063 for r in cl.revs(start=baserev + 1):
1064 1064 for p in cl.parentrevs(r):
1065 1065 if p >= baserev:
1066 1066 parentscount[p - baserev] += 1
1067 1067 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1068 1068
1069 1069 def minrev(repo, subset, x):
1070 1070 """``min(set)``
1071 1071 Changeset with lowest revision number in set.
1072 1072 """
1073 1073 os = getset(repo, spanset(repo), x)
1074 1074 if os:
1075 1075 m = min(os)
1076 1076 if m in subset:
1077 1077 return baseset([m])
1078 1078 return baseset([])
1079 1079
1080 1080 def _missingancestors(repo, subset, x):
1081 1081 # i18n: "_missingancestors" is a keyword
1082 1082 revs, bases = getargs(x, 2, 2,
1083 1083 _("_missingancestors requires two arguments"))
1084 1084 rs = baseset(repo)
1085 1085 revs = getset(repo, rs, revs)
1086 1086 bases = getset(repo, rs, bases)
1087 1087 missing = set(repo.changelog.findmissingrevs(bases, revs))
1088 1088 return baseset([r for r in subset if r in missing])
1089 1089
1090 1090 def modifies(repo, subset, x):
1091 1091 """``modifies(pattern)``
1092 1092 Changesets modifying files matched by pattern.
1093 1093
1094 1094 The pattern without explicit kind like ``glob:`` is expected to be
1095 1095 relative to the current directory and match against a file or a
1096 1096 directory.
1097 1097 """
1098 1098 # i18n: "modifies" is a keyword
1099 1099 pat = getstring(x, _("modifies requires a pattern"))
1100 1100 return checkstatus(repo, subset, pat, 0)
1101 1101
1102 1102 def node_(repo, subset, x):
1103 1103 """``id(string)``
1104 1104 Revision non-ambiguously specified by the given hex string prefix.
1105 1105 """
1106 1106 # i18n: "id" is a keyword
1107 1107 l = getargs(x, 1, 1, _("id requires one argument"))
1108 1108 # i18n: "id" is a keyword
1109 1109 n = getstring(l[0], _("id requires a string"))
1110 1110 if len(n) == 40:
1111 1111 rn = repo[n].rev()
1112 1112 else:
1113 1113 rn = None
1114 1114 pm = repo.changelog._partialmatch(n)
1115 1115 if pm is not None:
1116 1116 rn = repo.changelog.rev(pm)
1117 1117
1118 1118 return subset.filter(lambda r: r == rn)
1119 1119
1120 1120 def obsolete(repo, subset, x):
1121 1121 """``obsolete()``
1122 1122 Mutable changeset with a newer version."""
1123 1123 # i18n: "obsolete" is a keyword
1124 1124 getargs(x, 0, 0, _("obsolete takes no arguments"))
1125 1125 obsoletes = obsmod.getrevs(repo, 'obsolete')
1126 1126 return subset & obsoletes
1127 1127
1128 1128 def origin(repo, subset, x):
1129 1129 """``origin([set])``
1130 1130 Changesets that were specified as a source for the grafts, transplants or
1131 1131 rebases that created the given revisions. Omitting the optional set is the
1132 1132 same as passing all(). If a changeset created by these operations is itself
1133 1133 specified as a source for one of these operations, only the source changeset
1134 1134 for the first operation is selected.
1135 1135 """
1136 1136 if x is not None:
1137 1137 args = getset(repo, spanset(repo), x).set()
1138 1138 else:
1139 1139 args = getall(repo, spanset(repo), x).set()
1140 1140
1141 1141 def _firstsrc(rev):
1142 1142 src = _getrevsource(repo, rev)
1143 1143 if src is None:
1144 1144 return None
1145 1145
1146 1146 while True:
1147 1147 prev = _getrevsource(repo, src)
1148 1148
1149 1149 if prev is None:
1150 1150 return src
1151 1151 src = prev
1152 1152
1153 1153 o = set([_firstsrc(r) for r in args])
1154 1154 return subset.filter(lambda r: r in o)
1155 1155
1156 1156 def outgoing(repo, subset, x):
1157 1157 """``outgoing([path])``
1158 1158 Changesets not found in the specified destination repository, or the
1159 1159 default push location.
1160 1160 """
1161 1161 import hg # avoid start-up nasties
1162 1162 # i18n: "outgoing" is a keyword
1163 1163 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1164 1164 # i18n: "outgoing" is a keyword
1165 1165 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1166 1166 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1167 1167 dest, branches = hg.parseurl(dest)
1168 1168 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1169 1169 if revs:
1170 1170 revs = [repo.lookup(rev) for rev in revs]
1171 1171 other = hg.peer(repo, {}, dest)
1172 1172 repo.ui.pushbuffer()
1173 1173 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1174 1174 repo.ui.popbuffer()
1175 1175 cl = repo.changelog
1176 1176 o = set([cl.rev(r) for r in outgoing.missing])
1177 1177 return subset.filter(lambda r: r in o)
1178 1178
1179 1179 def p1(repo, subset, x):
1180 1180 """``p1([set])``
1181 1181 First parent of changesets in set, or the working directory.
1182 1182 """
1183 1183 if x is None:
1184 1184 p = repo[x].p1().rev()
1185 1185 return subset.filter(lambda r: r == p)
1186 1186
1187 1187 ps = set()
1188 1188 cl = repo.changelog
1189 1189 for r in getset(repo, spanset(repo), x):
1190 1190 ps.add(cl.parentrevs(r)[0])
1191 1191 return subset & ps
1192 1192
1193 1193 def p2(repo, subset, x):
1194 1194 """``p2([set])``
1195 1195 Second parent of changesets in set, or the working directory.
1196 1196 """
1197 1197 if x is None:
1198 1198 ps = repo[x].parents()
1199 1199 try:
1200 1200 p = ps[1].rev()
1201 1201 return subset.filter(lambda r: r == p)
1202 1202 except IndexError:
1203 1203 return baseset([])
1204 1204
1205 1205 ps = set()
1206 1206 cl = repo.changelog
1207 1207 for r in getset(repo, spanset(repo), x):
1208 1208 ps.add(cl.parentrevs(r)[1])
1209 1209 return subset & ps
1210 1210
1211 1211 def parents(repo, subset, x):
1212 1212 """``parents([set])``
1213 1213 The set of all parents for all changesets in set, or the working directory.
1214 1214 """
1215 1215 if x is None:
1216 1216 ps = tuple(p.rev() for p in repo[x].parents())
1217 1217 return subset & ps
1218 1218
1219 1219 ps = set()
1220 1220 cl = repo.changelog
1221 1221 for r in getset(repo, spanset(repo), x):
1222 1222 ps.update(cl.parentrevs(r))
1223 1223 return subset & ps
1224 1224
1225 1225 def parentspec(repo, subset, x, n):
1226 1226 """``set^0``
1227 1227 The set.
1228 1228 ``set^1`` (or ``set^``), ``set^2``
1229 1229 First or second parent, respectively, of all changesets in set.
1230 1230 """
1231 1231 try:
1232 1232 n = int(n[1])
1233 1233 if n not in (0, 1, 2):
1234 1234 raise ValueError
1235 1235 except (TypeError, ValueError):
1236 1236 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1237 1237 ps = set()
1238 1238 cl = repo.changelog
1239 1239 for r in getset(repo, baseset(cl), x):
1240 1240 if n == 0:
1241 1241 ps.add(r)
1242 1242 elif n == 1:
1243 1243 ps.add(cl.parentrevs(r)[0])
1244 1244 elif n == 2:
1245 1245 parents = cl.parentrevs(r)
1246 1246 if len(parents) > 1:
1247 1247 ps.add(parents[1])
1248 1248 return subset & ps
1249 1249
1250 1250 def present(repo, subset, x):
1251 1251 """``present(set)``
1252 1252 An empty set, if any revision in set isn't found; otherwise,
1253 1253 all revisions in set.
1254 1254
1255 1255 If any of specified revisions is not present in the local repository,
1256 1256 the query is normally aborted. But this predicate allows the query
1257 1257 to continue even in such cases.
1258 1258 """
1259 1259 try:
1260 1260 return getset(repo, subset, x)
1261 1261 except error.RepoLookupError:
1262 1262 return baseset([])
1263 1263
1264 1264 def public(repo, subset, x):
1265 1265 """``public()``
1266 1266 Changeset in public phase."""
1267 1267 # i18n: "public" is a keyword
1268 1268 getargs(x, 0, 0, _("public takes no arguments"))
1269 1269 pc = repo._phasecache
1270 1270 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1271 1271
1272 1272 def remote(repo, subset, x):
1273 1273 """``remote([id [,path]])``
1274 1274 Local revision that corresponds to the given identifier in a
1275 1275 remote repository, if present. Here, the '.' identifier is a
1276 1276 synonym for the current local branch.
1277 1277 """
1278 1278
1279 1279 import hg # avoid start-up nasties
1280 1280 # i18n: "remote" is a keyword
1281 1281 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1282 1282
1283 1283 q = '.'
1284 1284 if len(l) > 0:
1285 1285 # i18n: "remote" is a keyword
1286 1286 q = getstring(l[0], _("remote requires a string id"))
1287 1287 if q == '.':
1288 1288 q = repo['.'].branch()
1289 1289
1290 1290 dest = ''
1291 1291 if len(l) > 1:
1292 1292 # i18n: "remote" is a keyword
1293 1293 dest = getstring(l[1], _("remote requires a repository path"))
1294 1294 dest = repo.ui.expandpath(dest or 'default')
1295 1295 dest, branches = hg.parseurl(dest)
1296 1296 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1297 1297 if revs:
1298 1298 revs = [repo.lookup(rev) for rev in revs]
1299 1299 other = hg.peer(repo, {}, dest)
1300 1300 n = other.lookup(q)
1301 1301 if n in repo:
1302 1302 r = repo[n].rev()
1303 1303 if r in subset:
1304 1304 return baseset([r])
1305 1305 return baseset([])
1306 1306
1307 1307 def removes(repo, subset, x):
1308 1308 """``removes(pattern)``
1309 1309 Changesets which remove files matching pattern.
1310 1310
1311 1311 The pattern without explicit kind like ``glob:`` is expected to be
1312 1312 relative to the current directory and match against a file or a
1313 1313 directory.
1314 1314 """
1315 1315 # i18n: "removes" is a keyword
1316 1316 pat = getstring(x, _("removes requires a pattern"))
1317 1317 return checkstatus(repo, subset, pat, 2)
1318 1318
1319 1319 def rev(repo, subset, x):
1320 1320 """``rev(number)``
1321 1321 Revision with the given numeric identifier.
1322 1322 """
1323 1323 # i18n: "rev" is a keyword
1324 1324 l = getargs(x, 1, 1, _("rev requires one argument"))
1325 1325 try:
1326 1326 # i18n: "rev" is a keyword
1327 1327 l = int(getstring(l[0], _("rev requires a number")))
1328 1328 except (TypeError, ValueError):
1329 1329 # i18n: "rev" is a keyword
1330 1330 raise error.ParseError(_("rev expects a number"))
1331 1331 return subset.filter(lambda r: r == l)
1332 1332
1333 1333 def matching(repo, subset, x):
1334 1334 """``matching(revision [, field])``
1335 1335 Changesets in which a given set of fields match the set of fields in the
1336 1336 selected revision or set.
1337 1337
1338 1338 To match more than one field pass the list of fields to match separated
1339 1339 by spaces (e.g. ``author description``).
1340 1340
1341 1341 Valid fields are most regular revision fields and some special fields.
1342 1342
1343 1343 Regular revision fields are ``description``, ``author``, ``branch``,
1344 1344 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1345 1345 and ``diff``.
1346 1346 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1347 1347 contents of the revision. Two revisions matching their ``diff`` will
1348 1348 also match their ``files``.
1349 1349
1350 1350 Special fields are ``summary`` and ``metadata``:
1351 1351 ``summary`` matches the first line of the description.
1352 1352 ``metadata`` is equivalent to matching ``description user date``
1353 1353 (i.e. it matches the main metadata fields).
1354 1354
1355 1355 ``metadata`` is the default field which is used when no fields are
1356 1356 specified. You can match more than one field at a time.
1357 1357 """
1358 1358 # i18n: "matching" is a keyword
1359 1359 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1360 1360
1361 1361 revs = getset(repo, baseset(repo.changelog), l[0])
1362 1362
1363 1363 fieldlist = ['metadata']
1364 1364 if len(l) > 1:
1365 1365 fieldlist = getstring(l[1],
1366 1366 # i18n: "matching" is a keyword
1367 1367 _("matching requires a string "
1368 1368 "as its second argument")).split()
1369 1369
1370 1370 # Make sure that there are no repeated fields,
1371 1371 # expand the 'special' 'metadata' field type
1372 1372 # and check the 'files' whenever we check the 'diff'
1373 1373 fields = []
1374 1374 for field in fieldlist:
1375 1375 if field == 'metadata':
1376 1376 fields += ['user', 'description', 'date']
1377 1377 elif field == 'diff':
1378 1378 # a revision matching the diff must also match the files
1379 1379 # since matching the diff is very costly, make sure to
1380 1380 # also match the files first
1381 1381 fields += ['files', 'diff']
1382 1382 else:
1383 1383 if field == 'author':
1384 1384 field = 'user'
1385 1385 fields.append(field)
1386 1386 fields = set(fields)
1387 1387 if 'summary' in fields and 'description' in fields:
1388 1388 # If a revision matches its description it also matches its summary
1389 1389 fields.discard('summary')
1390 1390
1391 1391 # We may want to match more than one field
1392 1392 # Not all fields take the same amount of time to be matched
1393 1393 # Sort the selected fields in order of increasing matching cost
1394 1394 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1395 1395 'files', 'description', 'substate', 'diff']
1396 1396 def fieldkeyfunc(f):
1397 1397 try:
1398 1398 return fieldorder.index(f)
1399 1399 except ValueError:
1400 1400 # assume an unknown field is very costly
1401 1401 return len(fieldorder)
1402 1402 fields = list(fields)
1403 1403 fields.sort(key=fieldkeyfunc)
1404 1404
1405 1405 # Each field will be matched with its own "getfield" function
1406 1406 # which will be added to the getfieldfuncs array of functions
1407 1407 getfieldfuncs = []
1408 1408 _funcs = {
1409 1409 'user': lambda r: repo[r].user(),
1410 1410 'branch': lambda r: repo[r].branch(),
1411 1411 'date': lambda r: repo[r].date(),
1412 1412 'description': lambda r: repo[r].description(),
1413 1413 'files': lambda r: repo[r].files(),
1414 1414 'parents': lambda r: repo[r].parents(),
1415 1415 'phase': lambda r: repo[r].phase(),
1416 1416 'substate': lambda r: repo[r].substate,
1417 1417 'summary': lambda r: repo[r].description().splitlines()[0],
1418 1418 'diff': lambda r: list(repo[r].diff(git=True),)
1419 1419 }
1420 1420 for info in fields:
1421 1421 getfield = _funcs.get(info, None)
1422 1422 if getfield is None:
1423 1423 raise error.ParseError(
1424 1424 # i18n: "matching" is a keyword
1425 1425 _("unexpected field name passed to matching: %s") % info)
1426 1426 getfieldfuncs.append(getfield)
1427 1427 # convert the getfield array of functions into a "getinfo" function
1428 1428 # which returns an array of field values (or a single value if there
1429 1429 # is only one field to match)
1430 1430 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1431 1431
1432 1432 def matches(x):
1433 1433 for rev in revs:
1434 1434 target = getinfo(rev)
1435 1435 match = True
1436 1436 for n, f in enumerate(getfieldfuncs):
1437 1437 if target[n] != f(x):
1438 1438 match = False
1439 1439 if match:
1440 1440 return True
1441 1441 return False
1442 1442
1443 1443 return subset.filter(matches)
1444 1444
1445 1445 def reverse(repo, subset, x):
1446 1446 """``reverse(set)``
1447 1447 Reverse order of set.
1448 1448 """
1449 1449 l = getset(repo, subset, x)
1450 1450 l.reverse()
1451 1451 return l
1452 1452
1453 1453 def roots(repo, subset, x):
1454 1454 """``roots(set)``
1455 1455 Changesets in set with no parent changeset in set.
1456 1456 """
1457 1457 s = getset(repo, baseset(repo.changelog), x).set()
1458 1458 subset = baseset([r for r in subset if r in s])
1459 1459 cs = _children(repo, subset, s)
1460 1460 return subset - cs
1461 1461
1462 1462 def secret(repo, subset, x):
1463 1463 """``secret()``
1464 1464 Changeset in secret phase."""
1465 1465 # i18n: "secret" is a keyword
1466 1466 getargs(x, 0, 0, _("secret takes no arguments"))
1467 1467 pc = repo._phasecache
1468 1468 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1469 1469
1470 1470 def sort(repo, subset, x):
1471 1471 """``sort(set[, [-]key...])``
1472 1472 Sort set by keys. The default sort order is ascending, specify a key
1473 1473 as ``-key`` to sort in descending order.
1474 1474
1475 1475 The keys can be:
1476 1476
1477 1477 - ``rev`` for the revision number,
1478 1478 - ``branch`` for the branch name,
1479 1479 - ``desc`` for the commit message (description),
1480 1480 - ``user`` for user name (``author`` can be used as an alias),
1481 1481 - ``date`` for the commit date
1482 1482 """
1483 1483 # i18n: "sort" is a keyword
1484 1484 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1485 1485 keys = "rev"
1486 1486 if len(l) == 2:
1487 1487 # i18n: "sort" is a keyword
1488 1488 keys = getstring(l[1], _("sort spec must be a string"))
1489 1489
1490 1490 s = l[0]
1491 1491 keys = keys.split()
1492 1492 l = []
1493 1493 def invert(s):
1494 1494 return "".join(chr(255 - ord(c)) for c in s)
1495 1495 for r in getset(repo, subset, s):
1496 1496 c = repo[r]
1497 1497 e = []
1498 1498 for k in keys:
1499 1499 if k == 'rev':
1500 1500 e.append(r)
1501 1501 elif k == '-rev':
1502 1502 e.append(-r)
1503 1503 elif k == 'branch':
1504 1504 e.append(c.branch())
1505 1505 elif k == '-branch':
1506 1506 e.append(invert(c.branch()))
1507 1507 elif k == 'desc':
1508 1508 e.append(c.description())
1509 1509 elif k == '-desc':
1510 1510 e.append(invert(c.description()))
1511 1511 elif k in 'user author':
1512 1512 e.append(c.user())
1513 1513 elif k in '-user -author':
1514 1514 e.append(invert(c.user()))
1515 1515 elif k == 'date':
1516 1516 e.append(c.date()[0])
1517 1517 elif k == '-date':
1518 1518 e.append(-c.date()[0])
1519 1519 else:
1520 1520 raise error.ParseError(_("unknown sort key %r") % k)
1521 1521 e.append(r)
1522 1522 l.append(e)
1523 1523 l.sort()
1524 1524 return baseset([e[-1] for e in l])
1525 1525
1526 1526 def _stringmatcher(pattern):
1527 1527 """
1528 1528 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1529 1529 returns the matcher name, pattern, and matcher function.
1530 1530 missing or unknown prefixes are treated as literal matches.
1531 1531
1532 1532 helper for tests:
1533 1533 >>> def test(pattern, *tests):
1534 1534 ... kind, pattern, matcher = _stringmatcher(pattern)
1535 1535 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1536 1536
1537 1537 exact matching (no prefix):
1538 1538 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1539 1539 ('literal', 'abcdefg', [False, False, True])
1540 1540
1541 1541 regex matching ('re:' prefix)
1542 1542 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1543 1543 ('re', 'a.+b', [False, False, True])
1544 1544
1545 1545 force exact matches ('literal:' prefix)
1546 1546 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1547 1547 ('literal', 're:foobar', [False, True])
1548 1548
1549 1549 unknown prefixes are ignored and treated as literals
1550 1550 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1551 1551 ('literal', 'foo:bar', [False, False, True])
1552 1552 """
1553 1553 if pattern.startswith('re:'):
1554 1554 pattern = pattern[3:]
1555 1555 try:
1556 1556 regex = re.compile(pattern)
1557 1557 except re.error, e:
1558 1558 raise error.ParseError(_('invalid regular expression: %s')
1559 1559 % e)
1560 1560 return 're', pattern, regex.search
1561 1561 elif pattern.startswith('literal:'):
1562 1562 pattern = pattern[8:]
1563 1563 return 'literal', pattern, pattern.__eq__
1564 1564
1565 1565 def _substringmatcher(pattern):
1566 1566 kind, pattern, matcher = _stringmatcher(pattern)
1567 1567 if kind == 'literal':
1568 1568 matcher = lambda s: pattern in s
1569 1569 return kind, pattern, matcher
1570 1570
1571 1571 def tag(repo, subset, x):
1572 1572 """``tag([name])``
1573 1573 The specified tag by name, or all tagged revisions if no name is given.
1574 1574 """
1575 1575 # i18n: "tag" is a keyword
1576 1576 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1577 1577 cl = repo.changelog
1578 1578 if args:
1579 1579 pattern = getstring(args[0],
1580 1580 # i18n: "tag" is a keyword
1581 1581 _('the argument to tag must be a string'))
1582 1582 kind, pattern, matcher = _stringmatcher(pattern)
1583 1583 if kind == 'literal':
1584 1584 # avoid resolving all tags
1585 1585 tn = repo._tagscache.tags.get(pattern, None)
1586 1586 if tn is None:
1587 1587 raise util.Abort(_("tag '%s' does not exist") % pattern)
1588 1588 s = set([repo[tn].rev()])
1589 1589 else:
1590 1590 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1591 1591 else:
1592 1592 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1593 1593 return subset & s
1594 1594
1595 1595 def tagged(repo, subset, x):
1596 1596 return tag(repo, subset, x)
1597 1597
1598 1598 def unstable(repo, subset, x):
1599 1599 """``unstable()``
1600 1600 Non-obsolete changesets with obsolete ancestors.
1601 1601 """
1602 1602 # i18n: "unstable" is a keyword
1603 1603 getargs(x, 0, 0, _("unstable takes no arguments"))
1604 1604 unstables = obsmod.getrevs(repo, 'unstable')
1605 1605 return subset & unstables
1606 1606
1607 1607
1608 1608 def user(repo, subset, x):
1609 1609 """``user(string)``
1610 1610 User name contains string. The match is case-insensitive.
1611 1611
1612 1612 If `string` starts with `re:`, the remainder of the string is treated as
1613 1613 a regular expression. To match a user that actually contains `re:`, use
1614 1614 the prefix `literal:`.
1615 1615 """
1616 1616 return author(repo, subset, x)
1617 1617
1618 1618 # for internal use
1619 1619 def _list(repo, subset, x):
1620 1620 s = getstring(x, "internal error")
1621 1621 if not s:
1622 1622 return baseset([])
1623 1623 ls = [repo[r].rev() for r in s.split('\0')]
1624 1624 s = subset.set()
1625 1625 return baseset([r for r in ls if r in s])
1626 1626
1627 1627 # for internal use
1628 1628 def _intlist(repo, subset, x):
1629 1629 s = getstring(x, "internal error")
1630 1630 if not s:
1631 1631 return baseset([])
1632 1632 ls = [int(r) for r in s.split('\0')]
1633 1633 s = subset.set()
1634 1634 return baseset([r for r in ls if r in s])
1635 1635
1636 1636 # for internal use
1637 1637 def _hexlist(repo, subset, x):
1638 1638 s = getstring(x, "internal error")
1639 1639 if not s:
1640 1640 return baseset([])
1641 1641 cl = repo.changelog
1642 1642 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1643 1643 s = subset.set()
1644 1644 return baseset([r for r in ls if r in s])
1645 1645
1646 1646 symbols = {
1647 1647 "adds": adds,
1648 1648 "all": getall,
1649 1649 "ancestor": ancestor,
1650 1650 "ancestors": ancestors,
1651 1651 "_firstancestors": _firstancestors,
1652 1652 "author": author,
1653 1653 "only": only,
1654 1654 "bisect": bisect,
1655 1655 "bisected": bisected,
1656 1656 "bookmark": bookmark,
1657 1657 "branch": branch,
1658 1658 "branchpoint": branchpoint,
1659 1659 "bumped": bumped,
1660 1660 "bundle": bundle,
1661 1661 "children": children,
1662 1662 "closed": closed,
1663 1663 "contains": contains,
1664 1664 "converted": converted,
1665 1665 "date": date,
1666 1666 "desc": desc,
1667 1667 "descendants": descendants,
1668 1668 "_firstdescendants": _firstdescendants,
1669 1669 "destination": destination,
1670 1670 "divergent": divergent,
1671 1671 "draft": draft,
1672 1672 "extinct": extinct,
1673 1673 "extra": extra,
1674 1674 "file": hasfile,
1675 1675 "filelog": filelog,
1676 1676 "first": first,
1677 1677 "follow": follow,
1678 1678 "_followfirst": _followfirst,
1679 1679 "grep": grep,
1680 1680 "head": head,
1681 1681 "heads": heads,
1682 1682 "hidden": hidden,
1683 1683 "id": node_,
1684 1684 "keyword": keyword,
1685 1685 "last": last,
1686 1686 "limit": limit,
1687 1687 "_matchfiles": _matchfiles,
1688 1688 "max": maxrev,
1689 1689 "merge": merge,
1690 1690 "min": minrev,
1691 1691 "_missingancestors": _missingancestors,
1692 1692 "modifies": modifies,
1693 1693 "obsolete": obsolete,
1694 1694 "origin": origin,
1695 1695 "outgoing": outgoing,
1696 1696 "p1": p1,
1697 1697 "p2": p2,
1698 1698 "parents": parents,
1699 1699 "present": present,
1700 1700 "public": public,
1701 1701 "remote": remote,
1702 1702 "removes": removes,
1703 1703 "rev": rev,
1704 1704 "reverse": reverse,
1705 1705 "roots": roots,
1706 1706 "sort": sort,
1707 1707 "secret": secret,
1708 1708 "matching": matching,
1709 1709 "tag": tag,
1710 1710 "tagged": tagged,
1711 1711 "user": user,
1712 1712 "unstable": unstable,
1713 1713 "_list": _list,
1714 1714 "_intlist": _intlist,
1715 1715 "_hexlist": _hexlist,
1716 1716 }
1717 1717
1718 1718 # symbols which can't be used for a DoS attack for any given input
1719 1719 # (e.g. those which accept regexes as plain strings shouldn't be included)
1720 1720 # functions that just return a lot of changesets (like all) don't count here
1721 1721 safesymbols = set([
1722 1722 "adds",
1723 1723 "all",
1724 1724 "ancestor",
1725 1725 "ancestors",
1726 1726 "_firstancestors",
1727 1727 "author",
1728 1728 "bisect",
1729 1729 "bisected",
1730 1730 "bookmark",
1731 1731 "branch",
1732 1732 "branchpoint",
1733 1733 "bumped",
1734 1734 "bundle",
1735 1735 "children",
1736 1736 "closed",
1737 1737 "converted",
1738 1738 "date",
1739 1739 "desc",
1740 1740 "descendants",
1741 1741 "_firstdescendants",
1742 1742 "destination",
1743 1743 "divergent",
1744 1744 "draft",
1745 1745 "extinct",
1746 1746 "extra",
1747 1747 "file",
1748 1748 "filelog",
1749 1749 "first",
1750 1750 "follow",
1751 1751 "_followfirst",
1752 1752 "head",
1753 1753 "heads",
1754 1754 "hidden",
1755 1755 "id",
1756 1756 "keyword",
1757 1757 "last",
1758 1758 "limit",
1759 1759 "_matchfiles",
1760 1760 "max",
1761 1761 "merge",
1762 1762 "min",
1763 1763 "_missingancestors",
1764 1764 "modifies",
1765 1765 "obsolete",
1766 1766 "origin",
1767 1767 "outgoing",
1768 1768 "p1",
1769 1769 "p2",
1770 1770 "parents",
1771 1771 "present",
1772 1772 "public",
1773 1773 "remote",
1774 1774 "removes",
1775 1775 "rev",
1776 1776 "reverse",
1777 1777 "roots",
1778 1778 "sort",
1779 1779 "secret",
1780 1780 "matching",
1781 1781 "tag",
1782 1782 "tagged",
1783 1783 "user",
1784 1784 "unstable",
1785 1785 "_list",
1786 1786 "_intlist",
1787 1787 "_hexlist",
1788 1788 ])
1789 1789
1790 1790 methods = {
1791 1791 "range": rangeset,
1792 1792 "dagrange": dagrange,
1793 1793 "string": stringset,
1794 1794 "symbol": symbolset,
1795 1795 "and": andset,
1796 1796 "or": orset,
1797 1797 "not": notset,
1798 1798 "list": listset,
1799 1799 "func": func,
1800 1800 "ancestor": ancestorspec,
1801 1801 "parent": parentspec,
1802 1802 "parentpost": p1,
1803 1803 }
1804 1804
1805 1805 def optimize(x, small):
1806 1806 if x is None:
1807 1807 return 0, x
1808 1808
1809 1809 smallbonus = 1
1810 1810 if small:
1811 1811 smallbonus = .5
1812 1812
1813 1813 op = x[0]
1814 1814 if op == 'minus':
1815 1815 return optimize(('and', x[1], ('not', x[2])), small)
1816 1816 elif op == 'dagrangepre':
1817 1817 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1818 1818 elif op == 'dagrangepost':
1819 1819 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1820 1820 elif op == 'rangepre':
1821 1821 return optimize(('range', ('string', '0'), x[1]), small)
1822 1822 elif op == 'rangepost':
1823 1823 return optimize(('range', x[1], ('string', 'tip')), small)
1824 1824 elif op == 'negate':
1825 1825 return optimize(('string',
1826 1826 '-' + getstring(x[1], _("can't negate that"))), small)
1827 1827 elif op in 'string symbol negate':
1828 1828 return smallbonus, x # single revisions are small
1829 1829 elif op == 'and':
1830 1830 wa, ta = optimize(x[1], True)
1831 1831 wb, tb = optimize(x[2], True)
1832 1832
1833 1833 # (::x and not ::y)/(not ::y and ::x) have a fast path
1834 1834 def ismissingancestors(revs, bases):
1835 1835 return (
1836 1836 revs[0] == 'func'
1837 1837 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1838 1838 and bases[0] == 'not'
1839 1839 and bases[1][0] == 'func'
1840 1840 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1841 1841
1842 1842 w = min(wa, wb)
1843 1843 if ismissingancestors(ta, tb):
1844 1844 return w, ('func', ('symbol', '_missingancestors'),
1845 1845 ('list', ta[2], tb[1][2]))
1846 1846 if ismissingancestors(tb, ta):
1847 1847 return w, ('func', ('symbol', '_missingancestors'),
1848 1848 ('list', tb[2], ta[1][2]))
1849 1849
1850 1850 if wa > wb:
1851 1851 return w, (op, tb, ta)
1852 1852 return w, (op, ta, tb)
1853 1853 elif op == 'or':
1854 1854 wa, ta = optimize(x[1], False)
1855 1855 wb, tb = optimize(x[2], False)
1856 1856 if wb < wa:
1857 1857 wb, wa = wa, wb
1858 1858 return max(wa, wb), (op, ta, tb)
1859 1859 elif op == 'not':
1860 1860 o = optimize(x[1], not small)
1861 1861 return o[0], (op, o[1])
1862 1862 elif op == 'parentpost':
1863 1863 o = optimize(x[1], small)
1864 1864 return o[0], (op, o[1])
1865 1865 elif op == 'group':
1866 1866 return optimize(x[1], small)
1867 1867 elif op in 'dagrange range list parent ancestorspec':
1868 1868 if op == 'parent':
1869 1869 # x^:y means (x^) : y, not x ^ (:y)
1870 1870 post = ('parentpost', x[1])
1871 1871 if x[2][0] == 'dagrangepre':
1872 1872 return optimize(('dagrange', post, x[2][1]), small)
1873 1873 elif x[2][0] == 'rangepre':
1874 1874 return optimize(('range', post, x[2][1]), small)
1875 1875
1876 1876 wa, ta = optimize(x[1], small)
1877 1877 wb, tb = optimize(x[2], small)
1878 1878 return wa + wb, (op, ta, tb)
1879 1879 elif op == 'func':
1880 1880 f = getstring(x[1], _("not a symbol"))
1881 1881 wa, ta = optimize(x[2], small)
1882 1882 if f in ("author branch closed date desc file grep keyword "
1883 1883 "outgoing user"):
1884 1884 w = 10 # slow
1885 1885 elif f in "modifies adds removes":
1886 1886 w = 30 # slower
1887 1887 elif f == "contains":
1888 1888 w = 100 # very slow
1889 1889 elif f == "ancestor":
1890 1890 w = 1 * smallbonus
1891 1891 elif f in "reverse limit first":
1892 1892 w = 0
1893 1893 elif f in "sort":
1894 1894 w = 10 # assume most sorts look at changelog
1895 1895 else:
1896 1896 w = 1
1897 1897 return w + wa, (op, x[1], ta)
1898 1898 return 1, x
1899 1899
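For orientation, a hedged sketch of what optimize() does to a parsed tree; the query string is invented for the example and the tuple in the comment shows the expected shape, not authoritative output. A minus is rewritten into 'and not', and the cheaper operand of the resulting 'and' is moved first (author() is weighted 10 here, merge() only 1).

# Illustrative only, not part of revset.py.
weight, tree = optimize(parse('author(bob) - merge()')[0], True)
# tree should now look like:
#   ('and',
#     ('not', ('func', ('symbol', 'merge'), None)),
#     ('func', ('symbol', 'author'), ('symbol', 'bob')))
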
1900 1900 _aliasarg = ('func', ('symbol', '_aliasarg'))
1901 1901 def _getaliasarg(tree):
1902 1902 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1903 1903 return X, None otherwise.
1904 1904 """
1905 1905 if (len(tree) == 3 and tree[:2] == _aliasarg
1906 1906 and tree[2][0] == 'string'):
1907 1907 return tree[2][1]
1908 1908 return None
1909 1909
1910 1910 def _checkaliasarg(tree, known=None):
1911 1911 """Check that tree contains no _aliasarg construct, or only ones whose
1912 1912 value is in known. Used to avoid alias placeholder injection.
1913 1913 """
1914 1914 if isinstance(tree, tuple):
1915 1915 arg = _getaliasarg(tree)
1916 1916 if arg is not None and (not known or arg not in known):
1917 1917 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1918 1918 for t in tree:
1919 1919 _checkaliasarg(t, known)
1920 1920
1921 1921 class revsetalias(object):
1922 1922 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1923 1923 args = None
1924 1924
1925 1925 def __init__(self, name, value):
1926 1926 '''Aliases like:
1927 1927
1928 1928 h = heads(default)
1929 1929 b($1) = ancestors($1) - ancestors(default)
1930 1930 '''
1931 1931 m = self.funcre.search(name)
1932 1932 if m:
1933 1933 self.name = m.group(1)
1934 1934 self.tree = ('func', ('symbol', m.group(1)))
1935 1935 self.args = [x.strip() for x in m.group(2).split(',')]
1936 1936 for arg in self.args:
1937 1937 # _aliasarg() is an unknown symbol only used to separate
1938 1938 # alias argument placeholders from regular strings.
1939 1939 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1940 1940 else:
1941 1941 self.name = name
1942 1942 self.tree = ('symbol', name)
1943 1943
1944 1944 self.replacement, pos = parse(value)
1945 1945 if pos != len(value):
1946 1946 raise error.ParseError(_('invalid token'), pos)
1947 1947 # Check for placeholder injection
1948 1948 _checkaliasarg(self.replacement, self.args)
1949 1949
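A doctest-style sketch of how a parameterized alias is picked apart (the alias definition is invented, mirroring the docstring example above):

>>> a = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
>>> a.name, a.args
('b', ['$1'])
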
1950 1950 def _getalias(aliases, tree):
1951 1951 """If tree looks like an unexpanded alias, return it. Return None
1952 1952 otherwise.
1953 1953 """
1954 1954 if isinstance(tree, tuple) and tree:
1955 1955 if tree[0] == 'symbol' and len(tree) == 2:
1956 1956 name = tree[1]
1957 1957 alias = aliases.get(name)
1958 1958 if alias and alias.args is None and alias.tree == tree:
1959 1959 return alias
1960 1960 if tree[0] == 'func' and len(tree) > 1:
1961 1961 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1962 1962 name = tree[1][1]
1963 1963 alias = aliases.get(name)
1964 1964 if alias and alias.args is not None and alias.tree == tree[:2]:
1965 1965 return alias
1966 1966 return None
1967 1967
1968 1968 def _expandargs(tree, args):
1969 1969 """Replace _aliasarg instances with the substitution value of the
1970 1970 same name in args, recursively.
1971 1971 """
1972 1972 if not tree or not isinstance(tree, tuple):
1973 1973 return tree
1974 1974 arg = _getaliasarg(tree)
1975 1975 if arg is not None:
1976 1976 return args[arg]
1977 1977 return tuple(_expandargs(t, args) for t in tree)
1978 1978
1979 1979 def _expandaliases(aliases, tree, expanding, cache):
1980 1980 """Expand aliases in tree, recursively.
1981 1981
1982 1982 'aliases' is a dictionary mapping user defined aliases to
1983 1983 revsetalias objects.
1984 1984 """
1985 1985 if not isinstance(tree, tuple):
1986 1986 # Do not expand raw strings
1987 1987 return tree
1988 1988 alias = _getalias(aliases, tree)
1989 1989 if alias is not None:
1990 1990 if alias in expanding:
1991 1991 raise error.ParseError(_('infinite expansion of revset alias "%s" '
1992 1992 'detected') % alias.name)
1993 1993 expanding.append(alias)
1994 1994 if alias.name not in cache:
1995 1995 cache[alias.name] = _expandaliases(aliases, alias.replacement,
1996 1996 expanding, cache)
1997 1997 result = cache[alias.name]
1998 1998 expanding.pop()
1999 1999 if alias.args is not None:
2000 2000 l = getlist(tree[2])
2001 2001 if len(l) != len(alias.args):
2002 2002 raise error.ParseError(
2003 2003 _('invalid number of arguments: %s') % len(l))
2004 2004 l = [_expandaliases(aliases, a, [], cache) for a in l]
2005 2005 result = _expandargs(result, dict(zip(alias.args, l)))
2006 2006 else:
2007 2007 result = tuple(_expandaliases(aliases, t, expanding, cache)
2008 2008 for t in tree)
2009 2009 return result
2010 2010
2011 2011 def findaliases(ui, tree):
2012 2012 _checkaliasarg(tree)
2013 2013 aliases = {}
2014 2014 for k, v in ui.configitems('revsetalias'):
2015 2015 alias = revsetalias(k, v)
2016 2016 aliases[alias.name] = alias
2017 2017 return _expandaliases(aliases, tree, [], {})
2018 2018
2019 2019 def parse(spec):
2020 2020 p = parser.parser(tokenize, elements)
2021 2021 return p.parse(spec)
2022 2022
2023 2023 def match(ui, spec):
2024 2024 if not spec:
2025 2025 raise error.ParseError(_("empty query"))
2026 2026 tree, pos = parse(spec)
2027 2027 if (pos != len(spec)):
2028 2028 raise error.ParseError(_("invalid token"), pos)
2029 2029 if ui:
2030 2030 tree = findaliases(ui, tree)
2031 2031 weight, tree = optimize(tree, True)
2032 2032 def mfunc(repo, subset):
2033 2033 if util.safehasattr(subset, 'set'):
2034 2034 return getset(repo, subset, tree)
2035 2035 return getset(repo, baseset(subset), tree)
2036 2036 return mfunc
2037 2037
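For orientation, a hedged sketch of the calling convention: match() compiles a spec into a function that is then applied to a repo and a subset. 'ui' and 'repo' below are assumed to be an existing ui object and an open repository; they are placeholders, not defined in this file.

# Hypothetical usage; 'ui' and 'repo' are assumed to exist.
m = match(ui, 'heads(default) and not merge()')
revs = m(repo, spanset(repo))   # evaluate against the whole repo range
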
2038 2038 def formatspec(expr, *args):
2039 2039 '''
2040 2040 This is a convenience function for using revsets internally, and
2041 2041 escapes arguments appropriately. Aliases are intentionally ignored
2042 2042 so that intended expression behavior isn't accidentally subverted.
2043 2043
2044 2044 Supported arguments:
2045 2045
2046 2046 %r = revset expression, parenthesized
2047 2047 %d = int(arg), no quoting
2048 2048 %s = string(arg), escaped and single-quoted
2049 2049 %b = arg.branch(), escaped and single-quoted
2050 2050 %n = hex(arg), single-quoted
2051 2051 %% = a literal '%'
2052 2052
2053 2053 Prefixing the type with 'l' specifies a parenthesized list of that type.
2054 2054
2055 2055 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2056 2056 '(10 or 11):: and ((this()) or (that()))'
2057 2057 >>> formatspec('%d:: and not %d::', 10, 20)
2058 2058 '10:: and not 20::'
2059 2059 >>> formatspec('%ld or %ld', [], [1])
2060 2060 "_list('') or 1"
2061 2061 >>> formatspec('keyword(%s)', 'foo\\xe9')
2062 2062 "keyword('foo\\\\xe9')"
2063 2063 >>> b = lambda: 'default'
2064 2064 >>> b.branch = b
2065 2065 >>> formatspec('branch(%b)', b)
2066 2066 "branch('default')"
2067 2067 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2068 2068 "root(_list('a\\x00b\\x00c\\x00d'))"
2069 2069 '''
2070 2070
2071 2071 def quote(s):
2072 2072 return repr(str(s))
2073 2073
2074 2074 def argtype(c, arg):
2075 2075 if c == 'd':
2076 2076 return str(int(arg))
2077 2077 elif c == 's':
2078 2078 return quote(arg)
2079 2079 elif c == 'r':
2080 2080 parse(arg) # make sure syntax errors are confined
2081 2081 return '(%s)' % arg
2082 2082 elif c == 'n':
2083 2083 return quote(node.hex(arg))
2084 2084 elif c == 'b':
2085 2085 return quote(arg.branch())
2086 2086
2087 2087 def listexp(s, t):
2088 2088 l = len(s)
2089 2089 if l == 0:
2090 2090 return "_list('')"
2091 2091 elif l == 1:
2092 2092 return argtype(t, s[0])
2093 2093 elif t == 'd':
2094 2094 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2095 2095 elif t == 's':
2096 2096 return "_list('%s')" % "\0".join(s)
2097 2097 elif t == 'n':
2098 2098 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2099 2099 elif t == 'b':
2100 2100 return "_list('%s')" % "\0".join(a.branch() for a in s)
2101 2101
2102 2102 m = l // 2
2103 2103 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2104 2104
2105 2105 ret = ''
2106 2106 pos = 0
2107 2107 arg = 0
2108 2108 while pos < len(expr):
2109 2109 c = expr[pos]
2110 2110 if c == '%':
2111 2111 pos += 1
2112 2112 d = expr[pos]
2113 2113 if d == '%':
2114 2114 ret += d
2115 2115 elif d in 'dsnbr':
2116 2116 ret += argtype(d, args[arg])
2117 2117 arg += 1
2118 2118 elif d == 'l':
2119 2119 # a list of some type
2120 2120 pos += 1
2121 2121 d = expr[pos]
2122 2122 ret += listexp(list(args[arg]), d)
2123 2123 arg += 1
2124 2124 else:
2125 2125 raise util.Abort('unexpected revspec format character %s' % d)
2126 2126 else:
2127 2127 ret += c
2128 2128 pos += 1
2129 2129
2130 2130 return ret
2131 2131
2132 2132 def prettyformat(tree):
2133 2133 def _prettyformat(tree, level, lines):
2134 2134 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2135 2135 lines.append((level, str(tree)))
2136 2136 else:
2137 2137 lines.append((level, '(%s' % tree[0]))
2138 2138 for s in tree[1:]:
2139 2139 _prettyformat(s, level + 1, lines)
2140 2140 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2141 2141
2142 2142 lines = []
2143 2143 _prettyformat(tree, 0, lines)
2144 2144 output = '\n'.join((' '*l + s) for l, s in lines)
2145 2145 return output
2146 2146
2147 2147 def depth(tree):
2148 2148 if isinstance(tree, tuple):
2149 2149 return max(map(depth, tree)) + 1
2150 2150 else:
2151 2151 return 0
2152 2152
2153 2153 def funcsused(tree):
2154 2154 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2155 2155 return set()
2156 2156 else:
2157 2157 funcs = set()
2158 2158 for s in tree[1:]:
2159 2159 funcs |= funcsused(s)
2160 2160 if tree[0] == 'func':
2161 2161 funcs.add(tree[1][1])
2162 2162 return funcs
2163 2163
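A doctest-style sketch of the two tree inspectors above (the query string is invented):

>>> tree = parse('heads(default) and not merge()')[0]
>>> sorted(funcsused(tree))
['heads', 'merge']
>>> depth(tree) > 0
True
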
2164 2164 class baseset(list):
2165 2165 """Basic data structure that represents a revset and contains the basic
2166 2166 operations that it should be able to perform.
2167 2167 """
2168 2168 def __init__(self, data):
2169 2169 super(baseset, self).__init__(data)
2170 2170 self._set = None
2171 2171
2172 2172 def ascending(self):
2173 2173 self.sort()
2174 2174
2175 2175 def descending(self):
2176 2176 self.sort(reverse=True)
2177 2177
2178 2178 def set(self):
2179 2179 if not self._set:
2180 2180 self._set = set(self)
2181 2181 return self._set
2182 2182
2183 2183 def __sub__(self, x):
2184 2184 if isinstance(x, baseset):
2185 2185 s = x.set()
2186 2186 else:
2187 2187 s = set(x)
2188 2188 return baseset(self.set() - s)
2189 2189
2190 2190 def __and__(self, x):
2191 2191 if isinstance(x, baseset):
2192 2192 x = x.set()
2193 2193 return baseset([y for y in self if y in x])
2194 2194
2195 2195 def __add__(self, x):
2196 2196 s = self.set()
2197 2197 l = [r for r in x if r not in s]
2198 2198 return baseset(list(self) + l)
2199 2199
2200 2200 def filter(self, l):
2201 2201 return lazyset(self, l)
2202 2202
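A doctest-style sketch of the container behaviour defined above (revision numbers invented): intersection and addition preserve the left operand's order, while subtraction is rebuilt from a set and so makes no ordering promise.

>>> a = baseset([3, 1, 2])
>>> b = baseset([2, 4])
>>> list(a & b)
[2]
>>> list(a + b)
[3, 1, 2, 4]
>>> sorted(a - b)
[1, 3]
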
2203 2203 class lazyset(object):
2204 2204 """Duck type for baseset class which iterates lazily over the revisions in
2205 2205 the subset and contains a function which tests for membership in the
2206 2206 revset
2207 2207 """
2208 2208 def __init__(self, subset, condition=lambda x: True):
2209 2209 self._subset = subset
2210 2210 self._condition = condition
2211 2211 self._cache = {}
2212 2212
2213 2213 def ascending(self):
2214 2214 self._subset.sort()
2215 2215
2216 2216 def descending(self):
2217 2217 self._subset.sort(reverse=True)
2218 2218
2219 2219 def __contains__(self, x):
2220 2220 c = self._cache
2221 2221 if x not in c:
2222 2222 c[x] = x in self._subset and self._condition(x)
2223 2223 return c[x]
2224 2224
2225 2225 def __iter__(self):
2226 2226 cond = self._condition
2227 2227 for x in self._subset:
2228 2228 if cond(x):
2229 2229 yield x
2230 2230
2231 2231 def __and__(self, x):
2232 2232 return lazyset(self, lambda r: r in x)
2233 2233
2234 2234 def __sub__(self, x):
2235 2235 return lazyset(self, lambda r: r not in x)
2236 2236
2237 2237 def __add__(self, x):
2238 2238 return lazyset(_addset(self, x))
2239 2239
2240 2240 def __nonzero__(self):
2241 2241 for r in self:
2242 2242 return True
2243 2243 return False
2244 2244
2245 2245 def __len__(self):
2246 2246 # Basic implementation to be changed in future patches.
2247 2247 l = baseset([r for r in self])
2248 2248 return len(l)
2249 2249
2250 2250 def __getitem__(self, x):
2251 2251 # Basic implementation to be changed in future patches.
2252 2252 l = baseset([r for r in self])
2253 2253 return l[x]
2254 2254
2255 2255 def sort(self, reverse=False):
2256 2256 # Basic implementation to be changed in future patches.
2257 2257 self._subset = baseset(self._subset)
2258 2258 self._subset.sort(reverse=reverse)
2259 2259
2260 2260 def reverse(self):
2261 2261 self._subset.reverse()
2262 2262
2263 2263 def set(self):
2264 2264 return set([r for r in self])
2265 2265
2266 2266 def filter(self, l):
2267 2267 return lazyset(self, l)
2268 2268
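A hedged sketch of the lazy filtering above (values invented): the condition runs only on demand and its answers are memoized per revision in _cache.

>>> evens = baseset(range(10)).filter(lambda r: r % 2 == 0)
>>> 3 in evens
False
>>> [r for r in evens]
[0, 2, 4, 6, 8]
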
2269 2269 class orderedlazyset(lazyset):
2270 2270 """Subclass of lazyset whose subset can be ordered either ascending or
2271 2271 descending
2272 2272 """
2273 2273 def __init__(self, subset, condition, ascending=True):
2274 2274 super(orderedlazyset, self).__init__(subset, condition)
2275 2275 self._ascending = ascending
2276 2276
2277 2277 def filter(self, l):
2278 2278 return orderedlazyset(self, l, ascending=self._ascending)
2279 2279
2280 2280 def ascending(self):
2281 2281 if not self._ascending:
2282 2282 self.reverse()
2283 2283
2284 2284 def descending(self):
2285 2285 if self._ascending:
2286 2286 self.reverse()
2287 2287
2288 2288 def __and__(self, x):
2289 2289 return orderedlazyset(self, lambda r: r in x,
2290 2290 ascending=self._ascending)
2291 2291
2292 2292 def __sub__(self, x):
2293 2293 return orderedlazyset(self, lambda r: r not in x,
2294 2294 ascending=self._ascending)
2295 2295
2296 2296 def sort(self, reverse=False):
2297 2297 if reverse:
2298 2298 if self._ascending:
2299 2299 self._subset.sort(reverse=reverse)
2300 2300 else:
2301 2301 if not self._ascending:
2302 2302 self._subset.sort(reverse=reverse)
2303 2303 self._ascending = not reverse
2304 2304
2305 2305 def reverse(self):
2306 2306 self._subset.reverse()
2307 2307 self._ascending = not self._ascending
2308 2308
2309 2309 class _addset(object):
2310 2310 """Represent the addition of two sets
2311 2311
2312 2312 Wrapper structure for lazily adding two structures without losing much
2313 2313 performance on the __contains__ method
2314 2314
2315 2315 This class does not duck-type baseset and it's only supposed to be used
2316 2316 internally
2317 2317 """
2318 2318 def __init__(self, revs1, revs2):
2319 2319 self._r1 = revs1
2320 2320 self._r2 = revs2
2321 2321 self._iter = None
2322 2322
2323 2323 def _iterator(self):
2324 2324 if not self._iter:
2325 2325 def gen():
2326 2326 for r in self._r1:
2327 2327 yield r
2328 2328 s = self._r1.set()
2329 2329 for r in self._r2:
2330 2330 if r not in s:
2331 2331 yield r
2332 2332 self._iter = _generatorset(gen())
2333 2333
2334 2334 return self._iter
2335 2335
2336 2336 def __iter__(self):
2337 2337 for r in self._iterator():
2338 2338 yield r
2339 2339
2340 2340 def __contains__(self, x):
2341 2341 return x in self._r1 or x in self._r2
2342 2342
2343 def set(self):
2344 return self
2345
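The set() method added here simply hands back the wrapper, so containment checks keep going through __contains__ of both sides instead of materializing a real set; a doctest-style sketch (values invented):

>>> union = _addset(baseset([1, 2]), baseset([2, 3]))
>>> s = union.set()
>>> s is union, 2 in s, 5 in s
(True, True, False)
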
2343 2346 class _generatorset(object):
2344 2347 """Wrap a generator for lazy iteration
2345 2348
2346 2349 Wrapper structure for generators that provides lazy membership and can
2347 2350 be iterated more than once.
2348 2351 When asked for membership it generates values until either it finds the
2349 2352 requested one or has gone through all the elements in the generator
2350 2353
2351 2354 This class does not duck-type baseset and it's only supposed to be used
2352 2355 internally
2353 2356 """
2354 2357 def __init__(self, gen):
2355 2358 self._gen = gen
2356 2359 self._iter = iter(gen)
2357 2360 self._cache = {}
2358 2361 self._genlist = baseset([])
2359 2362 self._iterated = False
2360 2363 self._finished = False
2361 2364
2362 2365 def __contains__(self, x):
2363 2366 if x in self._cache:
2364 2367 return self._cache[x]
2365 2368
2366 2369 for l in self:
2367 2370 if l == x:
2368 2371 return True
2369 2372
2370 2373 self._finished = True
2371 2374 self._cache[x] = False
2372 2375 return False
2373 2376
2374 2377 def __iter__(self):
2375 2378 if self._iterated:
2376 2379 for l in self._genlist:
2377 2380 yield l
2378 2381 else:
2379 2382 self._iterated = True
2380 2383
2381 2384 for item in self._gen:
2382 2385 self._cache[item] = True
2383 2386 self._genlist.append(item)
2384 2387 yield item
2385 2388
2386 2389 self._finished = True
2387 2390
2388 2391 def set(self):
2389 2392 return self
2390 2393
2391 2394 def sort(self, reverse=False):
2392 2395 # Basic implementation to be changed in future patches
2393 2396 if not self._finished:
2394 2397 for i in self:
2395 2398 continue
2396 2399 self._genlist.sort(reverse=reverse)
2397 2400
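A doctest-style sketch of the caching behaviour (generator contents invented): a full first pass stores everything in _genlist, after which iteration and membership are answered from the cache.

>>> gs = _generatorset(iter([2, 5, 9]))
>>> list(gs)
[2, 5, 9]
>>> list(gs)
[2, 5, 9]
>>> 5 in gs, 7 in gs
(True, False)
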
2398 2401 class _ascgeneratorset(_generatorset):
2399 2402 """Wrap a generator of ascending elements for lazy iteration
2400 2403
2401 2404 Same structure as _generatorset, but a membership test stops iterating
2402 2405 as soon as the generated values go above the requested one
2403 2406
2404 2407 This class does not duck-type baseset and it's only supposed to be used
2405 2408 internally
2406 2409 """
2407 2410 def __contains__(self, x):
2408 2411 if x in self._cache:
2409 2412 return self._cache[x]
2410 2413
2411 2414 for l in self:
2412 2415 if l == x:
2413 2416 return True
2414 2417 if l > x:
2415 2418 break
2416 2419
2417 2420 self._cache[x] = False
2418 2421 return False
2419 2422
2420 2423 class _descgeneratorset(_generatorset):
2421 2424 """Wrap a generator of descending elements for lazy iteration
2422 2425
2423 2426 Same structure as _generatorset, but a membership test stops iterating
2424 2427 as soon as the generated values go below the requested one
2425 2428
2426 2429 This class does not duck-type baseset and it's only supposed to be used
2427 2430 internally
2428 2431 """
2429 2432 def __contains__(self, x):
2430 2433 if x in self._cache:
2431 2434 return self._cache[x]
2432 2435
2433 2436 for l in self:
2434 2437 if l == x:
2435 2438 return True
2436 2439 if l < x:
2437 2440 break
2438 2441
2439 2442 self._cache[x] = False
2440 2443 return False
2441 2444
2442 2445 class spanset(object):
2443 2446 """Duck type for baseset class which represents a range of revisions and
2444 2447 can work lazily, without keeping the whole range in memory
2445 2448 """
2446 2449 def __init__(self, repo, start=0, end=None):
2447 2450 self._start = start
2448 2451 if end is not None:
2449 2452 self._end = end
2450 2453 else:
2451 2454 self._end = len(repo)
2452 2455 self._hiddenrevs = repo.changelog.filteredrevs
2453 2456
2454 2457 def ascending(self):
2455 2458 if self._start > self._end:
2456 2459 self.reverse()
2457 2460
2458 2461 def descending(self):
2459 2462 if self._start < self._end:
2460 2463 self.reverse()
2461 2464
2462 2465 def _contained(self, rev):
2463 2466 return (rev <= self._start and rev > self._end) or (rev >= self._start
2464 2467 and rev < self._end)
2465 2468
2466 2469 def __iter__(self):
2467 2470 if self._start <= self._end:
2468 2471 iterrange = xrange(self._start, self._end)
2469 2472 else:
2470 2473 iterrange = xrange(self._start, self._end, -1)
2471 2474
2472 2475 if self._hiddenrevs:
2473 2476 s = self._hiddenrevs
2474 2477 for r in iterrange:
2475 2478 if r not in s:
2476 2479 yield r
2477 2480 else:
2478 2481 for r in iterrange:
2479 2482 yield r
2480 2483
2481 2484 def __contains__(self, x):
2481 2484 return self._contained(x) and not (self._hiddenrevs and x in
2482 2485 self._hiddenrevs)
2484 2487
2485 2488 def __and__(self, x):
2486 2489 if isinstance(x, baseset):
2487 2490 x = x.set()
2488 2491 if self._start <= self._end:
2489 2492 return orderedlazyset(self, lambda r: r in x)
2490 2493 else:
2491 2494 return orderedlazyset(self, lambda r: r in x, ascending=False)
2492 2495
2493 2496 def __sub__(self, x):
2494 2497 if isinstance(x, baseset):
2495 2498 x = x.set()
2496 2499 if self._start <= self._end:
2497 2500 return orderedlazyset(self, lambda r: r not in x)
2498 2501 else:
2499 2502 return orderedlazyset(self, lambda r: r not in x, ascending=False)
2500 2503
2501 2504 def __add__(self, x):
2502 2505 return lazyset(_addset(self, x))
2503 2506
2504 2507 def __len__(self):
2505 2508 if not self._hiddenrevs:
2506 2509 return abs(self._end - self._start)
2507 2510 else:
2508 2511 count = 0
2509 2512 for rev in self._hiddenrevs:
2510 2513 if self._contained(rev):
2511 2514 count += 1
2512 2515 return abs(self._end - self._start) - count
2513 2516
2514 2517 def __getitem__(self, x):
2515 2518 # Basic implementation to be changed in future patches.
2516 2519 l = baseset([r for r in self])
2517 2520 return l[x]
2518 2521
2519 2522 def sort(self, reverse=False):
2520 2523 # Basic implementation to be changed in future patches.
2521 2524 if reverse:
2522 2525 self.reverse()
2523 2526
2524 2527 def reverse(self):
2525 2528 if self._start <= self._end:
2526 2529 self._start, self._end = self._end - 1, self._start - 1
2527 2530 else:
2528 2531 self._start, self._end = self._end + 1, self._start + 1
2529 2532
2530 2533 def set(self):
2531 2534 return self
2532 2535
2533 2536 def filter(self, l):
2534 2537 if self._start <= self._end:
2535 2538 return orderedlazyset(self, l)
2536 2539 else:
2537 2540 return orderedlazyset(self, l, ascending=False)
2538 2541
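A doctest-style sketch of spanset over a tiny stand-in repository; the _fakerepo class is invented for illustration (a real repo supplies __len__ and changelog.filteredrevs):

>>> class _fakerepo(object):
...     class changelog(object):
...         filteredrevs = frozenset()
...     def __len__(self):
...         return 5
>>> span = spanset(_fakerepo())
>>> list(span)
[0, 1, 2, 3, 4]
>>> span.reverse()
>>> list(span)
[4, 3, 2, 1, 0]
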
2539 2542 # tell hggettext to extract docstrings from these functions:
2540 2543 i18nfunctions = symbols.values()