revset-children: call 'getset' on a 'fullreposet'...
Pierre-Yves David
r23164:7a42e5d4 default
@@ -1,2969 +1,2969 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.sort(reverse=True)
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return generatorset(iterate(), iterasc=False)
50 50
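A minimal standalone sketch of the traversal `_revancestors` performs (not Mercurial's implementation): revisions are pushed negated onto a min-heap so they pop in descending order, and `cut` limits the walk to first parents when `followfirst` is set. The toy `parentrevs` table is invented for illustration, and the start revisions are pushed up front rather than fed in lazily as the real code does.

```python
import heapq

# toy DAG: rev -> (p1, p2); -1 plays the role of node.nullrev
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2), 4: (3, -1)}

def ancestors(revs, followfirst=False):
    cut = 1 if followfirst else None
    h = [-r for r in revs]
    heapq.heapify(h)
    seen = {-1}
    while h:
        current = -heapq.heappop(h)
        if current not in seen:
            seen.add(current)
            yield current
            for parent in parentrevs[current][:cut]:
                if parent != -1:
                    heapq.heappush(h, -parent)

# descending order, including the start revs themselves
assert list(ancestors([4])) == [4, 3, 2, 1, 0]
# followfirst skips the second parent (rev 2) of the merge at rev 3
assert list(ancestors([4], followfirst=True)) == [4, 3, 1, 0]
```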
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return generatorset(iterate(), iterasc=True)
74 74
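The forward scan in `_revdescendants` can be illustrated in a few standalone lines: walk revisions in increasing order starting just above the smallest root, and mark any revision whose (first) parent is already marked. The DAG below is made up for the example.

```python
# toy DAG: rev -> (p1, p2); -1 stands in for nullrev
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2), 4: (3, -1)}

def descendants(roots, followfirst=False):
    cut = 1 if followfirst else None
    seen = set(roots)
    for i in range(min(roots) + 1, max(parentrevs) + 1):
        if any(p != -1 and p in seen for p in parentrevs[i][:cut]):
            seen.add(i)
            yield i

assert list(descendants([2])) == [3, 4]                 # 3 merges 2 in, 4 follows 3
assert list(descendants([2], followfirst=True)) == []   # 2 is nobody's first parent
```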
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset()
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = list(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset()
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
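A self-contained sketch of the two-pass approach in `_revsbetween`: an iterative walk down from the heads records each revision's parents (the comment above explains why recursion is avoided), then a second pass in ascending order propagates reachability up from the roots. The small graph is invented for illustration.

```python
parentmap = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1), 4: (2, 3)}
parentrevs = parentmap.__getitem__

def revsbetween(roots, heads):
    if not roots:
        return []
    visit, reachable, seen = list(heads), set(), {}
    minroot, roots = min(roots), set(roots)
    while visit:                      # iterative: avoids the recursion limit
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return []
    for rev in sorted(seen):          # propagate reachability upwards
        if any(parent in reachable for parent in seen[rev]):
            reachable.add(rev)
    return sorted(reachable)

assert revsbetween([1], [4]) == [1, 3, 4]   # path 1 -> 3 -> 4; rev 2 is off-path
```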
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
213 213
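The doctest above shows the shape of the token stream: `(kind, value, position)` triples ending in an `'end'` token. The miniature scanner below reproduces that output for the `@::` example without importing Mercurial; it covers only the symbol and `::` cases needed here and is not the real tokenizer.

```python
def minitokenize(program):
    # handles only symbols and '::', enough to reproduce the doctest above
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c == ':' and program[pos:pos + 2] == '::':
            yield ('::', None, pos)
            pos += 1
        elif c in '():,-|&+!~^':
            yield (c, None, pos)
        elif c.isalnum() or c in '._@':
            s = pos
            while pos + 1 < l and (program[pos + 1].isalnum()
                                   or program[pos + 1] in '-._/@'):
                pos += 1
            yield ('symbol', program[s:pos + 1], s)
        pos += 1
    yield ('end', None, pos)

assert list(minitokenize("@::")) == [('symbol', '@', 0), ('::', None, 1),
                                     ('end', None, 3)]
```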
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'isascending'):
239 239 return s
240 240 return baseset(s)
241 241
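Two things worth noting about these helpers: `getlist` flattens the left-nested `('list', ...)` nodes that the `,` operator produces, and `getset` duck-types on `isascending` to decide whether a result already behaves like a smartset or needs wrapping in a `baseset`. A quick standalone check of the flattening (the parse tree shape below is the left-nested form `getlist` expects, written out by hand):

```python
def getlist(x):
    # same flattening logic as above: ',' nests to the left, so recurse on x[1]
    if not x:
        return []
    if x[0] == 'list':
        return getlist(x[1]) + [x[2]]
    return [x]

# 'a, b, c' parses roughly as ('list', ('list', a, b), c)
tree = ('list', ('list', ('symbol', 'a'), ('symbol', 'b')), ('symbol', 'c'))
assert getlist(tree) == [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]
```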
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset()
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 m = getset(repo, fullreposet(repo), x)
269 269 n = getset(repo, fullreposet(repo), y)
270 270
271 271 if not m or not n:
272 272 return baseset()
273 273 m, n = m.first(), n.last()
274 274
275 275 if m < n:
276 276 r = spanset(repo, m, n + 1)
277 277 else:
278 278 r = spanset(repo, m, n - 1)
279 279 return r & subset
280 280
281 281 def dagrange(repo, subset, x, y):
282 282 r = spanset(repo)
283 283 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
284 284 return xs & subset
285 285
286 286 def andset(repo, subset, x, y):
287 287 return getset(repo, getset(repo, subset, x), y)
288 288
289 289 def orset(repo, subset, x, y):
290 290 xl = getset(repo, subset, x)
291 291 yl = getset(repo, subset - xl, y)
292 292 return xl + yl
293 293
294 294 def notset(repo, subset, x):
295 295 return subset - getset(repo, subset, x)
296 296
297 297 def listset(repo, subset, a, b):
298 298 raise error.ParseError(_("can't use a list in this context"))
299 299
300 300 def func(repo, subset, a, b):
301 301 if a[0] == 'symbol' and a[1] in symbols:
302 302 return symbols[a[1]](repo, subset, b)
303 303 raise error.ParseError(_("not a function: %s") % a[1])
304 304
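The operator methods above reduce to simple set algebra once `getset` has evaluated the operands: `and` threads the left result in as the subset for the right operand, `or` evaluates the right operand only against what the left did not already select, and `not` is plain subtraction. With ordinary Python sets standing in for the smartset classes (order handling differs in the real code), the composition looks like this illustrative model:

```python
# evaluate(subset, predicate) models getset(repo, subset, x): keep the
# members of subset that satisfy the operand
def evaluate(subset, predicate):
    return {r for r in subset if predicate(r)}

subset = set(range(10))
even = lambda r: r % 2 == 0
small = lambda r: r < 5

andset = evaluate(evaluate(subset, even), small)                    # like andset()
orset = evaluate(subset, even) | evaluate(subset - evaluate(subset, even), small)
notset = subset - evaluate(subset, even)                            # like notset()

assert andset == {0, 2, 4}
assert orset == {0, 1, 2, 3, 4, 6, 8}
assert notset == {1, 3, 5, 7, 9}
```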
305 305 # functions
306 306
307 307 def adds(repo, subset, x):
308 308 """``adds(pattern)``
309 309 Changesets that add a file matching pattern.
310 310
311 311 The pattern without explicit kind like ``glob:`` is expected to be
312 312 relative to the current directory and match against a file or a
313 313 directory.
314 314 """
315 315 # i18n: "adds" is a keyword
316 316 pat = getstring(x, _("adds requires a pattern"))
317 317 return checkstatus(repo, subset, pat, 1)
318 318
319 319 def ancestor(repo, subset, x):
320 320 """``ancestor(*changeset)``
321 321 A greatest common ancestor of the changesets.
322 322
323 323 Accepts 0 or more changesets.
324 324 Will return empty list when passed no args.
325 325 Greatest common ancestor of a single changeset is that changeset.
326 326 """
327 327 # i18n: "ancestor" is a keyword
328 328 l = getlist(x)
329 329 rl = spanset(repo)
330 330 anc = None
331 331
332 332 # (getset(repo, rl, i) for i in l) generates a list of lists
333 333 for revs in (getset(repo, rl, i) for i in l):
334 334 for r in revs:
335 335 if anc is None:
336 336 anc = repo[r]
337 337 else:
338 338 anc = anc.ancestor(repo[r])
339 339
340 340 if anc is not None and anc.rev() in subset:
341 341 return baseset([anc.rev()])
342 342 return baseset()
343 343
344 344 def _ancestors(repo, subset, x, followfirst=False):
345 345 heads = getset(repo, spanset(repo), x)
346 346 if not heads:
347 347 return baseset()
348 348 s = _revancestors(repo, heads, followfirst)
349 349 return subset & s
350 350
351 351 def ancestors(repo, subset, x):
352 352 """``ancestors(set)``
353 353 Changesets that are ancestors of a changeset in set.
354 354 """
355 355 return _ancestors(repo, subset, x)
356 356
357 357 def _firstancestors(repo, subset, x):
358 358 # ``_firstancestors(set)``
359 359 # Like ``ancestors(set)`` but follows only the first parents.
360 360 return _ancestors(repo, subset, x, followfirst=True)
361 361
362 362 def ancestorspec(repo, subset, x, n):
363 363 """``set~n``
364 364 Changesets that are the Nth ancestor (first parents only) of a changeset
365 365 in set.
366 366 """
367 367 try:
368 368 n = int(n[1])
369 369 except (TypeError, ValueError):
370 370 raise error.ParseError(_("~ expects a number"))
371 371 ps = set()
372 372 cl = repo.changelog
373 373 for r in getset(repo, fullreposet(repo), x):
374 374 for i in range(n):
375 375 r = cl.parentrevs(r)[0]
376 376 ps.add(r)
377 377 return subset & ps
378 378
379 379 def author(repo, subset, x):
380 380 """``author(string)``
381 381 Alias for ``user(string)``.
382 382 """
383 383 # i18n: "author" is a keyword
384 384 n = encoding.lower(getstring(x, _("author requires a string")))
385 385 kind, pattern, matcher = _substringmatcher(n)
386 386 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
387 387
388 388 def only(repo, subset, x):
389 389 """``only(set, [set])``
390 390 Changesets that are ancestors of the first set that are not ancestors
391 391 of any other head in the repo. If a second set is specified, the result
392 392 is ancestors of the first set that are not ancestors of the second set
393 393 (i.e. ::<set1> - ::<set2>).
394 394 """
395 395 cl = repo.changelog
396 396 # i18n: "only" is a keyword
397 397 args = getargs(x, 1, 2, _('only takes one or two arguments'))
398 398 include = getset(repo, spanset(repo), args[0])
399 399 if len(args) == 1:
400 400 if not include:
401 401 return baseset()
402 402
403 403 descendants = set(_revdescendants(repo, include, False))
404 404 exclude = [rev for rev in cl.headrevs()
405 405 if not rev in descendants and not rev in include]
406 406 else:
407 407 exclude = getset(repo, spanset(repo), args[1])
408 408
409 409 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
410 410 return subset & results
411 411
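`only()` boils down to "ancestors of the first set that are not ancestors of the second", i.e. `::<set1> - ::<set2>`. A standalone sketch with a toy DAG and a naive ancestor closure gives the same answer that `missingancestors` computes without materializing both closures:

```python
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (3, -1)}

def ancestorclosure(revs):
    # naive ::revs, including revs themselves
    out, stack = set(), list(revs)
    while stack:
        r = stack.pop()
        if r != -1 and r not in out:
            out.add(r)
            stack.extend(parentrevs[r])
    return out

# only(4, 2): ancestors of 4 that are not also ancestors of 2
assert ancestorclosure([4]) - ancestorclosure([2]) == {3, 4}
```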
412 412 def bisect(repo, subset, x):
413 413 """``bisect(string)``
414 414 Changesets marked in the specified bisect status:
415 415
416 416 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
417 417 - ``goods``, ``bads`` : csets topologically good/bad
418 418 - ``range`` : csets taking part in the bisection
419 419 - ``pruned`` : csets that are goods, bads or skipped
420 420 - ``untested`` : csets whose fate is yet unknown
421 421 - ``ignored`` : csets ignored due to DAG topology
422 422 - ``current`` : the cset currently being bisected
423 423 """
424 424 # i18n: "bisect" is a keyword
425 425 status = getstring(x, _("bisect requires a string")).lower()
426 426 state = set(hbisect.get(repo, status))
427 427 return subset & state
428 428
429 429 # Backward-compatibility
430 430 # - no help entry so that we do not advertise it any more
431 431 def bisected(repo, subset, x):
432 432 return bisect(repo, subset, x)
433 433
434 434 def bookmark(repo, subset, x):
435 435 """``bookmark([name])``
436 436 The named bookmark or all bookmarks.
437 437
438 438 If `name` starts with `re:`, the remainder of the name is treated as
439 439 a regular expression. To match a bookmark that actually starts with `re:`,
440 440 use the prefix `literal:`.
441 441 """
442 442 # i18n: "bookmark" is a keyword
443 443 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
444 444 if args:
445 445 bm = getstring(args[0],
446 446 # i18n: "bookmark" is a keyword
447 447 _('the argument to bookmark must be a string'))
448 448 kind, pattern, matcher = _stringmatcher(bm)
449 449 bms = set()
450 450 if kind == 'literal':
451 451 bmrev = repo._bookmarks.get(pattern, None)
452 452 if not bmrev:
453 453 raise util.Abort(_("bookmark '%s' does not exist") % bm)
454 454 bms.add(repo[bmrev].rev())
455 455 else:
456 456 matchrevs = set()
457 457 for name, bmrev in repo._bookmarks.iteritems():
458 458 if matcher(name):
459 459 matchrevs.add(bmrev)
460 460 if not matchrevs:
461 461 raise util.Abort(_("no bookmarks exist that match '%s'")
462 462 % pattern)
463 463 for bmrev in matchrevs:
464 464 bms.add(repo[bmrev].rev())
465 465 else:
466 466 bms = set([repo[r].rev()
467 467 for r in repo._bookmarks.values()])
468 468 bms -= set([node.nullrev])
469 469 return subset & bms
470 470
471 471 def branch(repo, subset, x):
472 472 """``branch(string or set)``
473 473 All changesets belonging to the given branch or the branches of the given
474 474 changesets.
475 475
476 476 If `string` starts with `re:`, the remainder of the name is treated as
477 477 a regular expression. To match a branch that actually starts with `re:`,
478 478 use the prefix `literal:`.
479 479 """
480 480 try:
481 481 b = getstring(x, '')
482 482 except error.ParseError:
483 483 # not a string, but another revspec, e.g. tip()
484 484 pass
485 485 else:
486 486 kind, pattern, matcher = _stringmatcher(b)
487 487 if kind == 'literal':
488 488 # note: falls through to the revspec case if no branch with
489 489 # this name exists
490 490 if pattern in repo.branchmap():
491 491 return subset.filter(lambda r: matcher(repo[r].branch()))
492 492 else:
493 493 return subset.filter(lambda r: matcher(repo[r].branch()))
494 494
495 495 s = getset(repo, spanset(repo), x)
496 496 b = set()
497 497 for r in s:
498 498 b.add(repo[r].branch())
499 499 c = s.__contains__
500 500 return subset.filter(lambda r: c(r) or repo[r].branch() in b)
501 501
502 502 def bumped(repo, subset, x):
503 503 """``bumped()``
504 504 Mutable changesets marked as successors of public changesets.
505 505
506 506 Only non-public and non-obsolete changesets can be `bumped`.
507 507 """
508 508 # i18n: "bumped" is a keyword
509 509 getargs(x, 0, 0, _("bumped takes no arguments"))
510 510 bumped = obsmod.getrevs(repo, 'bumped')
511 511 return subset & bumped
512 512
513 513 def bundle(repo, subset, x):
514 514 """``bundle()``
515 515 Changesets in the bundle.
516 516
517 517 Bundle must be specified by the -R option."""
518 518
519 519 try:
520 520 bundlerevs = repo.changelog.bundlerevs
521 521 except AttributeError:
522 522 raise util.Abort(_("no bundle provided - specify with -R"))
523 523 return subset & bundlerevs
524 524
525 525 def checkstatus(repo, subset, pat, field):
526 526 hasset = matchmod.patkind(pat) == 'set'
527 527
528 528 mcache = [None]
529 529 def matches(x):
530 530 c = repo[x]
531 531 if not mcache[0] or hasset:
532 532 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
533 533 m = mcache[0]
534 534 fname = None
535 535 if not m.anypats() and len(m.files()) == 1:
536 536 fname = m.files()[0]
537 537 if fname is not None:
538 538 if fname not in c.files():
539 539 return False
540 540 else:
541 541 for f in c.files():
542 542 if m(f):
543 543 break
544 544 else:
545 545 return False
546 546 files = repo.status(c.p1().node(), c.node())[field]
547 547 if fname is not None:
548 548 if fname in files:
549 549 return True
550 550 else:
551 551 for f in files:
552 552 if m(f):
553 553 return True
554 554
555 555 return subset.filter(matches)
556 556
557 557 def _children(repo, narrow, parentset):
558 558 cs = set()
559 559 if not parentset:
560 560 return baseset(cs)
561 561 pr = repo.changelog.parentrevs
562 562 minrev = min(parentset)
563 563 for r in narrow:
564 564 if r <= minrev:
565 565 continue
566 566 for p in pr(r):
567 567 if p in parentset:
568 568 cs.add(r)
569 569 return baseset(cs)
570 570
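`_children` only looks at revisions in `narrow` numbered above the smallest candidate parent, and keeps those with a parent in `parentset`. The same filter in standalone form, with a made-up parent table:

```python
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2), 4: (3, -1)}

def children(narrow, parentset):
    if not parentset:
        return set()
    minrev = min(parentset)
    return {r for r in narrow
            if r > minrev and any(p in parentset for p in parentrevs[r])}

assert children(range(5), {1}) == {2, 3}   # both 2 and the merge 3 have parent 1
```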
571 571 def children(repo, subset, x):
572 572 """``children(set)``
573 573 Child changesets of changesets in set.
574 574 """
575 s = getset(repo, baseset(repo), x)
575 s = getset(repo, fullreposet(repo), x)
576 576 cs = _children(repo, subset, s)
577 577 return subset & cs
578 578
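The one-line change in this revision is just above: `children()` now resolves its argument with `getset(repo, fullreposet(repo), x)` instead of `getset(repo, baseset(repo), x)`, using the dedicated full-repository set rather than a `baseset` built from the repo. Either way the argument is evaluated over every revision and only the final child set is intersected with `subset`. A toy model of that evaluate-wide-then-intersect idea (all names and numbers below are invented for illustration):

```python
allrevs = set(range(10))          # stands in for fullreposet(repo)
subset = {6, 7, 8, 9}             # the subset children() was asked to filter

def getset(evaluationset, predicate):
    # models getset(repo, evaluationset, x) for a simple predicate argument
    return {r for r in evaluationset if predicate(r)}

wanted_parent = lambda r: r == 3  # the revset argument, as in 'children(3)'

# resolving the argument against the full repo finds rev 3 even though it is
# outside subset; resolving against subset alone would lose it
assert getset(allrevs, wanted_parent) == {3}
assert getset(subset, wanted_parent) == set()
```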
579 579 def closed(repo, subset, x):
580 580 """``closed()``
581 581 Changeset is closed.
582 582 """
583 583 # i18n: "closed" is a keyword
584 584 getargs(x, 0, 0, _("closed takes no arguments"))
585 585 return subset.filter(lambda r: repo[r].closesbranch())
586 586
587 587 def contains(repo, subset, x):
588 588 """``contains(pattern)``
589 589 The revision's manifest contains a file matching pattern (but might not
590 590 modify it). See :hg:`help patterns` for information about file patterns.
591 591
592 592 The pattern without explicit kind like ``glob:`` is expected to be
593 593 relative to the current directory and match against a file exactly
594 594 for efficiency.
595 595 """
596 596 # i18n: "contains" is a keyword
597 597 pat = getstring(x, _("contains requires a pattern"))
598 598
599 599 def matches(x):
600 600 if not matchmod.patkind(pat):
601 601 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
602 602 if pats in repo[x]:
603 603 return True
604 604 else:
605 605 c = repo[x]
606 606 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
607 607 for f in c.manifest():
608 608 if m(f):
609 609 return True
610 610 return False
611 611
612 612 return subset.filter(matches)
613 613
614 614 def converted(repo, subset, x):
615 615 """``converted([id])``
616 616 Changesets converted from the given identifier in the old repository if
617 617 present, or all converted changesets if no identifier is specified.
618 618 """
619 619
620 620 # There is exactly no chance of resolving the revision, so do a simple
621 621 # string compare and hope for the best
622 622
623 623 rev = None
624 624 # i18n: "converted" is a keyword
625 625 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
626 626 if l:
627 627 # i18n: "converted" is a keyword
628 628 rev = getstring(l[0], _('converted requires a revision'))
629 629
630 630 def _matchvalue(r):
631 631 source = repo[r].extra().get('convert_revision', None)
632 632 return source is not None and (rev is None or source.startswith(rev))
633 633
634 634 return subset.filter(lambda r: _matchvalue(r))
635 635
636 636 def date(repo, subset, x):
637 637 """``date(interval)``
638 638 Changesets within the interval, see :hg:`help dates`.
639 639 """
640 640 # i18n: "date" is a keyword
641 641 ds = getstring(x, _("date requires a string"))
642 642 dm = util.matchdate(ds)
643 643 return subset.filter(lambda x: dm(repo[x].date()[0]))
644 644
645 645 def desc(repo, subset, x):
646 646 """``desc(string)``
647 647 Search commit message for string. The match is case-insensitive.
648 648 """
649 649 # i18n: "desc" is a keyword
650 650 ds = encoding.lower(getstring(x, _("desc requires a string")))
651 651
652 652 def matches(x):
653 653 c = repo[x]
654 654 return ds in encoding.lower(c.description())
655 655
656 656 return subset.filter(matches)
657 657
658 658 def _descendants(repo, subset, x, followfirst=False):
659 659 roots = getset(repo, spanset(repo), x)
660 660 if not roots:
661 661 return baseset()
662 662 s = _revdescendants(repo, roots, followfirst)
663 663
664 664 # Both sets need to be ascending in order to lazily return the union
665 665 # in the correct order.
666 666 base = subset & roots
667 667 desc = subset & s
668 668 result = base + desc
669 669 if subset.isascending():
670 670 result.sort()
671 671 elif subset.isdescending():
672 672 result.sort(reverse=True)
673 673 else:
674 674 result = subset & result
675 675 return result
676 676
677 677 def descendants(repo, subset, x):
678 678 """``descendants(set)``
679 679 Changesets which are descendants of changesets in set.
680 680 """
681 681 return _descendants(repo, subset, x)
682 682
683 683 def _firstdescendants(repo, subset, x):
684 684 # ``_firstdescendants(set)``
685 685 # Like ``descendants(set)`` but follows only the first parents.
686 686 return _descendants(repo, subset, x, followfirst=True)
687 687
688 688 def destination(repo, subset, x):
689 689 """``destination([set])``
690 690 Changesets that were created by a graft, transplant or rebase operation,
691 691 with the given revisions specified as the source. Omitting the optional set
692 692 is the same as passing all().
693 693 """
694 694 if x is not None:
695 695 sources = getset(repo, spanset(repo), x)
696 696 else:
697 697 sources = getall(repo, spanset(repo), x)
698 698
699 699 dests = set()
700 700
701 701 # subset contains all of the possible destinations that can be returned, so
702 702 # iterate over them and see if their source(s) were provided in the arg set.
703 703 # Even if the immediate src of r is not in the arg set, src's source (or
704 704 # further back) may be. Scanning back further than the immediate src allows
705 705 # transitive transplants and rebases to yield the same results as transitive
706 706 # grafts.
707 707 for r in subset:
708 708 src = _getrevsource(repo, r)
709 709 lineage = None
710 710
711 711 while src is not None:
712 712 if lineage is None:
713 713 lineage = list()
714 714
715 715 lineage.append(r)
716 716
717 717 # The visited lineage is a match if the current source is in the arg
718 718 # set. Since every candidate dest is visited by way of iterating
719 719 # subset, any dests further back in the lineage will be tested by a
720 720 # different iteration over subset. Likewise, if the src was already
721 721 # selected, the current lineage can be selected without going back
722 722 # further.
723 723 if src in sources or src in dests:
724 724 dests.update(lineage)
725 725 break
726 726
727 727 r = src
728 728 src = _getrevsource(repo, r)
729 729
730 730 return subset.filter(dests.__contains__)
731 731
732 732 def divergent(repo, subset, x):
733 733 """``divergent()``
734 734 Final successors of changesets with an alternative set of final successors.
735 735 """
736 736 # i18n: "divergent" is a keyword
737 737 getargs(x, 0, 0, _("divergent takes no arguments"))
738 738 divergent = obsmod.getrevs(repo, 'divergent')
739 739 return subset & divergent
740 740
741 741 def draft(repo, subset, x):
742 742 """``draft()``
743 743 Changeset in draft phase."""
744 744 # i18n: "draft" is a keyword
745 745 getargs(x, 0, 0, _("draft takes no arguments"))
746 746 phase = repo._phasecache.phase
747 747 target = phases.draft
748 748 condition = lambda r: phase(repo, r) == target
749 749 return subset.filter(condition, cache=False)
750 750
751 751 def extinct(repo, subset, x):
752 752 """``extinct()``
753 753 Obsolete changesets with obsolete descendants only.
754 754 """
755 755 # i18n: "extinct" is a keyword
756 756 getargs(x, 0, 0, _("extinct takes no arguments"))
757 757 extincts = obsmod.getrevs(repo, 'extinct')
758 758 return subset & extincts
759 759
760 760 def extra(repo, subset, x):
761 761 """``extra(label, [value])``
762 762 Changesets with the given label in the extra metadata, with the given
763 763 optional value.
764 764
765 765 If `value` starts with `re:`, the remainder of the value is treated as
766 766 a regular expression. To match a value that actually starts with `re:`,
767 767 use the prefix `literal:`.
768 768 """
769 769
770 770 # i18n: "extra" is a keyword
771 771 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
772 772 # i18n: "extra" is a keyword
773 773 label = getstring(l[0], _('first argument to extra must be a string'))
774 774 value = None
775 775
776 776 if len(l) > 1:
777 777 # i18n: "extra" is a keyword
778 778 value = getstring(l[1], _('second argument to extra must be a string'))
779 779 kind, value, matcher = _stringmatcher(value)
780 780
781 781 def _matchvalue(r):
782 782 extra = repo[r].extra()
783 783 return label in extra and (value is None or matcher(extra[label]))
784 784
785 785 return subset.filter(lambda r: _matchvalue(r))
786 786
787 787 def filelog(repo, subset, x):
788 788 """``filelog(pattern)``
789 789 Changesets connected to the specified filelog.
790 790
791 791 For performance reasons, visits only revisions mentioned in the file-level
792 792 filelog, rather than filtering through all changesets (much faster, but
793 793 doesn't include deletes or duplicate changes). For a slower, more accurate
794 794 result, use ``file()``.
795 795
796 796 The pattern without explicit kind like ``glob:`` is expected to be
797 797 relative to the current directory and match against a file exactly
798 798 for efficiency.
799 799 """
800 800
801 801 # i18n: "filelog" is a keyword
802 802 pat = getstring(x, _("filelog requires a pattern"))
803 803 s = set()
804 804
805 805 if not matchmod.patkind(pat):
806 806 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
807 807 fl = repo.file(f)
808 808 for fr in fl:
809 809 s.add(fl.linkrev(fr))
810 810 else:
811 811 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
812 812 for f in repo[None]:
813 813 if m(f):
814 814 fl = repo.file(f)
815 815 for fr in fl:
816 816 s.add(fl.linkrev(fr))
817 817
818 818 return subset & s
819 819
820 820 def first(repo, subset, x):
821 821 """``first(set, [n])``
822 822 An alias for limit().
823 823 """
824 824 return limit(repo, subset, x)
825 825
826 826 def _follow(repo, subset, x, name, followfirst=False):
827 827 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
828 828 c = repo['.']
829 829 if l:
830 830 x = getstring(l[0], _("%s expected a filename") % name)
831 831 if x in c:
832 832 cx = c[x]
833 833 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
834 834 # include the revision responsible for the most recent version
835 835 s.add(cx.linkrev())
836 836 else:
837 837 return baseset()
838 838 else:
839 839 s = _revancestors(repo, baseset([c.rev()]), followfirst)
840 840
841 841 return subset & s
842 842
843 843 def follow(repo, subset, x):
844 844 """``follow([file])``
845 845 An alias for ``::.`` (ancestors of the working copy's first parent).
846 846 If a filename is specified, the history of the given file is followed,
847 847 including copies.
848 848 """
849 849 return _follow(repo, subset, x, 'follow')
850 850
851 851 def _followfirst(repo, subset, x):
852 852 # ``followfirst([file])``
853 853 # Like ``follow([file])`` but follows only the first parent of
854 854 # every revision or file revision.
855 855 return _follow(repo, subset, x, '_followfirst', followfirst=True)
856 856
857 857 def getall(repo, subset, x):
858 858 """``all()``
859 859 All changesets, the same as ``0:tip``.
860 860 """
861 861 # i18n: "all" is a keyword
862 862 getargs(x, 0, 0, _("all takes no arguments"))
863 863 return subset
864 864
865 865 def grep(repo, subset, x):
866 866 """``grep(regex)``
867 867 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
868 868 to ensure special escape characters are handled correctly. Unlike
869 869 ``keyword(string)``, the match is case-sensitive.
870 870 """
871 871 try:
872 872 # i18n: "grep" is a keyword
873 873 gr = re.compile(getstring(x, _("grep requires a string")))
874 874 except re.error, e:
875 875 raise error.ParseError(_('invalid match pattern: %s') % e)
876 876
877 877 def matches(x):
878 878 c = repo[x]
879 879 for e in c.files() + [c.user(), c.description()]:
880 880 if gr.search(e):
881 881 return True
882 882 return False
883 883
884 884 return subset.filter(matches)
885 885
886 886 def _matchfiles(repo, subset, x):
887 887 # _matchfiles takes a revset list of prefixed arguments:
888 888 #
889 889 # [p:foo, i:bar, x:baz]
890 890 #
891 891 # builds a match object from them and filters subset. Allowed
892 892 # prefixes are 'p:' for regular patterns, 'i:' for include
893 893 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
894 894 # a revision identifier, or the empty string to reference the
895 895 # working directory, from which the match object is
896 896 # initialized. Use 'd:' to set the default matching mode, default
897 897 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
898 898
899 899 # i18n: "_matchfiles" is a keyword
900 900 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
901 901 pats, inc, exc = [], [], []
902 902 rev, default = None, None
903 903 for arg in l:
904 904 # i18n: "_matchfiles" is a keyword
905 905 s = getstring(arg, _("_matchfiles requires string arguments"))
906 906 prefix, value = s[:2], s[2:]
907 907 if prefix == 'p:':
908 908 pats.append(value)
909 909 elif prefix == 'i:':
910 910 inc.append(value)
911 911 elif prefix == 'x:':
912 912 exc.append(value)
913 913 elif prefix == 'r:':
914 914 if rev is not None:
915 915 # i18n: "_matchfiles" is a keyword
916 916 raise error.ParseError(_('_matchfiles expected at most one '
917 917 'revision'))
918 918 rev = value
919 919 elif prefix == 'd:':
920 920 if default is not None:
921 921 # i18n: "_matchfiles" is a keyword
922 922 raise error.ParseError(_('_matchfiles expected at most one '
923 923 'default mode'))
924 924 default = value
925 925 else:
926 926 # i18n: "_matchfiles" is a keyword
927 927 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
928 928 if not default:
929 929 default = 'glob'
930 930
931 931 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
932 932 exclude=exc, ctx=repo[rev], default=default)
933 933
934 934 def matches(x):
935 935 for f in repo[x].files():
936 936 if m(f):
937 937 return True
938 938 return False
939 939
940 940 return subset.filter(matches)
941 941
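The prefix-encoded argument convention documented above (`p:`, `i:`, `x:`, `r:`, `d:`) is easy to model on its own: each string splits into a two-character prefix and a value and lands in the matching bucket. A stripped-down version of just that dispatch, omitting the at-most-one checks the real code performs:

```python
def parseprefixed(args):
    pats, inc, exc, rev, default = [], [], [], None, None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            rev = value
        elif prefix == 'd:':
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

assert parseprefixed(['p:foo', 'i:bar', 'x:baz']) == (['foo'], ['bar'], ['baz'],
                                                      None, 'glob')
```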
942 942 def hasfile(repo, subset, x):
943 943 """``file(pattern)``
944 944 Changesets affecting files matched by pattern.
945 945
946 946 For a faster but less accurate result, consider using ``filelog()``
947 947 instead.
948 948
949 949 This predicate uses ``glob:`` as the default kind of pattern.
950 950 """
951 951 # i18n: "file" is a keyword
952 952 pat = getstring(x, _("file requires a pattern"))
953 953 return _matchfiles(repo, subset, ('string', 'p:' + pat))
954 954
955 955 def head(repo, subset, x):
956 956 """``head()``
957 957 Changeset is a named branch head.
958 958 """
959 959 # i18n: "head" is a keyword
960 960 getargs(x, 0, 0, _("head takes no arguments"))
961 961 hs = set()
962 962 for b, ls in repo.branchmap().iteritems():
963 963 hs.update(repo[h].rev() for h in ls)
964 964 return baseset(hs).filter(subset.__contains__)
965 965
966 966 def heads(repo, subset, x):
967 967 """``heads(set)``
968 968 Members of set with no children in set.
969 969 """
970 970 s = getset(repo, subset, x)
971 971 ps = parents(repo, subset, x)
972 972 return s - ps
973 973
974 974 def hidden(repo, subset, x):
975 975 """``hidden()``
976 976 Hidden changesets.
977 977 """
978 978 # i18n: "hidden" is a keyword
979 979 getargs(x, 0, 0, _("hidden takes no arguments"))
980 980 hiddenrevs = repoview.filterrevs(repo, 'visible')
981 981 return subset & hiddenrevs
982 982
983 983 def keyword(repo, subset, x):
984 984 """``keyword(string)``
985 985 Search commit message, user name, and names of changed files for
986 986 string. The match is case-insensitive.
987 987 """
988 988 # i18n: "keyword" is a keyword
989 989 kw = encoding.lower(getstring(x, _("keyword requires a string")))
990 990
991 991 def matches(r):
992 992 c = repo[r]
993 993 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
994 994 c.description()])
995 995
996 996 return subset.filter(matches)
997 997
998 998 def limit(repo, subset, x):
999 999 """``limit(set, [n])``
1000 1000 First n members of set, defaulting to 1.
1001 1001 """
1002 1002 # i18n: "limit" is a keyword
1003 1003 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1004 1004 try:
1005 1005 lim = 1
1006 1006 if len(l) == 2:
1007 1007 # i18n: "limit" is a keyword
1008 1008 lim = int(getstring(l[1], _("limit requires a number")))
1009 1009 except (TypeError, ValueError):
1010 1010 # i18n: "limit" is a keyword
1011 1011 raise error.ParseError(_("limit expects a number"))
1012 1012 ss = subset
1013 1013 os = getset(repo, spanset(repo), l[0])
1014 1014 result = []
1015 1015 it = iter(os)
1016 1016 for x in xrange(lim):
1017 1017 try:
1018 1018 y = it.next()
1019 1019 if y in ss:
1020 1020 result.append(y)
1021 1021 except (StopIteration):
1022 1022 break
1023 1023 return baseset(result)
1024 1024
1025 1025 def last(repo, subset, x):
1026 1026 """``last(set, [n])``
1027 1027 Last n members of set, defaulting to 1.
1028 1028 """
1029 1029 # i18n: "last" is a keyword
1030 1030 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1031 1031 try:
1032 1032 lim = 1
1033 1033 if len(l) == 2:
1034 1034 # i18n: "last" is a keyword
1035 1035 lim = int(getstring(l[1], _("last requires a number")))
1036 1036 except (TypeError, ValueError):
1037 1037 # i18n: "last" is a keyword
1038 1038 raise error.ParseError(_("last expects a number"))
1039 1039 ss = subset
1040 1040 os = getset(repo, spanset(repo), l[0])
1041 1041 os.reverse()
1042 1042 result = []
1043 1043 it = iter(os)
1044 1044 for x in xrange(lim):
1045 1045 try:
1046 1046 y = it.next()
1047 1047 if y in ss:
1048 1048 result.append(y)
1049 1049 except (StopIteration):
1050 1050 break
1051 1051 return baseset(result)
1052 1052
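`limit()` and `last()` both walk the evaluated set in its own order, look at no more than `n` members, and keep those that are also in the incoming subset; `last()` simply reverses first. In standalone form with plain iterables (`itertools.islice` plays the role of the bounded loop):

```python
from itertools import islice

def limit(subset, os, lim=1):
    # like limit(): inspect the first `lim` members of os, keep those in subset
    return [y for y in islice(os, lim) if y in subset]

def last(subset, os, lim=1):
    # like last(): same thing, but walk os from the end
    return limit(subset, reversed(list(os)), lim)

assert limit({2, 3, 4}, [5, 4, 3, 2, 1], lim=2) == [4]
assert last({2, 3, 4}, [5, 4, 3, 2, 1], lim=2) == [2]
```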
1053 1053 def maxrev(repo, subset, x):
1054 1054 """``max(set)``
1055 1055 Changeset with highest revision number in set.
1056 1056 """
1057 1057 os = getset(repo, spanset(repo), x)
1058 1058 if os:
1059 1059 m = os.max()
1060 1060 if m in subset:
1061 1061 return baseset([m])
1062 1062 return baseset()
1063 1063
1064 1064 def merge(repo, subset, x):
1065 1065 """``merge()``
1066 1066 Changeset is a merge changeset.
1067 1067 """
1068 1068 # i18n: "merge" is a keyword
1069 1069 getargs(x, 0, 0, _("merge takes no arguments"))
1070 1070 cl = repo.changelog
1071 1071 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1072 1072
1073 1073 def branchpoint(repo, subset, x):
1074 1074 """``branchpoint()``
1075 1075 Changesets with more than one child.
1076 1076 """
1077 1077 # i18n: "branchpoint" is a keyword
1078 1078 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1079 1079 cl = repo.changelog
1080 1080 if not subset:
1081 1081 return baseset()
1082 1082 baserev = min(subset)
1083 1083 parentscount = [0]*(len(repo) - baserev)
1084 1084 for r in cl.revs(start=baserev + 1):
1085 1085 for p in cl.parentrevs(r):
1086 1086 if p >= baserev:
1087 1087 parentscount[p - baserev] += 1
1088 1088 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1089 1089
1090 1090 def minrev(repo, subset, x):
1091 1091 """``min(set)``
1092 1092 Changeset with lowest revision number in set.
1093 1093 """
1094 1094 os = getset(repo, spanset(repo), x)
1095 1095 if os:
1096 1096 m = os.min()
1097 1097 if m in subset:
1098 1098 return baseset([m])
1099 1099 return baseset()
1100 1100
1101 1101 def modifies(repo, subset, x):
1102 1102 """``modifies(pattern)``
1103 1103 Changesets modifying files matched by pattern.
1104 1104
1105 1105 The pattern without explicit kind like ``glob:`` is expected to be
1106 1106 relative to the current directory and match against a file or a
1107 1107 directory.
1108 1108 """
1109 1109 # i18n: "modifies" is a keyword
1110 1110 pat = getstring(x, _("modifies requires a pattern"))
1111 1111 return checkstatus(repo, subset, pat, 0)
1112 1112
1113 1113 def node_(repo, subset, x):
1114 1114 """``id(string)``
1115 1115 Revision non-ambiguously specified by the given hex string prefix.
1116 1116 """
1117 1117 # i18n: "id" is a keyword
1118 1118 l = getargs(x, 1, 1, _("id requires one argument"))
1119 1119 # i18n: "id" is a keyword
1120 1120 n = getstring(l[0], _("id requires a string"))
1121 1121 if len(n) == 40:
1122 1122 rn = repo[n].rev()
1123 1123 else:
1124 1124 rn = None
1125 1125 pm = repo.changelog._partialmatch(n)
1126 1126 if pm is not None:
1127 1127 rn = repo.changelog.rev(pm)
1128 1128
1129 1129 if rn is None:
1130 1130 return baseset()
1131 1131 result = baseset([rn])
1132 1132 return result & subset
1133 1133
1134 1134 def obsolete(repo, subset, x):
1135 1135 """``obsolete()``
1136 1136 Mutable changeset with a newer version."""
1137 1137 # i18n: "obsolete" is a keyword
1138 1138 getargs(x, 0, 0, _("obsolete takes no arguments"))
1139 1139 obsoletes = obsmod.getrevs(repo, 'obsolete')
1140 1140 return subset & obsoletes
1141 1141
1142 1142 def origin(repo, subset, x):
1143 1143 """``origin([set])``
1144 1144 Changesets that were specified as a source for the grafts, transplants or
1145 1145 rebases that created the given revisions. Omitting the optional set is the
1146 1146 same as passing all(). If a changeset created by these operations is itself
1147 1147 specified as a source for one of these operations, only the source changeset
1148 1148 for the first operation is selected.
1149 1149 """
1150 1150 if x is not None:
1151 1151 dests = getset(repo, spanset(repo), x)
1152 1152 else:
1153 1153 dests = getall(repo, spanset(repo), x)
1154 1154
1155 1155 def _firstsrc(rev):
1156 1156 src = _getrevsource(repo, rev)
1157 1157 if src is None:
1158 1158 return None
1159 1159
1160 1160 while True:
1161 1161 prev = _getrevsource(repo, src)
1162 1162
1163 1163 if prev is None:
1164 1164 return src
1165 1165 src = prev
1166 1166
1167 1167 o = set([_firstsrc(r) for r in dests])
1168 1168 o -= set([None])
1169 1169 return subset & o
1170 1170
1171 1171 def outgoing(repo, subset, x):
1172 1172 """``outgoing([path])``
1173 1173 Changesets not found in the specified destination repository, or the
1174 1174 default push location.
1175 1175 """
1176 1176 import hg # avoid start-up nasties
1177 1177 # i18n: "outgoing" is a keyword
1178 1178 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1179 1179 # i18n: "outgoing" is a keyword
1180 1180 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1181 1181 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1182 1182 dest, branches = hg.parseurl(dest)
1183 1183 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1184 1184 if revs:
1185 1185 revs = [repo.lookup(rev) for rev in revs]
1186 1186 other = hg.peer(repo, {}, dest)
1187 1187 repo.ui.pushbuffer()
1188 1188 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1189 1189 repo.ui.popbuffer()
1190 1190 cl = repo.changelog
1191 1191 o = set([cl.rev(r) for r in outgoing.missing])
1192 1192 return subset & o
1193 1193
1194 1194 def p1(repo, subset, x):
1195 1195 """``p1([set])``
1196 1196 First parent of changesets in set, or the working directory.
1197 1197 """
1198 1198 if x is None:
1199 1199 p = repo[x].p1().rev()
1200 1200 if p >= 0:
1201 1201 return subset & baseset([p])
1202 1202 return baseset()
1203 1203
1204 1204 ps = set()
1205 1205 cl = repo.changelog
1206 1206 for r in getset(repo, spanset(repo), x):
1207 1207 ps.add(cl.parentrevs(r)[0])
1208 1208 ps -= set([node.nullrev])
1209 1209 return subset & ps
1210 1210
1211 1211 def p2(repo, subset, x):
1212 1212 """``p2([set])``
1213 1213 Second parent of changesets in set, or the working directory.
1214 1214 """
1215 1215 if x is None:
1216 1216 ps = repo[x].parents()
1217 1217 try:
1218 1218 p = ps[1].rev()
1219 1219 if p >= 0:
1220 1220 return subset & baseset([p])
1221 1221 return baseset()
1222 1222 except IndexError:
1223 1223 return baseset()
1224 1224
1225 1225 ps = set()
1226 1226 cl = repo.changelog
1227 1227 for r in getset(repo, spanset(repo), x):
1228 1228 ps.add(cl.parentrevs(r)[1])
1229 1229 ps -= set([node.nullrev])
1230 1230 return subset & ps
1231 1231
1232 1232 def parents(repo, subset, x):
1233 1233 """``parents([set])``
1234 1234 The set of all parents for all changesets in set, or the working directory.
1235 1235 """
1236 1236 if x is None:
1237 1237 ps = set(p.rev() for p in repo[x].parents())
1238 1238 else:
1239 1239 ps = set()
1240 1240 cl = repo.changelog
1241 1241 for r in getset(repo, spanset(repo), x):
1242 1242 ps.update(cl.parentrevs(r))
1243 1243 ps -= set([node.nullrev])
1244 1244 return subset & ps
1245 1245
1246 1246 def parentspec(repo, subset, x, n):
1247 1247 """``set^0``
1248 1248 The set.
1249 1249 ``set^1`` (or ``set^``), ``set^2``
1250 1250 First or second parent, respectively, of all changesets in set.
1251 1251 """
1252 1252 try:
1253 1253 n = int(n[1])
1254 1254 if n not in (0, 1, 2):
1255 1255 raise ValueError
1256 1256 except (TypeError, ValueError):
1257 1257 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1258 1258 ps = set()
1259 1259 cl = repo.changelog
1260 1260 for r in getset(repo, baseset(cl), x):
1261 1261 if n == 0:
1262 1262 ps.add(r)
1263 1263 elif n == 1:
1264 1264 ps.add(cl.parentrevs(r)[0])
1265 1265 elif n == 2:
1266 1266 parents = cl.parentrevs(r)
1267 1267 if len(parents) > 1:
1268 1268 ps.add(parents[1])
1269 1269 return subset & ps
1270 1270
1271 1271 def present(repo, subset, x):
1272 1272 """``present(set)``
1273 1273 An empty set, if any revision in set isn't found; otherwise,
1274 1274 all revisions in set.
1275 1275
1276 1276 If any of specified revisions is not present in the local repository,
1277 1277 the query is normally aborted. But this predicate allows the query
1278 1278 to continue even in such cases.
1279 1279 """
1280 1280 try:
1281 1281 return getset(repo, subset, x)
1282 1282 except error.RepoLookupError:
1283 1283 return baseset()
1284 1284
1285 1285 def public(repo, subset, x):
1286 1286 """``public()``
1287 1287 Changeset in public phase."""
1288 1288 # i18n: "public" is a keyword
1289 1289 getargs(x, 0, 0, _("public takes no arguments"))
1290 1290 phase = repo._phasecache.phase
1291 1291 target = phases.public
1292 1292 condition = lambda r: phase(repo, r) == target
1293 1293 return subset.filter(condition, cache=False)
1294 1294
1295 1295 def remote(repo, subset, x):
1296 1296 """``remote([id [,path]])``
1297 1297 Local revision that corresponds to the given identifier in a
1298 1298 remote repository, if present. Here, the '.' identifier is a
1299 1299 synonym for the current local branch.
1300 1300 """
1301 1301
1302 1302 import hg # avoid start-up nasties
1303 1303 # i18n: "remote" is a keyword
1304 1304 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1305 1305
1306 1306 q = '.'
1307 1307 if len(l) > 0:
1308 1308 # i18n: "remote" is a keyword
1309 1309 q = getstring(l[0], _("remote requires a string id"))
1310 1310 if q == '.':
1311 1311 q = repo['.'].branch()
1312 1312
1313 1313 dest = ''
1314 1314 if len(l) > 1:
1315 1315 # i18n: "remote" is a keyword
1316 1316 dest = getstring(l[1], _("remote requires a repository path"))
1317 1317 dest = repo.ui.expandpath(dest or 'default')
1318 1318 dest, branches = hg.parseurl(dest)
1319 1319 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1320 1320 if revs:
1321 1321 revs = [repo.lookup(rev) for rev in revs]
1322 1322 other = hg.peer(repo, {}, dest)
1323 1323 n = other.lookup(q)
1324 1324 if n in repo:
1325 1325 r = repo[n].rev()
1326 1326 if r in subset:
1327 1327 return baseset([r])
1328 1328 return baseset()
1329 1329
1330 1330 def removes(repo, subset, x):
1331 1331 """``removes(pattern)``
1332 1332 Changesets which remove files matching pattern.
1333 1333
1334 1334 The pattern without explicit kind like ``glob:`` is expected to be
1335 1335 relative to the current directory and match against a file or a
1336 1336 directory.
1337 1337 """
1338 1338 # i18n: "removes" is a keyword
1339 1339 pat = getstring(x, _("removes requires a pattern"))
1340 1340 return checkstatus(repo, subset, pat, 2)
1341 1341
1342 1342 def rev(repo, subset, x):
1343 1343 """``rev(number)``
1344 1344 Revision with the given numeric identifier.
1345 1345 """
1346 1346 # i18n: "rev" is a keyword
1347 1347 l = getargs(x, 1, 1, _("rev requires one argument"))
1348 1348 try:
1349 1349 # i18n: "rev" is a keyword
1350 1350 l = int(getstring(l[0], _("rev requires a number")))
1351 1351 except (TypeError, ValueError):
1352 1352 # i18n: "rev" is a keyword
1353 1353 raise error.ParseError(_("rev expects a number"))
1354 1354 if l not in fullreposet(repo):
1355 1355 return baseset()
1356 1356 return subset & baseset([l])
1357 1357
1358 1358 def matching(repo, subset, x):
1359 1359 """``matching(revision [, field])``
1360 1360 Changesets in which a given set of fields match the set of fields in the
1361 1361 selected revision or set.
1362 1362
1363 1363 To match more than one field pass the list of fields to match separated
1364 1364 by spaces (e.g. ``author description``).
1365 1365
1366 1366 Valid fields are most regular revision fields and some special fields.
1367 1367
1368 1368 Regular revision fields are ``description``, ``author``, ``branch``,
1369 1369 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1370 1370 and ``diff``.
1371 1371 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1372 1372 contents of the revision. Two revisions matching their ``diff`` will
1373 1373 also match their ``files``.
1374 1374
1375 1375 Special fields are ``summary`` and ``metadata``:
1376 1376 ``summary`` matches the first line of the description.
1377 1377 ``metadata`` is equivalent to matching ``description user date``
1378 1378 (i.e. it matches the main metadata fields).
1379 1379
1380 1380 ``metadata`` is the default field which is used when no fields are
1381 1381 specified. You can match more than one field at a time.
1382 1382 """
1383 1383 # i18n: "matching" is a keyword
1384 1384 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1385 1385
1386 1386 revs = getset(repo, baseset(repo.changelog), l[0])
1387 1387
1388 1388 fieldlist = ['metadata']
1389 1389 if len(l) > 1:
1390 1390 fieldlist = getstring(l[1],
1391 1391 # i18n: "matching" is a keyword
1392 1392 _("matching requires a string "
1393 1393 "as its second argument")).split()
1394 1394
1395 1395 # Make sure that there are no repeated fields,
1396 1396 # expand the 'special' 'metadata' field type
1397 1397 # and check the 'files' whenever we check the 'diff'
1398 1398 fields = []
1399 1399 for field in fieldlist:
1400 1400 if field == 'metadata':
1401 1401 fields += ['user', 'description', 'date']
1402 1402 elif field == 'diff':
1403 1403 # a revision matching the diff must also match the files
1404 1404 # since matching the diff is very costly, make sure to
1405 1405 # also match the files first
1406 1406 fields += ['files', 'diff']
1407 1407 else:
1408 1408 if field == 'author':
1409 1409 field = 'user'
1410 1410 fields.append(field)
1411 1411 fields = set(fields)
1412 1412 if 'summary' in fields and 'description' in fields:
1413 1413 # If a revision matches its description it also matches its summary
1414 1414 fields.discard('summary')
1415 1415
1416 1416 # We may want to match more than one field
1417 1417 # Not all fields take the same amount of time to be matched
1418 1418 # Sort the selected fields in order of increasing matching cost
1419 1419 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1420 1420 'files', 'description', 'substate', 'diff']
1421 1421 def fieldkeyfunc(f):
1422 1422 try:
1423 1423 return fieldorder.index(f)
1424 1424 except ValueError:
1425 1425 # assume an unknown field is very costly
1426 1426 return len(fieldorder)
1427 1427 fields = list(fields)
1428 1428 fields.sort(key=fieldkeyfunc)
1429 1429
1430 1430 # Each field will be matched with its own "getfield" function
1431 1431 # which will be added to the getfieldfuncs array of functions
1432 1432 getfieldfuncs = []
1433 1433 _funcs = {
1434 1434 'user': lambda r: repo[r].user(),
1435 1435 'branch': lambda r: repo[r].branch(),
1436 1436 'date': lambda r: repo[r].date(),
1437 1437 'description': lambda r: repo[r].description(),
1438 1438 'files': lambda r: repo[r].files(),
1439 1439 'parents': lambda r: repo[r].parents(),
1440 1440 'phase': lambda r: repo[r].phase(),
1441 1441 'substate': lambda r: repo[r].substate,
1442 1442 'summary': lambda r: repo[r].description().splitlines()[0],
1443 1443 'diff': lambda r: list(repo[r].diff(git=True),)
1444 1444 }
1445 1445 for info in fields:
1446 1446 getfield = _funcs.get(info, None)
1447 1447 if getfield is None:
1448 1448 raise error.ParseError(
1449 1449 # i18n: "matching" is a keyword
1450 1450 _("unexpected field name passed to matching: %s") % info)
1451 1451 getfieldfuncs.append(getfield)
1452 1452 # convert the getfield array of functions into a "getinfo" function
1453 1453 # which returns an array of field values (or a single value if there
1454 1454 # is only one field to match)
1455 1455 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1456 1456
1457 1457 def matches(x):
1458 1458 for rev in revs:
1459 1459 target = getinfo(rev)
1460 1460 match = True
1461 1461 for n, f in enumerate(getfieldfuncs):
1462 1462 if target[n] != f(x):
1463 1463 match = False
1464 1464 if match:
1465 1465 return True
1466 1466 return False
1467 1467
1468 1468 return subset.filter(matches)
1469 1469
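`matching()` sorts the requested fields so cheap comparisons run before expensive ones, with unknown fields pushed to the end. That key function is easy to exercise on its own:

```python
fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
              'files', 'description', 'substate', 'diff']

def fieldkeyfunc(f):
    try:
        return fieldorder.index(f)
    except ValueError:
        return len(fieldorder)      # unknown fields are assumed most costly

fields = ['diff', 'user', 'files', 'somethingelse']
assert sorted(fields, key=fieldkeyfunc) == ['user', 'files', 'diff',
                                            'somethingelse']
```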
1470 1470 def reverse(repo, subset, x):
1471 1471 """``reverse(set)``
1472 1472 Reverse order of set.
1473 1473 """
1474 1474 l = getset(repo, subset, x)
1475 1475 l.reverse()
1476 1476 return l
1477 1477
1478 1478 def roots(repo, subset, x):
1479 1479 """``roots(set)``
1480 1480 Changesets in set with no parent changeset in set.
1481 1481 """
1482 1482 s = getset(repo, spanset(repo), x)
1483 1483 subset = baseset([r for r in s if r in subset])
1484 1484 cs = _children(repo, subset, s)
1485 1485 return subset - cs
1486 1486
1487 1487 def secret(repo, subset, x):
1488 1488 """``secret()``
1489 1489 Changeset in secret phase."""
1490 1490 # i18n: "secret" is a keyword
1491 1491 getargs(x, 0, 0, _("secret takes no arguments"))
1492 1492 phase = repo._phasecache.phase
1493 1493 target = phases.secret
1494 1494 condition = lambda r: phase(repo, r) == target
1495 1495 return subset.filter(condition, cache=False)
1496 1496
1497 1497 def sort(repo, subset, x):
1498 1498 """``sort(set[, [-]key...])``
1499 1499 Sort set by keys. The default sort order is ascending, specify a key
1500 1500 as ``-key`` to sort in descending order.
1501 1501
1502 1502 The keys can be:
1503 1503
1504 1504 - ``rev`` for the revision number,
1505 1505 - ``branch`` for the branch name,
1506 1506 - ``desc`` for the commit message (description),
1507 1507 - ``user`` for user name (``author`` can be used as an alias),
1508 1508 - ``date`` for the commit date
1509 1509 """
1510 1510 # i18n: "sort" is a keyword
1511 1511 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1512 1512 keys = "rev"
1513 1513 if len(l) == 2:
1514 1514 # i18n: "sort" is a keyword
1515 1515 keys = getstring(l[1], _("sort spec must be a string"))
1516 1516
1517 1517 s = l[0]
1518 1518 keys = keys.split()
1519 1519 l = []
1520 1520 def invert(s):
1521 1521 return "".join(chr(255 - ord(c)) for c in s)
1522 1522 revs = getset(repo, subset, s)
1523 1523 if keys == ["rev"]:
1524 1524 revs.sort()
1525 1525 return revs
1526 1526 elif keys == ["-rev"]:
1527 1527 revs.sort(reverse=True)
1528 1528 return revs
1529 1529 for r in revs:
1530 1530 c = repo[r]
1531 1531 e = []
1532 1532 for k in keys:
1533 1533 if k == 'rev':
1534 1534 e.append(r)
1535 1535 elif k == '-rev':
1536 1536 e.append(-r)
1537 1537 elif k == 'branch':
1538 1538 e.append(c.branch())
1539 1539 elif k == '-branch':
1540 1540 e.append(invert(c.branch()))
1541 1541 elif k == 'desc':
1542 1542 e.append(c.description())
1543 1543 elif k == '-desc':
1544 1544 e.append(invert(c.description()))
1545 1545 elif k in 'user author':
1546 1546 e.append(c.user())
1547 1547 elif k in '-user -author':
1548 1548 e.append(invert(c.user()))
1549 1549 elif k == 'date':
1550 1550 e.append(c.date()[0])
1551 1551 elif k == '-date':
1552 1552 e.append(-c.date()[0])
1553 1553 else:
1554 1554 raise error.ParseError(_("unknown sort key %r") % k)
1555 1555 e.append(r)
1556 1556 l.append(e)
1557 1557 l.sort()
1558 1558 return baseset([e[-1] for e in l])
1559 1559
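The `invert()` helper used for descending string keys maps every byte to its complement, so an ascending sort on the inverted strings comes out as a descending sort on the originals. A quick standalone check of that property (for keys in the byte range where none is a prefix of another, which holds for the short branch/user strings here):

```python
def invert(s):
    return "".join(chr(255 - ord(c)) for c in s)

names = ['default', 'stable', 'bar', 'foo']
assert sorted(names, key=invert) == sorted(names, reverse=True)
```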
1560 1560 def _stringmatcher(pattern):
1561 1561 """
1562 1562 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1563 1563 returns the matcher name, pattern, and matcher function.
1564 1564 missing or unknown prefixes are treated as literal matches.
1565 1565
1566 1566 helper for tests:
1567 1567 >>> def test(pattern, *tests):
1568 1568 ... kind, pattern, matcher = _stringmatcher(pattern)
1569 1569 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1570 1570
1571 1571 exact matching (no prefix):
1572 1572 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1573 1573 ('literal', 'abcdefg', [False, False, True])
1574 1574
1575 1575 regex matching ('re:' prefix)
1576 1576 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1577 1577 ('re', 'a.+b', [False, False, True])
1578 1578
1579 1579 force exact matches ('literal:' prefix)
1580 1580 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1581 1581 ('literal', 're:foobar', [False, True])
1582 1582
1583 1583 unknown prefixes are ignored and treated as literals
1584 1584 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1585 1585 ('literal', 'foo:bar', [False, False, True])
1586 1586 """
1587 1587 if pattern.startswith('re:'):
1588 1588 pattern = pattern[3:]
1589 1589 try:
1590 1590 regex = re.compile(pattern)
1591 1591 except re.error, e:
1592 1592 raise error.ParseError(_('invalid regular expression: %s')
1593 1593 % e)
1594 1594 return 're', pattern, regex.search
1595 1595 elif pattern.startswith('literal:'):
1596 1596 pattern = pattern[8:]
1597 1597 return 'literal', pattern, pattern.__eq__
1598 1598
1599 1599 def _substringmatcher(pattern):
1600 1600 kind, pattern, matcher = _stringmatcher(pattern)
1601 1601 if kind == 'literal':
1602 1602 matcher = lambda s: pattern in s
1603 1603 return kind, pattern, matcher
1604 1604
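`_substringmatcher` reuses `_stringmatcher` but downgrades the `literal` case from equality to substring containment, which is why `user(string)` matches anywhere in the user name. A tiny standalone model of that distinction (simplified copies, without the error handling of the real helpers):

```python
import re

def stringmatcher(pattern):
    # exact-match semantics for 'literal', regex search for 're:'
    if pattern.startswith('re:'):
        return 're', pattern[3:], re.compile(pattern[3:]).search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__

def substringmatcher(pattern):
    kind, pattern, matcher = stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher

assert stringmatcher('bob')[2]('bob@example.com') is False
assert substringmatcher('bob')[2]('bob@example.com') is True
```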
1605 1605 def tag(repo, subset, x):
1606 1606 """``tag([name])``
1607 1607 The specified tag by name, or all tagged revisions if no name is given.
1608 1608
1609 1609 If `name` starts with `re:`, the remainder of the name is treated as
1610 1610 a regular expression. To match a tag that actually starts with `re:`,
1611 1611 use the prefix `literal:`.
1612 1612 """
1613 1613 # i18n: "tag" is a keyword
1614 1614 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1615 1615 cl = repo.changelog
1616 1616 if args:
1617 1617 pattern = getstring(args[0],
1618 1618 # i18n: "tag" is a keyword
1619 1619 _('the argument to tag must be a string'))
1620 1620 kind, pattern, matcher = _stringmatcher(pattern)
1621 1621 if kind == 'literal':
1622 1622 # avoid resolving all tags
1623 1623 tn = repo._tagscache.tags.get(pattern, None)
1624 1624 if tn is None:
1625 1625 raise util.Abort(_("tag '%s' does not exist") % pattern)
1626 1626 s = set([repo[tn].rev()])
1627 1627 else:
1628 1628 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1629 1629 else:
1630 1630 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1631 1631 return subset & s
1632 1632
1633 1633 def tagged(repo, subset, x):
1634 1634 return tag(repo, subset, x)
1635 1635
1636 1636 def unstable(repo, subset, x):
1637 1637 """``unstable()``
1638 1638 Non-obsolete changesets with obsolete ancestors.
1639 1639 """
1640 1640 # i18n: "unstable" is a keyword
1641 1641 getargs(x, 0, 0, _("unstable takes no arguments"))
1642 1642 unstables = obsmod.getrevs(repo, 'unstable')
1643 1643 return subset & unstables
1644 1644
1645 1645
1646 1646 def user(repo, subset, x):
1647 1647 """``user(string)``
1648 1648 User name contains string. The match is case-insensitive.
1649 1649
1650 1650 If `string` starts with `re:`, the remainder of the string is treated as
1651 1651 a regular expression. To match a user that actually contains `re:`, use
1652 1652 the prefix `literal:`.
1653 1653 """
1654 1654 return author(repo, subset, x)
1655 1655
1656 1656 # for internal use
1657 1657 def _list(repo, subset, x):
1658 1658 s = getstring(x, "internal error")
1659 1659 if not s:
1660 1660 return baseset()
1661 1661 ls = [repo[r].rev() for r in s.split('\0')]
1662 1662 s = subset
1663 1663 return baseset([r for r in ls if r in s])
1664 1664
1665 1665 # for internal use
1666 1666 def _intlist(repo, subset, x):
1667 1667 s = getstring(x, "internal error")
1668 1668 if not s:
1669 1669 return baseset()
1670 1670 ls = [int(r) for r in s.split('\0')]
1671 1671 s = subset
1672 1672 return baseset([r for r in ls if r in s])
1673 1673
1674 1674 # for internal use
1675 1675 def _hexlist(repo, subset, x):
1676 1676 s = getstring(x, "internal error")
1677 1677 if not s:
1678 1678 return baseset()
1679 1679 cl = repo.changelog
1680 1680 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1681 1681 s = subset
1682 1682 return baseset([r for r in ls if r in s])
1683 1683
1684 1684 symbols = {
1685 1685 "adds": adds,
1686 1686 "all": getall,
1687 1687 "ancestor": ancestor,
1688 1688 "ancestors": ancestors,
1689 1689 "_firstancestors": _firstancestors,
1690 1690 "author": author,
1691 1691 "only": only,
1692 1692 "bisect": bisect,
1693 1693 "bisected": bisected,
1694 1694 "bookmark": bookmark,
1695 1695 "branch": branch,
1696 1696 "branchpoint": branchpoint,
1697 1697 "bumped": bumped,
1698 1698 "bundle": bundle,
1699 1699 "children": children,
1700 1700 "closed": closed,
1701 1701 "contains": contains,
1702 1702 "converted": converted,
1703 1703 "date": date,
1704 1704 "desc": desc,
1705 1705 "descendants": descendants,
1706 1706 "_firstdescendants": _firstdescendants,
1707 1707 "destination": destination,
1708 1708 "divergent": divergent,
1709 1709 "draft": draft,
1710 1710 "extinct": extinct,
1711 1711 "extra": extra,
1712 1712 "file": hasfile,
1713 1713 "filelog": filelog,
1714 1714 "first": first,
1715 1715 "follow": follow,
1716 1716 "_followfirst": _followfirst,
1717 1717 "grep": grep,
1718 1718 "head": head,
1719 1719 "heads": heads,
1720 1720 "hidden": hidden,
1721 1721 "id": node_,
1722 1722 "keyword": keyword,
1723 1723 "last": last,
1724 1724 "limit": limit,
1725 1725 "_matchfiles": _matchfiles,
1726 1726 "max": maxrev,
1727 1727 "merge": merge,
1728 1728 "min": minrev,
1729 1729 "modifies": modifies,
1730 1730 "obsolete": obsolete,
1731 1731 "origin": origin,
1732 1732 "outgoing": outgoing,
1733 1733 "p1": p1,
1734 1734 "p2": p2,
1735 1735 "parents": parents,
1736 1736 "present": present,
1737 1737 "public": public,
1738 1738 "remote": remote,
1739 1739 "removes": removes,
1740 1740 "rev": rev,
1741 1741 "reverse": reverse,
1742 1742 "roots": roots,
1743 1743 "sort": sort,
1744 1744 "secret": secret,
1745 1745 "matching": matching,
1746 1746 "tag": tag,
1747 1747 "tagged": tagged,
1748 1748 "user": user,
1749 1749 "unstable": unstable,
1750 1750 "_list": _list,
1751 1751 "_intlist": _intlist,
1752 1752 "_hexlist": _hexlist,
1753 1753 }
1754 1754
1755 1755 # symbols which can't be used for a DoS attack for any given input
1756 1756 # (e.g. those which accept regexes as plain strings shouldn't be included)
1757 1757 # functions that just return a lot of changesets (like all) don't count here
1758 1758 safesymbols = set([
1759 1759 "adds",
1760 1760 "all",
1761 1761 "ancestor",
1762 1762 "ancestors",
1763 1763 "_firstancestors",
1764 1764 "author",
1765 1765 "bisect",
1766 1766 "bisected",
1767 1767 "bookmark",
1768 1768 "branch",
1769 1769 "branchpoint",
1770 1770 "bumped",
1771 1771 "bundle",
1772 1772 "children",
1773 1773 "closed",
1774 1774 "converted",
1775 1775 "date",
1776 1776 "desc",
1777 1777 "descendants",
1778 1778 "_firstdescendants",
1779 1779 "destination",
1780 1780 "divergent",
1781 1781 "draft",
1782 1782 "extinct",
1783 1783 "extra",
1784 1784 "file",
1785 1785 "filelog",
1786 1786 "first",
1787 1787 "follow",
1788 1788 "_followfirst",
1789 1789 "head",
1790 1790 "heads",
1791 1791 "hidden",
1792 1792 "id",
1793 1793 "keyword",
1794 1794 "last",
1795 1795 "limit",
1796 1796 "_matchfiles",
1797 1797 "max",
1798 1798 "merge",
1799 1799 "min",
1800 1800 "modifies",
1801 1801 "obsolete",
1802 1802 "origin",
1803 1803 "outgoing",
1804 1804 "p1",
1805 1805 "p2",
1806 1806 "parents",
1807 1807 "present",
1808 1808 "public",
1809 1809 "remote",
1810 1810 "removes",
1811 1811 "rev",
1812 1812 "reverse",
1813 1813 "roots",
1814 1814 "sort",
1815 1815 "secret",
1816 1816 "matching",
1817 1817 "tag",
1818 1818 "tagged",
1819 1819 "user",
1820 1820 "unstable",
1821 1821 "_list",
1822 1822 "_intlist",
1823 1823 "_hexlist",
1824 1824 ])
1825 1825
1826 1826 methods = {
1827 1827 "range": rangeset,
1828 1828 "dagrange": dagrange,
1829 1829 "string": stringset,
1830 1830 "symbol": symbolset,
1831 1831 "and": andset,
1832 1832 "or": orset,
1833 1833 "not": notset,
1834 1834 "list": listset,
1835 1835 "func": func,
1836 1836 "ancestor": ancestorspec,
1837 1837 "parent": parentspec,
1838 1838 "parentpost": p1,
1839 1839 }
1840 1840
1841 1841 def optimize(x, small):
1842 1842 if x is None:
1843 1843 return 0, x
1844 1844
1845 1845 smallbonus = 1
1846 1846 if small:
1847 1847 smallbonus = .5
1848 1848
1849 1849 op = x[0]
1850 1850 if op == 'minus':
1851 1851 return optimize(('and', x[1], ('not', x[2])), small)
1852 1852 elif op == 'dagrangepre':
1853 1853 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1854 1854 elif op == 'dagrangepost':
1855 1855 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1856 1856 elif op == 'rangepre':
1857 1857 return optimize(('range', ('string', '0'), x[1]), small)
1858 1858 elif op == 'rangepost':
1859 1859 return optimize(('range', x[1], ('string', 'tip')), small)
1860 1860 elif op == 'negate':
1861 1861 return optimize(('string',
1862 1862 '-' + getstring(x[1], _("can't negate that"))), small)
1863 1863 elif op in 'string symbol negate':
1864 1864 return smallbonus, x # single revisions are small
1865 1865 elif op == 'and':
1866 1866 wa, ta = optimize(x[1], True)
1867 1867 wb, tb = optimize(x[2], True)
1868 1868
1869 1869 # (::x and not ::y)/(not ::y and ::x) have a fast path
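        # For illustration (a sketch of the rewrite done below): the tree of
        # "::a and not ::b", i.e.
        #   ('and', ('func', ('symbol', 'ancestors'), ('symbol', 'a')),
        #           ('not', ('func', ('symbol', 'ancestors'), ('symbol', 'b'))))
        # is rewritten into the tree of "only(a, b)".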
1870 1870 def isonly(revs, bases):
1871 1871 return (
1872 1872 revs[0] == 'func'
1873 1873 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1874 1874 and bases[0] == 'not'
1875 1875 and bases[1][0] == 'func'
1876 1876 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1877 1877
1878 1878 w = min(wa, wb)
1879 1879 if isonly(ta, tb):
1880 1880 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1881 1881 if isonly(tb, ta):
1882 1882 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1883 1883
1884 1884 if wa > wb:
1885 1885 return w, (op, tb, ta)
1886 1886 return w, (op, ta, tb)
1887 1887 elif op == 'or':
1888 1888 wa, ta = optimize(x[1], False)
1889 1889 wb, tb = optimize(x[2], False)
1890 1890 if wb < wa:
1891 1891 wb, wa = wa, wb
1892 1892 return max(wa, wb), (op, ta, tb)
1893 1893 elif op == 'not':
1894 1894 o = optimize(x[1], not small)
1895 1895 return o[0], (op, o[1])
1896 1896 elif op == 'parentpost':
1897 1897 o = optimize(x[1], small)
1898 1898 return o[0], (op, o[1])
1899 1899 elif op == 'group':
1900 1900 return optimize(x[1], small)
1901 1901 elif op in 'dagrange range list parent ancestorspec':
1902 1902 if op == 'parent':
1903 1903 # x^:y means (x^) : y, not x ^ (:y)
1904 1904 post = ('parentpost', x[1])
1905 1905 if x[2][0] == 'dagrangepre':
1906 1906 return optimize(('dagrange', post, x[2][1]), small)
1907 1907 elif x[2][0] == 'rangepre':
1908 1908 return optimize(('range', post, x[2][1]), small)
1909 1909
1910 1910 wa, ta = optimize(x[1], small)
1911 1911 wb, tb = optimize(x[2], small)
1912 1912 return wa + wb, (op, ta, tb)
1913 1913 elif op == 'func':
1914 1914 f = getstring(x[1], _("not a symbol"))
1915 1915 wa, ta = optimize(x[2], small)
1916 1916 if f in ("author branch closed date desc file grep keyword "
1917 1917 "outgoing user"):
1918 1918 w = 10 # slow
1919 1919 elif f in "modifies adds removes":
1920 1920 w = 30 # slower
1921 1921 elif f == "contains":
1922 1922 w = 100 # very slow
1923 1923 elif f == "ancestor":
1924 1924 w = 1 * smallbonus
1925 1925 elif f in "reverse limit first _intlist":
1926 1926 w = 0
1927 1927 elif f in "sort":
1928 1928 w = 10 # assume most sorts look at changelog
1929 1929 else:
1930 1930 w = 1
1931 1931 return w + wa, (op, x[1], ta)
1932 1932 return 1, x
1933 1933
1934 1934 _aliasarg = ('func', ('symbol', '_aliasarg'))
1935 1935 def _getaliasarg(tree):
1936 1936 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1937 1937     return X; otherwise return None.
1938 1938 """
1939 1939 if (len(tree) == 3 and tree[:2] == _aliasarg
1940 1940 and tree[2][0] == 'string'):
1941 1941 return tree[2][1]
1942 1942 return None
1943 1943
1944 1944 def _checkaliasarg(tree, known=None):
1945 1945     """Check that tree contains no _aliasarg construct, or only ones whose
1946 1946     value is in known. Used to avoid alias placeholder injection.
1947 1947 """
1948 1948 if isinstance(tree, tuple):
1949 1949 arg = _getaliasarg(tree)
1950 1950 if arg is not None and (not known or arg not in known):
1951 1951 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1952 1952 for t in tree:
1953 1953 _checkaliasarg(t, known)
1954 1954
1955 1955 class revsetalias(object):
1956 1956 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1957 1957 args = None
1958 1958
1959 1959 def __init__(self, name, value):
1960 1960 '''Aliases like:
1961 1961
1962 1962 h = heads(default)
1963 1963 b($1) = ancestors($1) - ancestors(default)
1964 1964 '''
1965 1965 m = self.funcre.search(name)
1966 1966 if m:
1967 1967 self.name = m.group(1)
1968 1968 self.tree = ('func', ('symbol', m.group(1)))
1969 1969 self.args = [x.strip() for x in m.group(2).split(',')]
1970 1970 for arg in self.args:
1971 1971                 # _aliasarg() is an unknown symbol only used to separate
1972 1972 # alias argument placeholders from regular strings.
1973 1973 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1974 1974 else:
1975 1975 self.name = name
1976 1976 self.tree = ('symbol', name)
1977 1977
1978 1978 self.replacement, pos = parse(value)
1979 1979 if pos != len(value):
1980 1980 raise error.ParseError(_('invalid token'), pos)
1981 1981 # Check for placeholder injection
1982 1982 _checkaliasarg(self.replacement, self.args)
1983 1983
1984 1984 def _getalias(aliases, tree):
1985 1985 """If tree looks like an unexpanded alias, return it. Return None
1986 1986 otherwise.
1987 1987 """
1988 1988 if isinstance(tree, tuple) and tree:
1989 1989 if tree[0] == 'symbol' and len(tree) == 2:
1990 1990 name = tree[1]
1991 1991 alias = aliases.get(name)
1992 1992 if alias and alias.args is None and alias.tree == tree:
1993 1993 return alias
1994 1994 if tree[0] == 'func' and len(tree) > 1:
1995 1995 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1996 1996 name = tree[1][1]
1997 1997 alias = aliases.get(name)
1998 1998 if alias and alias.args is not None and alias.tree == tree[:2]:
1999 1999 return alias
2000 2000 return None
2001 2001
2002 2002 def _expandargs(tree, args):
2003 2003 """Replace _aliasarg instances with the substitution value of the
2004 2004 same name in args, recursively.
2005 2005 """
2006 2006 if not tree or not isinstance(tree, tuple):
2007 2007 return tree
2008 2008 arg = _getaliasarg(tree)
2009 2009 if arg is not None:
2010 2010 return args[arg]
2011 2011 return tuple(_expandargs(t, args) for t in tree)
2012 2012
2013 2013 def _expandaliases(aliases, tree, expanding, cache):
2014 2014 """Expand aliases in tree, recursively.
2015 2015
2016 2016 'aliases' is a dictionary mapping user defined aliases to
2017 2017 revsetalias objects.
2018 2018 """
2019 2019 if not isinstance(tree, tuple):
2020 2020 # Do not expand raw strings
2021 2021 return tree
2022 2022 alias = _getalias(aliases, tree)
2023 2023 if alias is not None:
2024 2024 if alias in expanding:
2025 2025 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2026 2026 'detected') % alias.name)
2027 2027 expanding.append(alias)
2028 2028 if alias.name not in cache:
2029 2029 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2030 2030 expanding, cache)
2031 2031 result = cache[alias.name]
2032 2032 expanding.pop()
2033 2033 if alias.args is not None:
2034 2034 l = getlist(tree[2])
2035 2035 if len(l) != len(alias.args):
2036 2036 raise error.ParseError(
2037 2037 _('invalid number of arguments: %s') % len(l))
2038 2038 l = [_expandaliases(aliases, a, [], cache) for a in l]
2039 2039 result = _expandargs(result, dict(zip(alias.args, l)))
2040 2040 else:
2041 2041 result = tuple(_expandaliases(aliases, t, expanding, cache)
2042 2042 for t in tree)
2043 2043 return result
2044 2044
2045 2045 def findaliases(ui, tree):
2046 2046 _checkaliasarg(tree)
2047 2047 aliases = {}
2048 2048 for k, v in ui.configitems('revsetalias'):
2049 2049 alias = revsetalias(k, v)
2050 2050 aliases[alias.name] = alias
2051 2051 return _expandaliases(aliases, tree, [], {})
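# Usage sketch, reusing the alias shown in the revsetalias docstring above:
# with "b($1) = ancestors($1) - ancestors(default)" configured under
# [revsetalias], findaliases(ui, parse("b(foo)")[0]) returns a tree
# equivalent to parsing "ancestors(foo) - ancestors(default)".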
2052 2052
2053 2053 def parse(spec, lookup=None):
2054 2054 p = parser.parser(tokenize, elements)
2055 2055 return p.parse(spec, lookup=lookup)
2056 2056
2057 2057 def match(ui, spec, repo=None):
2058 2058 if not spec:
2059 2059 raise error.ParseError(_("empty query"))
2060 2060 lookup = None
2061 2061 if repo:
2062 2062 lookup = repo.__contains__
2063 2063 tree, pos = parse(spec, lookup)
2064 2064 if (pos != len(spec)):
2065 2065 raise error.ParseError(_("invalid token"), pos)
2066 2066 if ui:
2067 2067 tree = findaliases(ui, tree)
2068 2068 weight, tree = optimize(tree, True)
2069 2069 def mfunc(repo, subset):
2070 2070 if util.safehasattr(subset, 'isascending'):
2071 2071 result = getset(repo, subset, tree)
2072 2072 else:
2073 2073 result = getset(repo, baseset(subset), tree)
2074 2074 return result
2075 2075 return mfunc
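# Usage sketch (a hypothetical spec, simplified from how callers typically
# drive this): build the matcher once, then evaluate it against a subset.
#
#   m = match(repo.ui, 'head() and not closed()', repo)
#   revs = m(repo, spanset(repo))   # a smartset of matching revisions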
2076 2076
2077 2077 def formatspec(expr, *args):
2078 2078 '''
2079 2079 This is a convenience function for using revsets internally, and
2080 2080 escapes arguments appropriately. Aliases are intentionally ignored
2081 2081 so that intended expression behavior isn't accidentally subverted.
2082 2082
2083 2083 Supported arguments:
2084 2084
2085 2085 %r = revset expression, parenthesized
2086 2086 %d = int(arg), no quoting
2087 2087 %s = string(arg), escaped and single-quoted
2088 2088 %b = arg.branch(), escaped and single-quoted
2089 2089 %n = hex(arg), single-quoted
2090 2090 %% = a literal '%'
2091 2091
2092 2092 Prefixing the type with 'l' specifies a parenthesized list of that type.
2093 2093
2094 2094 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2095 2095 '(10 or 11):: and ((this()) or (that()))'
2096 2096 >>> formatspec('%d:: and not %d::', 10, 20)
2097 2097 '10:: and not 20::'
2098 2098 >>> formatspec('%ld or %ld', [], [1])
2099 2099 "_list('') or 1"
2100 2100 >>> formatspec('keyword(%s)', 'foo\\xe9')
2101 2101 "keyword('foo\\\\xe9')"
2102 2102 >>> b = lambda: 'default'
2103 2103 >>> b.branch = b
2104 2104 >>> formatspec('branch(%b)', b)
2105 2105 "branch('default')"
2106 2106 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2107 2107 "root(_list('a\\x00b\\x00c\\x00d'))"
2108 2108 '''
2109 2109
2110 2110 def quote(s):
2111 2111 return repr(str(s))
2112 2112
2113 2113 def argtype(c, arg):
2114 2114 if c == 'd':
2115 2115 return str(int(arg))
2116 2116 elif c == 's':
2117 2117 return quote(arg)
2118 2118 elif c == 'r':
2119 2119 parse(arg) # make sure syntax errors are confined
2120 2120 return '(%s)' % arg
2121 2121 elif c == 'n':
2122 2122 return quote(node.hex(arg))
2123 2123 elif c == 'b':
2124 2124 return quote(arg.branch())
2125 2125
2126 2126 def listexp(s, t):
2127 2127 l = len(s)
2128 2128 if l == 0:
2129 2129 return "_list('')"
2130 2130 elif l == 1:
2131 2131 return argtype(t, s[0])
2132 2132 elif t == 'd':
2133 2133 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2134 2134 elif t == 's':
2135 2135 return "_list('%s')" % "\0".join(s)
2136 2136 elif t == 'n':
2137 2137 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2138 2138 elif t == 'b':
2139 2139 return "_list('%s')" % "\0".join(a.branch() for a in s)
2140 2140
2141 2141 m = l // 2
2142 2142 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2143 2143
2144 2144 ret = ''
2145 2145 pos = 0
2146 2146 arg = 0
2147 2147 while pos < len(expr):
2148 2148 c = expr[pos]
2149 2149 if c == '%':
2150 2150 pos += 1
2151 2151 d = expr[pos]
2152 2152 if d == '%':
2153 2153 ret += d
2154 2154 elif d in 'dsnbr':
2155 2155 ret += argtype(d, args[arg])
2156 2156 arg += 1
2157 2157 elif d == 'l':
2158 2158 # a list of some type
2159 2159 pos += 1
2160 2160 d = expr[pos]
2161 2161 ret += listexp(list(args[arg]), d)
2162 2162 arg += 1
2163 2163 else:
2164 2164 raise util.Abort('unexpected revspec format character %s' % d)
2165 2165 else:
2166 2166 ret += c
2167 2167 pos += 1
2168 2168
2169 2169 return ret
2170 2170
2171 2171 def prettyformat(tree):
2172 2172 def _prettyformat(tree, level, lines):
2173 2173 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2174 2174 lines.append((level, str(tree)))
2175 2175 else:
2176 2176 lines.append((level, '(%s' % tree[0]))
2177 2177 for s in tree[1:]:
2178 2178 _prettyformat(s, level + 1, lines)
2179 2179 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2180 2180
2181 2181 lines = []
2182 2182 _prettyformat(tree, 0, lines)
2183 2183 output = '\n'.join((' '*l + s) for l, s in lines)
2184 2184 return output
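# For instance (sketch): prettyformat(parse('a or b')[0]) produces
#   (or
#     ('symbol', 'a')
#     ('symbol', 'b'))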
2185 2185
2186 2186 def depth(tree):
2187 2187 if isinstance(tree, tuple):
2188 2188 return max(map(depth, tree)) + 1
2189 2189 else:
2190 2190 return 0
2191 2191
2192 2192 def funcsused(tree):
2193 2193 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2194 2194 return set()
2195 2195 else:
2196 2196 funcs = set()
2197 2197 for s in tree[1:]:
2198 2198 funcs |= funcsused(s)
2199 2199 if tree[0] == 'func':
2200 2200 funcs.add(tree[1][1])
2201 2201 return funcs
2202 2202
2203 2203 class abstractsmartset(object):
2204 2204
2205 2205 def __nonzero__(self):
2206 2206 """True if the smartset is not empty"""
2207 2207 raise NotImplementedError()
2208 2208
2209 2209 def __contains__(self, rev):
2210 2210 """provide fast membership testing"""
2211 2211 raise NotImplementedError()
2212 2212
2213 2213 def __iter__(self):
2214 2214 """iterate the set in the order it is supposed to be iterated"""
2215 2215 raise NotImplementedError()
2216 2216
2217 2217 # Attributes containing a function to perform a fast iteration in a given
2218 2218 # direction. A smartset can have none, one, or both defined.
2219 2219 #
2220 2220 # Default value is None instead of a function returning None to avoid
2221 2221 # initializing an iterator just for testing if a fast method exists.
2222 2222 fastasc = None
2223 2223 fastdesc = None
2224 2224
2225 2225 def isascending(self):
2226 2226 """True if the set will iterate in ascending order"""
2227 2227 raise NotImplementedError()
2228 2228
2229 2229 def isdescending(self):
2230 2230 """True if the set will iterate in descending order"""
2231 2231 raise NotImplementedError()
2232 2232
2233 2233 def min(self):
2234 2234 """return the minimum element in the set"""
2235 2235 if self.fastasc is not None:
2236 2236 for r in self.fastasc():
2237 2237 return r
2238 2238 raise ValueError('arg is an empty sequence')
2239 2239 return min(self)
2240 2240
2241 2241 def max(self):
2242 2242 """return the maximum element in the set"""
2243 2243 if self.fastdesc is not None:
2244 2244 for r in self.fastdesc():
2245 2245 return r
2246 2246 raise ValueError('arg is an empty sequence')
2247 2247 return max(self)
2248 2248
2249 2249 def first(self):
2250 2250 """return the first element in the set (user iteration perspective)
2251 2251
2252 2252 Return None if the set is empty"""
2253 2253 raise NotImplementedError()
2254 2254
2255 2255 def last(self):
2256 2256 """return the last element in the set (user iteration perspective)
2257 2257
2258 2258 Return None if the set is empty"""
2259 2259 raise NotImplementedError()
2260 2260
2261 2261 def __len__(self):
2262 2262         """return the length of the smartset
2263 2263 
2264 2264         This can be expensive on a smartset that could otherwise be lazy."""
2265 2265 raise NotImplementedError()
2266 2266
2267 2267 def reverse(self):
2268 2268 """reverse the expected iteration order"""
2269 2269 raise NotImplementedError()
2270 2270
2271 2271 def sort(self, reverse=True):
2272 2272 """get the set to iterate in an ascending or descending order"""
2273 2273 raise NotImplementedError()
2274 2274
2275 2275 def __and__(self, other):
2276 2276 """Returns a new object with the intersection of the two collections.
2277 2277
2278 2278 This is part of the mandatory API for smartset."""
2279 2279 return self.filter(other.__contains__, cache=False)
2280 2280
2281 2281 def __add__(self, other):
2282 2282 """Returns a new object with the union of the two collections.
2283 2283
2284 2284 This is part of the mandatory API for smartset."""
2285 2285 return addset(self, other)
2286 2286
2287 2287 def __sub__(self, other):
2288 2288         """Returns a new object with the subtraction of the two collections.
2289 2289
2290 2290 This is part of the mandatory API for smartset."""
2291 2291 c = other.__contains__
2292 2292 return self.filter(lambda r: not c(r), cache=False)
2293 2293
2294 2294 def filter(self, condition, cache=True):
2295 2295 """Returns this smartset filtered by condition as a new smartset.
2296 2296
2297 2297 `condition` is a callable which takes a revision number and returns a
2298 2298 boolean.
2299 2299
2300 2300 This is part of the mandatory API for smartset."""
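        # For instance (sketch): baseset([1, 2, 3, 4]).filter(lambda r: r % 2)
        # returns a lazily evaluated set that iterates over 1 and 3.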
2301 2301         # builtin functions cannot be cached, but they do not need to be
2302 2302 if cache and util.safehasattr(condition, 'func_code'):
2303 2303 condition = util.cachefunc(condition)
2304 2304 return filteredset(self, condition)
2305 2305
2306 2306 class baseset(abstractsmartset):
2307 2307 """Basic data structure that represents a revset and contains the basic
2308 2308     operations that it should be able to perform.
2309 2309
2310 2310 Every method in this class should be implemented by any smartset class.
2311 2311 """
2312 2312 def __init__(self, data=()):
2313 2313 if not isinstance(data, list):
2314 2314 data = list(data)
2315 2315 self._list = data
2316 2316 self._ascending = None
2317 2317
2318 2318 @util.propertycache
2319 2319 def _set(self):
2320 2320 return set(self._list)
2321 2321
2322 2322 @util.propertycache
2323 2323 def _asclist(self):
2324 2324 asclist = self._list[:]
2325 2325 asclist.sort()
2326 2326 return asclist
2327 2327
2328 2328 def __iter__(self):
2329 2329 if self._ascending is None:
2330 2330 return iter(self._list)
2331 2331 elif self._ascending:
2332 2332 return iter(self._asclist)
2333 2333 else:
2334 2334 return reversed(self._asclist)
2335 2335
2336 2336 def fastasc(self):
2337 2337 return iter(self._asclist)
2338 2338
2339 2339 def fastdesc(self):
2340 2340 return reversed(self._asclist)
2341 2341
2342 2342 @util.propertycache
2343 2343 def __contains__(self):
2344 2344 return self._set.__contains__
2345 2345
2346 2346 def __nonzero__(self):
2347 2347 return bool(self._list)
2348 2348
2349 2349 def sort(self, reverse=False):
2350 2350 self._ascending = not bool(reverse)
2351 2351
2352 2352 def reverse(self):
2353 2353 if self._ascending is None:
2354 2354 self._list.reverse()
2355 2355 else:
2356 2356 self._ascending = not self._ascending
2357 2357
2358 2358 def __len__(self):
2359 2359 return len(self._list)
2360 2360
2361 2361 def isascending(self):
2362 2362         """Returns True if the collection is in ascending order, False if not.
2363 2363
2364 2364 This is part of the mandatory API for smartset."""
2365 2365 if len(self) <= 1:
2366 2366 return True
2367 2367 return self._ascending is not None and self._ascending
2368 2368
2369 2369 def isdescending(self):
2370 2370         """Returns True if the collection is in descending order, False if not.
2371 2371
2372 2372 This is part of the mandatory API for smartset."""
2373 2373 if len(self) <= 1:
2374 2374 return True
2375 2375 return self._ascending is not None and not self._ascending
2376 2376
2377 2377 def first(self):
2378 2378 if self:
2379 2379 if self._ascending is None:
2380 2380 return self._list[0]
2381 2381 elif self._ascending:
2382 2382 return self._asclist[0]
2383 2383 else:
2384 2384 return self._asclist[-1]
2385 2385 return None
2386 2386
2387 2387 def last(self):
2388 2388 if self:
2389 2389 if self._ascending is None:
2390 2390 return self._list[-1]
2391 2391 elif self._ascending:
2392 2392 return self._asclist[-1]
2393 2393 else:
2394 2394 return self._asclist[0]
2395 2395 return None
2396 2396
2397 2397 class filteredset(abstractsmartset):
2398 2398 """Duck type for baseset class which iterates lazily over the revisions in
2399 2399 the subset and contains a function which tests for membership in the
2400 2400 revset
2401 2401 """
2402 2402 def __init__(self, subset, condition=lambda x: True):
2403 2403 """
2404 2404         condition: a function that decides whether a revision in the subset
2405 2405 belongs to the revset or not.
2406 2406 """
2407 2407 self._subset = subset
2408 2408 self._condition = condition
2409 2409 self._cache = {}
2410 2410
2411 2411 def __contains__(self, x):
2412 2412 c = self._cache
2413 2413 if x not in c:
2414 2414 v = c[x] = x in self._subset and self._condition(x)
2415 2415 return v
2416 2416 return c[x]
2417 2417
2418 2418 def __iter__(self):
2419 2419 return self._iterfilter(self._subset)
2420 2420
2421 2421 def _iterfilter(self, it):
2422 2422 cond = self._condition
2423 2423 for x in it:
2424 2424 if cond(x):
2425 2425 yield x
2426 2426
2427 2427 @property
2428 2428 def fastasc(self):
2429 2429 it = self._subset.fastasc
2430 2430 if it is None:
2431 2431 return None
2432 2432 return lambda: self._iterfilter(it())
2433 2433
2434 2434 @property
2435 2435 def fastdesc(self):
2436 2436 it = self._subset.fastdesc
2437 2437 if it is None:
2438 2438 return None
2439 2439 return lambda: self._iterfilter(it())
2440 2440
2441 2441 def __nonzero__(self):
2442 2442 for r in self:
2443 2443 return True
2444 2444 return False
2445 2445
2446 2446 def __len__(self):
2447 2447 # Basic implementation to be changed in future patches.
2448 2448 l = baseset([r for r in self])
2449 2449 return len(l)
2450 2450
2451 2451 def sort(self, reverse=False):
2452 2452 self._subset.sort(reverse=reverse)
2453 2453
2454 2454 def reverse(self):
2455 2455 self._subset.reverse()
2456 2456
2457 2457 def isascending(self):
2458 2458 return self._subset.isascending()
2459 2459
2460 2460 def isdescending(self):
2461 2461 return self._subset.isdescending()
2462 2462
2463 2463 def first(self):
2464 2464 for x in self:
2465 2465 return x
2466 2466 return None
2467 2467
2468 2468 def last(self):
2469 2469 it = None
2470 2470         if self._subset.isascending():
2471 2471             it = self.fastdesc
2472 2472         elif self._subset.isdescending():
2473 2473             it = self.fastasc
2474 2474 if it is None:
2475 2475 # slowly consume everything. This needs improvement
2476 2476 it = lambda: reversed(list(self))
2477 2477 for x in it():
2478 2478 return x
2479 2479 return None
2480 2480
2481 2481 class addset(abstractsmartset):
2482 2482 """Represent the addition of two sets
2483 2483
2484 2484 Wrapper structure for lazily adding two structures without losing much
2485 2485 performance on the __contains__ method
2486 2486
2487 2487 If the ascending attribute is set, that means the two structures are
2488 2488 ordered in either an ascending or descending way. Therefore, we can add
2489 2489 them maintaining the order by iterating over both at the same time
2490 2490 """
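    # A small sketch of both modes (values invented for illustration):
    #   addset(baseset([3, 1]), baseset([1, 2]))                 -> 3, 1, 2
    #   addset(baseset([1, 3]), baseset([2, 3]), ascending=True) -> 1, 2, 3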
2491 2491 def __init__(self, revs1, revs2, ascending=None):
2492 2492 self._r1 = revs1
2493 2493 self._r2 = revs2
2494 2494 self._iter = None
2495 2495 self._ascending = ascending
2496 2496 self._genlist = None
2497 2497 self._asclist = None
2498 2498
2499 2499 def __len__(self):
2500 2500 return len(self._list)
2501 2501
2502 2502 def __nonzero__(self):
2503 2503 return bool(self._r1) or bool(self._r2)
2504 2504
2505 2505 @util.propertycache
2506 2506 def _list(self):
2507 2507 if not self._genlist:
2508 2508 self._genlist = baseset(self._iterator())
2509 2509 return self._genlist
2510 2510
2511 2511 def _iterator(self):
2512 2512 """Iterate over both collections without repeating elements
2513 2513
2514 2514 If the ascending attribute is not set, iterate over the first one and
2515 2515 then over the second one checking for membership on the first one so we
2516 2516         don't yield any duplicates.
2517 2517
2518 2518 If the ascending attribute is set, iterate over both collections at the
2519 2519 same time, yielding only one value at a time in the given order.
2520 2520 """
2521 2521 if self._ascending is None:
2522 2522 def gen():
2523 2523 for r in self._r1:
2524 2524 yield r
2525 2525 inr1 = self._r1.__contains__
2526 2526 for r in self._r2:
2527 2527 if not inr1(r):
2528 2528 yield r
2529 2529 gen = gen()
2530 2530 else:
2531 2531 iter1 = iter(self._r1)
2532 2532 iter2 = iter(self._r2)
2533 2533 gen = self._iterordered(self._ascending, iter1, iter2)
2534 2534 return gen
2535 2535
2536 2536 def __iter__(self):
2537 2537 if self._ascending is None:
2538 2538 if self._genlist:
2539 2539 return iter(self._genlist)
2540 2540 return iter(self._iterator())
2541 2541 self._trysetasclist()
2542 2542 if self._ascending:
2543 2543 it = self.fastasc
2544 2544 else:
2545 2545 it = self.fastdesc
2546 2546 if it is None:
2547 2547 # consume the gen and try again
2548 2548 self._list
2549 2549 return iter(self)
2550 2550 return it()
2551 2551
2552 2552 def _trysetasclist(self):
2553 2553 """populate the _asclist attribute if possible and necessary"""
2554 2554 if self._genlist is not None and self._asclist is None:
2555 2555 self._asclist = sorted(self._genlist)
2556 2556
2557 2557 @property
2558 2558 def fastasc(self):
2559 2559 self._trysetasclist()
2560 2560 if self._asclist is not None:
2561 2561 return self._asclist.__iter__
2562 2562 iter1 = self._r1.fastasc
2563 2563 iter2 = self._r2.fastasc
2564 2564 if None in (iter1, iter2):
2565 2565 return None
2566 2566 return lambda: self._iterordered(True, iter1(), iter2())
2567 2567
2568 2568 @property
2569 2569 def fastdesc(self):
2570 2570 self._trysetasclist()
2571 2571 if self._asclist is not None:
2572 2572 return self._asclist.__reversed__
2573 2573 iter1 = self._r1.fastdesc
2574 2574 iter2 = self._r2.fastdesc
2575 2575 if None in (iter1, iter2):
2576 2576 return None
2577 2577 return lambda: self._iterordered(False, iter1(), iter2())
2578 2578
2579 2579 def _iterordered(self, ascending, iter1, iter2):
2580 2580 """produce an ordered iteration from two iterators with the same order
2581 2581
2582 2582         The `ascending` parameter indicates the iteration direction.
2583 2583 """
2584 2584 choice = max
2585 2585 if ascending:
2586 2586 choice = min
2587 2587
2588 2588 val1 = None
2589 2589 val2 = None
2590 2590
2594 2594 try:
2595 2595 # Consume both iterators in an ordered way until one is
2596 2596 # empty
2597 2597 while True:
2598 2598 if val1 is None:
2599 2599 val1 = iter1.next()
2600 2600 if val2 is None:
2601 2601 val2 = iter2.next()
2602 2602 next = choice(val1, val2)
2603 2603 yield next
2604 2604 if val1 == next:
2605 2605 val1 = None
2606 2606 if val2 == next:
2607 2607 val2 = None
2608 2608 except StopIteration:
2609 2609 # Flush any remaining values and consume the other one
2610 2610 it = iter2
2611 2611 if val1 is not None:
2612 2612 yield val1
2613 2613 it = iter1
2614 2614 elif val2 is not None:
2615 2615 # might have been equality and both are empty
2616 2616 yield val2
2617 2617 for val in it:
2618 2618 yield val
2619 2619
2620 2620 def __contains__(self, x):
2621 2621 return x in self._r1 or x in self._r2
2622 2622
2623 2623 def sort(self, reverse=False):
2624 2624 """Sort the added set
2625 2625
2626 2626 For this we use the cached list with all the generated values and if we
2627 2627 know they are ascending or descending we can sort them in a smart way.
2628 2628 """
2629 2629 self._ascending = not reverse
2630 2630
2631 2631 def isascending(self):
2632 2632 return self._ascending is not None and self._ascending
2633 2633
2634 2634 def isdescending(self):
2635 2635 return self._ascending is not None and not self._ascending
2636 2636
2637 2637 def reverse(self):
2638 2638 if self._ascending is None:
2639 2639 self._list.reverse()
2640 2640 else:
2641 2641 self._ascending = not self._ascending
2642 2642
2643 2643 def first(self):
2644 2644 for x in self:
2645 2645 return x
2646 2646 return None
2647 2647
2648 2648 def last(self):
2649 2649 self.reverse()
2650 2650 val = self.first()
2651 2651 self.reverse()
2652 2652 return val
2653 2653
2654 2654 class generatorset(abstractsmartset):
2655 2655 """Wrap a generator for lazy iteration
2656 2656
2657 2657 Wrapper structure for generators that provides lazy membership and can
2658 2658 be iterated more than once.
2659 2659 When asked for membership it generates values until either it finds the
2660 2660 requested one or has gone through all the elements in the generator
2661 2661 """
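    # Laziness sketch (hypothetical generator, not from the source): with
    #   gs = generatorset(iter([2, 5, 9]), iterasc=True)
    # gs.__contains__(7) only consumes the generator until it sees 9 (> 7)
    # and answers False; gs.__contains__(5) is then served from the cache.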
2662 2662 def __init__(self, gen, iterasc=None):
2663 2663 """
2664 2664 gen: a generator producing the values for the generatorset.
2665 2665 """
2666 2666 self._gen = gen
2667 2667 self._asclist = None
2668 2668 self._cache = {}
2669 2669 self._genlist = []
2670 2670 self._finished = False
2671 2671 self._ascending = True
2672 2672 if iterasc is not None:
2673 2673 if iterasc:
2674 2674 self.fastasc = self._iterator
2675 2675 self.__contains__ = self._asccontains
2676 2676 else:
2677 2677 self.fastdesc = self._iterator
2678 2678 self.__contains__ = self._desccontains
2679 2679
2680 2680 def __nonzero__(self):
2681 2681 for r in self:
2682 2682 return True
2683 2683 return False
2684 2684
2685 2685 def __contains__(self, x):
2686 2686 if x in self._cache:
2687 2687 return self._cache[x]
2688 2688
2689 2689 # Use new values only, as existing values would be cached.
2690 2690 for l in self._consumegen():
2691 2691 if l == x:
2692 2692 return True
2693 2693
2694 2694 self._cache[x] = False
2695 2695 return False
2696 2696
2697 2697 def _asccontains(self, x):
2698 2698 """version of contains optimised for ascending generator"""
2699 2699 if x in self._cache:
2700 2700 return self._cache[x]
2701 2701
2702 2702 # Use new values only, as existing values would be cached.
2703 2703 for l in self._consumegen():
2704 2704 if l == x:
2705 2705 return True
2706 2706 if l > x:
2707 2707 break
2708 2708
2709 2709 self._cache[x] = False
2710 2710 return False
2711 2711
2712 2712 def _desccontains(self, x):
2713 2713 """version of contains optimised for descending generator"""
2714 2714 if x in self._cache:
2715 2715 return self._cache[x]
2716 2716
2717 2717 # Use new values only, as existing values would be cached.
2718 2718 for l in self._consumegen():
2719 2719 if l == x:
2720 2720 return True
2721 2721 if l < x:
2722 2722 break
2723 2723
2724 2724 self._cache[x] = False
2725 2725 return False
2726 2726
2727 2727 def __iter__(self):
2728 2728 if self._ascending:
2729 2729 it = self.fastasc
2730 2730 else:
2731 2731 it = self.fastdesc
2732 2732 if it is not None:
2733 2733 return it()
2734 2734 # we need to consume the iterator
2735 2735 for x in self._consumegen():
2736 2736 pass
2737 2737 # recall the same code
2738 2738 return iter(self)
2739 2739
2740 2740 def _iterator(self):
2741 2741 if self._finished:
2742 2742 return iter(self._genlist)
2743 2743
2744 2744 # We have to use this complex iteration strategy to allow multiple
2745 2745         # iterations at the same time. We need to be able to catch revisions
2746 2746         # removed from _consumegen and added to genlist by another instance.
2747 2747         #
2748 2748         # Getting rid of it would provide about a 15% speedup on this
2749 2749 # iteration.
2750 2750 genlist = self._genlist
2751 2751 nextrev = self._consumegen().next
2752 2752 _len = len # cache global lookup
2753 2753 def gen():
2754 2754 i = 0
2755 2755 while True:
2756 2756 if i < _len(genlist):
2757 2757 yield genlist[i]
2758 2758 else:
2759 2759 yield nextrev()
2760 2760 i += 1
2761 2761 return gen()
2762 2762
2763 2763 def _consumegen(self):
2764 2764 cache = self._cache
2765 2765 genlist = self._genlist.append
2766 2766 for item in self._gen:
2767 2767 cache[item] = True
2768 2768 genlist(item)
2769 2769 yield item
2770 2770 if not self._finished:
2771 2771 self._finished = True
2772 2772 asc = self._genlist[:]
2773 2773 asc.sort()
2774 2774 self._asclist = asc
2775 2775 self.fastasc = asc.__iter__
2776 2776 self.fastdesc = asc.__reversed__
2777 2777
2778 2778 def __len__(self):
2779 2779 for x in self._consumegen():
2780 2780 pass
2781 2781 return len(self._genlist)
2782 2782
2783 2783 def sort(self, reverse=False):
2784 2784 self._ascending = not reverse
2785 2785
2786 2786 def reverse(self):
2787 2787 self._ascending = not self._ascending
2788 2788
2789 2789 def isascending(self):
2790 2790 return self._ascending
2791 2791
2792 2792 def isdescending(self):
2793 2793 return not self._ascending
2794 2794
2795 2795 def first(self):
2796 2796 if self._ascending:
2797 2797 it = self.fastasc
2798 2798 else:
2799 2799 it = self.fastdesc
2800 2800 if it is None:
2801 2801 # we need to consume all and try again
2802 2802 for x in self._consumegen():
2803 2803 pass
2804 2804 return self.first()
2805 2805 if self:
2806 2806             return it().next()
2807 2807 return None
2808 2808
2809 2809 def last(self):
2810 2810 if self._ascending:
2811 2811 it = self.fastdesc
2812 2812 else:
2813 2813 it = self.fastasc
2814 2814 if it is None:
2815 2815 # we need to consume all and try again
2816 2816 for x in self._consumegen():
2817 2817 pass
2818 2818             return self.last()
2819 2819 if self:
2820 2820             return it().next()
2821 2821 return None
2822 2822
2823 2823 def spanset(repo, start=None, end=None):
2824 2824 """factory function to dispatch between fullreposet and actual spanset
2825 2825
2826 2826 Feel free to update all spanset call sites and kill this function at some
2827 2827 point.
2828 2828 """
2829 2829 if start is None and end is None:
2830 2830 return fullreposet(repo)
2831 2831 return _spanset(repo, start, end)
2832 2832
2833 2833
2834 2834 class _spanset(abstractsmartset):
2835 2835 """Duck type for baseset class which represents a range of revisions and
2836 2836 can work lazily and without having all the range in memory
2837 2837
2838 2838     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2839 2839     notable points:
2840 2840     - when x > y it will be automatically descending,
2841 2841     - revisions filtered by this repoview will be skipped.
2842 2842
2843 2843 """
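    # For example (sketch, using the constructor below):
    #   _spanset(repo, 2, 5) iterates 2, 3, 4 (like xrange(2, 5))
    #   _spanset(repo, 5, 2) iterates 5, 4, 3 (automatically descending)
    # In both cases revisions hidden by the current repoview are skipped.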
2844 2844 def __init__(self, repo, start=0, end=None):
2845 2845 """
2846 2846         start: first revision included in the set
2847 2847           (defaults to 0)
2848 2848         end:   first revision excluded (last + 1)
2849 2849           (defaults to len(repo))
2850 2850
2851 2851 Spanset will be descending if `end` < `start`.
2852 2852 """
2853 2853 if end is None:
2854 2854 end = len(repo)
2855 2855 self._ascending = start <= end
2856 2856 if not self._ascending:
2857 2857             start, end = end + 1, start + 1
2858 2858 self._start = start
2859 2859 self._end = end
2860 2860 self._hiddenrevs = repo.changelog.filteredrevs
2861 2861
2862 2862 def sort(self, reverse=False):
2863 2863 self._ascending = not reverse
2864 2864
2865 2865 def reverse(self):
2866 2866 self._ascending = not self._ascending
2867 2867
2868 2868 def _iterfilter(self, iterrange):
2869 2869 s = self._hiddenrevs
2870 2870 for r in iterrange:
2871 2871 if r not in s:
2872 2872 yield r
2873 2873
2874 2874 def __iter__(self):
2875 2875 if self._ascending:
2876 2876 return self.fastasc()
2877 2877 else:
2878 2878 return self.fastdesc()
2879 2879
2880 2880 def fastasc(self):
2881 2881 iterrange = xrange(self._start, self._end)
2882 2882 if self._hiddenrevs:
2883 2883 return self._iterfilter(iterrange)
2884 2884 return iter(iterrange)
2885 2885
2886 2886 def fastdesc(self):
2887 2887 iterrange = xrange(self._end - 1, self._start - 1, -1)
2888 2888 if self._hiddenrevs:
2889 2889 return self._iterfilter(iterrange)
2890 2890 return iter(iterrange)
2891 2891
2892 2892 def __contains__(self, rev):
2893 2893 hidden = self._hiddenrevs
2894 2894 return ((self._start <= rev < self._end)
2895 2895 and not (hidden and rev in hidden))
2896 2896
2897 2897 def __nonzero__(self):
2898 2898 for r in self:
2899 2899 return True
2900 2900 return False
2901 2901
2902 2902 def __len__(self):
2903 2903 if not self._hiddenrevs:
2904 2904 return abs(self._end - self._start)
2905 2905 else:
2906 2906 count = 0
2907 2907 start = self._start
2908 2908 end = self._end
2909 2909 for rev in self._hiddenrevs:
2910 2910 if (end < rev <= start) or (start <= rev < end):
2911 2911 count += 1
2912 2912 return abs(self._end - self._start) - count
2913 2913
2914 2914 def isascending(self):
2915 2915 return self._start <= self._end
2916 2916
2917 2917 def isdescending(self):
2918 2918 return self._start >= self._end
2919 2919
2920 2920 def first(self):
2921 2921 if self._ascending:
2922 2922 it = self.fastasc
2923 2923 else:
2924 2924 it = self.fastdesc
2925 2925 for x in it():
2926 2926 return x
2927 2927 return None
2928 2928
2929 2929 def last(self):
2930 2930 if self._ascending:
2931 2931 it = self.fastdesc
2932 2932 else:
2933 2933 it = self.fastasc
2934 2934 for x in it():
2935 2935 return x
2936 2936 return None
2937 2937
2938 2938 class fullreposet(_spanset):
2939 2939 """a set containing all revisions in the repo
2940 2940
2941 2941 This class exists to host special optimization.
2942 2942 """
2943 2943
2944 2944 def __init__(self, repo):
2945 2945 super(fullreposet, self).__init__(repo)
2946 2946
2947 2947 def __and__(self, other):
2948 2948 """As self contains the whole repo, all of the other set should also be
2949 2949 in self. Therefore `self & other = other`.
2950 2950
2951 2951 This boldly assumes the other contains valid revs only.
2952 2952 """
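        # For instance (sketch): fullreposet(repo) & baseset([3, 1, 2]) simply
        # sorts the right-hand smartset ascending and returns it.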
2953 2953         # other is not a smartset, make it so
2954 2954         if not util.safehasattr(other, 'isascending'):
2955 2955             # filter out hidden revisions
2956 2956             # (this boldly assumes all smartsets are pure)
2957 2957 #
2958 2958 # `other` was used with "&", let's assume this is a set like
2959 2959 # object.
2960 2960 other = baseset(other - self._hiddenrevs)
2961 2961
2962 2962 if self.isascending():
2963 2963 other.sort()
2964 2964 else:
2965 2965             other.sort(reverse=True)
2966 2966 return other
2967 2967
2968 2968 # tell hggettext to extract docstrings from these functions:
2969 2969 i18nfunctions = symbols.values()