baseset: fix isascending and isdescending...
Pierre-Yves David
r22828:966860f7 default
@@ -1,3023 +1,3023 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return generatorset(iterate(), iterasc=False)
50 50
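# Editor's note: a minimal, self-contained sketch of the negated-heap walk that
# _revancestors() performs above, run against a toy parent table instead of a
# real changelog (the table below is hypothetical). Pushing -rev turns Python's
# min-heap into a max-heap, so ancestors stream out in descending revision
# order without materializing the whole set; cut=1 mimics followfirst.
import heapq

def toy_ancestors(parentrevs, revs, cut=None):
    """Yield ancestors of `revs` (inclusive) in descending order."""
    nullrev = -1
    h = [-r for r in revs]
    heapq.heapify(h)
    seen = set([nullrev])
    while h:
        current = -heapq.heappop(h)
        if current not in seen:
            seen.add(current)
            yield current
            for parent in parentrevs[current][:cut]:
                if parent != nullrev:
                    heapq.heappush(h, -parent)

# rev 3 merges revs 1 and 2, which are both children of rev 0
_toyparents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
assert list(toy_ancestors(_toyparents, [3])) == [3, 2, 1, 0]
assert list(toy_ancestors(_toyparents, [3], cut=1)) == [3, 1, 0]  # first parents only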
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return generatorset(iterate(), iterasc=True)
74 74
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset()
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = list(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset()
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
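# Editor's note: an illustrative re-implementation of the two-pass idea in
# _revsbetween() above, on a plain dict of parent tuples (hypothetical data).
# Pass one walks from the heads down to minroot recording every parent seen;
# pass two sweeps the recorded revs in ascending order and marks a rev
# reachable as soon as one of its parents is, which keeps the traversal
# iterative despite the small sys.getrecursionlimit().
def toy_revsbetween(parentrevs, roots, heads):
    if not roots:
        return []
    minroot = min(roots)
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs[rev]
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return []
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return sorted(reachable)

# two paths 0->1->3 and 0->2->3; rev 4 hangs off 2 but is not between 0 and 3
_toyparents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2), 4: (2, -1)}
assert toy_revsbetween(_toyparents, [0], [3]) == [0, 1, 2, 3]
assert toy_revsbetween(_toyparents, [1], [3]) == [1, 3]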
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program, lookup=None):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 elif '-' in sym:
191 191 # some jerk gave us foo-bar-baz, try to check if it's a symbol
192 192 if lookup and lookup(sym):
193 193 # looks like a real symbol
194 194 yield ('symbol', sym, s)
195 195 else:
196 196 # looks like an expression
197 197 parts = sym.split('-')
198 198 for p in parts[:-1]:
199 199 if p: # possible consecutive -
200 200 yield ('symbol', p, s)
201 201 s += len(p)
202 202 yield ('-', None, pos)
203 203 s += 1
204 204 if parts[-1]: # possible trailing -
205 205 yield ('symbol', parts[-1], s)
206 206 else:
207 207 yield ('symbol', sym, s)
208 208 pos -= 1
209 209 else:
210 210 raise error.ParseError(_("syntax error"), pos)
211 211 pos += 1
212 212 yield ('end', None, pos)
213 213
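# Editor's note: a few extra illustrative token streams for tokenize() above,
# shown without positions (the doctest in the docstring gives the exact tuple
# shape). The dashed-name fallback is the non-obvious part: a name such as
# "tip-1" is first offered to `lookup`, and only if that fails is it split
# into a minus expression.
#
#   tokenize("tip-1")                                -> symbol 'tip', '-', symbol '1', end
#   tokenize("tip-1", lookup=lambda s: s == "tip-1") -> symbol 'tip-1', end
#   tokenize("keyword('fix')")                       -> symbol 'keyword', '(', string 'fix', ')', end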
214 214 # helpers
215 215
216 216 def getstring(x, err):
217 217 if x and (x[0] == 'string' or x[0] == 'symbol'):
218 218 return x[1]
219 219 raise error.ParseError(err)
220 220
221 221 def getlist(x):
222 222 if not x:
223 223 return []
224 224 if x[0] == 'list':
225 225 return getlist(x[1]) + [x[2]]
226 226 return [x]
227 227
228 228 def getargs(x, min, max, err):
229 229 l = getlist(x)
230 230 if len(l) < min or (max >= 0 and len(l) > max):
231 231 raise error.ParseError(err)
232 232 return l
233 233
234 234 def getset(repo, subset, x):
235 235 if not x:
236 236 raise error.ParseError(_("missing argument"))
237 237 s = methods[x[0]](repo, subset, *x[1:])
238 238 if util.safehasattr(s, 'set'):
239 239 return s
240 240 return baseset(s)
241 241
242 242 def _getrevsource(repo, r):
243 243 extra = repo[r].extra()
244 244 for label in ('source', 'transplant_source', 'rebase_source'):
245 245 if label in extra:
246 246 try:
247 247 return repo[extra[label]].rev()
248 248 except error.RepoLookupError:
249 249 pass
250 250 return None
251 251
252 252 # operator methods
253 253
254 254 def stringset(repo, subset, x):
255 255 x = repo[x].rev()
256 256 if x == -1 and len(subset) == len(repo):
257 257 return baseset([-1])
258 258 if len(subset) == len(repo) or x in subset:
259 259 return baseset([x])
260 260 return baseset()
261 261
262 262 def symbolset(repo, subset, x):
263 263 if x in symbols:
264 264 raise error.ParseError(_("can't use %s here") % x)
265 265 return stringset(repo, subset, x)
266 266
267 267 def rangeset(repo, subset, x, y):
268 268 cl = baseset(repo.changelog)
269 269 m = getset(repo, cl, x)
270 270 n = getset(repo, cl, y)
271 271
272 272 if not m or not n:
273 273 return baseset()
274 274 m, n = m.first(), n.last()
275 275
276 276 if m < n:
277 277 r = spanset(repo, m, n + 1)
278 278 else:
279 279 r = spanset(repo, m, n - 1)
280 280 return r & subset
281 281
282 282 def dagrange(repo, subset, x, y):
283 283 r = spanset(repo)
284 284 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
285 285 s = subset.set()
286 286 return xs.filter(s.__contains__)
287 287
288 288 def andset(repo, subset, x, y):
289 289 return getset(repo, getset(repo, subset, x), y)
290 290
291 291 def orset(repo, subset, x, y):
292 292 xl = getset(repo, subset, x)
293 293 yl = getset(repo, subset - xl, y)
294 294 return xl + yl
295 295
296 296 def notset(repo, subset, x):
297 297 return subset - getset(repo, subset, x)
298 298
299 299 def listset(repo, subset, a, b):
300 300 raise error.ParseError(_("can't use a list in this context"))
301 301
302 302 def func(repo, subset, a, b):
303 303 if a[0] == 'symbol' and a[1] in symbols:
304 304 return symbols[a[1]](repo, subset, b)
305 305 raise error.ParseError(_("not a function: %s") % a[1])
306 306
307 307 # functions
308 308
309 309 def adds(repo, subset, x):
310 310 """``adds(pattern)``
311 311 Changesets that add a file matching pattern.
312 312
313 313 The pattern without explicit kind like ``glob:`` is expected to be
314 314 relative to the current directory and match against a file or a
315 315 directory.
316 316 """
317 317 # i18n: "adds" is a keyword
318 318 pat = getstring(x, _("adds requires a pattern"))
319 319 return checkstatus(repo, subset, pat, 1)
320 320
321 321 def ancestor(repo, subset, x):
322 322 """``ancestor(*changeset)``
323 323 A greatest common ancestor of the changesets.
324 324
325 325 Accepts 0 or more changesets.
326 326 Will return empty list when passed no args.
327 327 Greatest common ancestor of a single changeset is that changeset.
328 328 """
329 329 # i18n: "ancestor" is a keyword
330 330 l = getlist(x)
331 331 rl = spanset(repo)
332 332 anc = None
333 333
334 334 # (getset(repo, rl, i) for i in l) generates a list of lists
335 335 for revs in (getset(repo, rl, i) for i in l):
336 336 for r in revs:
337 337 if anc is None:
338 338 anc = repo[r]
339 339 else:
340 340 anc = anc.ancestor(repo[r])
341 341
342 342 if anc is not None and anc.rev() in subset:
343 343 return baseset([anc.rev()])
344 344 return baseset()
345 345
346 346 def _ancestors(repo, subset, x, followfirst=False):
347 347 args = getset(repo, spanset(repo), x)
348 348 if not args:
349 349 return baseset()
350 350 s = _revancestors(repo, args, followfirst)
351 351 return subset.filter(s.__contains__)
352 352
353 353 def ancestors(repo, subset, x):
354 354 """``ancestors(set)``
355 355 Changesets that are ancestors of a changeset in set.
356 356 """
357 357 return _ancestors(repo, subset, x)
358 358
359 359 def _firstancestors(repo, subset, x):
360 360 # ``_firstancestors(set)``
361 361 # Like ``ancestors(set)`` but follows only the first parents.
362 362 return _ancestors(repo, subset, x, followfirst=True)
363 363
364 364 def ancestorspec(repo, subset, x, n):
365 365 """``set~n``
366 366 Changesets that are the Nth ancestor (first parents only) of a changeset
367 367 in set.
368 368 """
369 369 try:
370 370 n = int(n[1])
371 371 except (TypeError, ValueError):
372 372 raise error.ParseError(_("~ expects a number"))
373 373 ps = set()
374 374 cl = repo.changelog
375 375 for r in getset(repo, baseset(cl), x):
376 376 for i in range(n):
377 377 r = cl.parentrevs(r)[0]
378 378 ps.add(r)
379 379 return subset & ps
380 380
381 381 def author(repo, subset, x):
382 382 """``author(string)``
383 383 Alias for ``user(string)``.
384 384 """
385 385 # i18n: "author" is a keyword
386 386 n = encoding.lower(getstring(x, _("author requires a string")))
387 387 kind, pattern, matcher = _substringmatcher(n)
388 388 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
389 389
390 390 def only(repo, subset, x):
391 391 """``only(set, [set])``
392 392 Changesets that are ancestors of the first set that are not ancestors
393 393 of any other head in the repo. If a second set is specified, the result
394 394 is ancestors of the first set that are not ancestors of the second set
395 395 (i.e. ::<set1> - ::<set2>).
396 396 """
397 397 cl = repo.changelog
398 398 # i18n: "only" is a keyword
399 399 args = getargs(x, 1, 2, _('only takes one or two arguments'))
400 400 include = getset(repo, spanset(repo), args[0]).set()
401 401 if len(args) == 1:
402 402 if len(include) == 0:
403 403 return baseset()
404 404
405 405 descendants = set(_revdescendants(repo, include, False))
406 406 exclude = [rev for rev in cl.headrevs()
407 407 if not rev in descendants and not rev in include]
408 408 else:
409 409 exclude = getset(repo, spanset(repo), args[1])
410 410
411 411 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
412 412 return filteredset(subset, results.__contains__)
413 413
414 414 def bisect(repo, subset, x):
415 415 """``bisect(string)``
416 416 Changesets marked in the specified bisect status:
417 417
418 418 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
419 419 - ``goods``, ``bads`` : csets topologically good/bad
420 420 - ``range`` : csets taking part in the bisection
421 421 - ``pruned`` : csets that are goods, bads or skipped
422 422 - ``untested`` : csets whose fate is yet unknown
423 423 - ``ignored`` : csets ignored due to DAG topology
424 424 - ``current`` : the cset currently being bisected
425 425 """
426 426 # i18n: "bisect" is a keyword
427 427 status = getstring(x, _("bisect requires a string")).lower()
428 428 state = set(hbisect.get(repo, status))
429 429 return subset & state
430 430
431 431 # Backward-compatibility
432 432 # - no help entry so that we do not advertise it any more
433 433 def bisected(repo, subset, x):
434 434 return bisect(repo, subset, x)
435 435
436 436 def bookmark(repo, subset, x):
437 437 """``bookmark([name])``
438 438 The named bookmark or all bookmarks.
439 439
440 440 If `name` starts with `re:`, the remainder of the name is treated as
441 441 a regular expression. To match a bookmark that actually starts with `re:`,
442 442 use the prefix `literal:`.
443 443 """
444 444 # i18n: "bookmark" is a keyword
445 445 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
446 446 if args:
447 447 bm = getstring(args[0],
448 448 # i18n: "bookmark" is a keyword
449 449 _('the argument to bookmark must be a string'))
450 450 kind, pattern, matcher = _stringmatcher(bm)
451 451 bms = set()
452 452 if kind == 'literal':
453 453 bmrev = repo._bookmarks.get(pattern, None)
454 454 if not bmrev:
455 455 raise util.Abort(_("bookmark '%s' does not exist") % bm)
456 456 bms.add(repo[bmrev].rev())
457 457 else:
458 458 matchrevs = set()
459 459 for name, bmrev in repo._bookmarks.iteritems():
460 460 if matcher(name):
461 461 matchrevs.add(bmrev)
462 462 if not matchrevs:
463 463 raise util.Abort(_("no bookmarks exist that match '%s'")
464 464 % pattern)
465 465 for bmrev in matchrevs:
466 466 bms.add(repo[bmrev].rev())
467 467 else:
468 468 bms = set([repo[r].rev()
469 469 for r in repo._bookmarks.values()])
470 470 bms -= set([node.nullrev])
471 471 return subset & bms
472 472
473 473 def branch(repo, subset, x):
474 474 """``branch(string or set)``
475 475 All changesets belonging to the given branch or the branches of the given
476 476 changesets.
477 477
478 478 If `string` starts with `re:`, the remainder of the name is treated as
479 479 a regular expression. To match a branch that actually starts with `re:`,
480 480 use the prefix `literal:`.
481 481 """
482 482 try:
483 483 b = getstring(x, '')
484 484 except error.ParseError:
485 485 # not a string, but another revspec, e.g. tip()
486 486 pass
487 487 else:
488 488 kind, pattern, matcher = _stringmatcher(b)
489 489 if kind == 'literal':
490 490 # note: falls through to the revspec case if no branch with
491 491 # this name exists
492 492 if pattern in repo.branchmap():
493 493 return subset.filter(lambda r: matcher(repo[r].branch()))
494 494 else:
495 495 return subset.filter(lambda r: matcher(repo[r].branch()))
496 496
497 497 s = getset(repo, spanset(repo), x)
498 498 b = set()
499 499 for r in s:
500 500 b.add(repo[r].branch())
501 501 s = s.set()
502 502 return subset.filter(lambda r: r in s or repo[r].branch() in b)
503 503
504 504 def bumped(repo, subset, x):
505 505 """``bumped()``
506 506 Mutable changesets marked as successors of public changesets.
507 507
508 508 Only non-public and non-obsolete changesets can be `bumped`.
509 509 """
510 510 # i18n: "bumped" is a keyword
511 511 getargs(x, 0, 0, _("bumped takes no arguments"))
512 512 bumped = obsmod.getrevs(repo, 'bumped')
513 513 return subset & bumped
514 514
515 515 def bundle(repo, subset, x):
516 516 """``bundle()``
517 517 Changesets in the bundle.
518 518
519 519 Bundle must be specified by the -R option."""
520 520
521 521 try:
522 522 bundlerevs = repo.changelog.bundlerevs
523 523 except AttributeError:
524 524 raise util.Abort(_("no bundle provided - specify with -R"))
525 525 return subset & bundlerevs
526 526
527 527 def checkstatus(repo, subset, pat, field):
528 528 hasset = matchmod.patkind(pat) == 'set'
529 529
530 530 def matches(x):
531 531 m = None
532 532 fname = None
533 533 c = repo[x]
534 534 if not m or hasset:
535 535 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
536 536 if not m.anypats() and len(m.files()) == 1:
537 537 fname = m.files()[0]
538 538 if fname is not None:
539 539 if fname not in c.files():
540 540 return False
541 541 else:
542 542 for f in c.files():
543 543 if m(f):
544 544 break
545 545 else:
546 546 return False
547 547 files = repo.status(c.p1().node(), c.node())[field]
548 548 if fname is not None:
549 549 if fname in files:
550 550 return True
551 551 else:
552 552 for f in files:
553 553 if m(f):
554 554 return True
555 555
556 556 return subset.filter(matches)
557 557
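# Editor's note: the `field` argument of checkstatus() above indexes into the
# status tuple returned by repo.status() -- (modified, added, removed, ...) --
# which is why the callers in this file pass the following values:
#
#   modifies(pattern) -> checkstatus(repo, subset, pattern, 0)  # modified
#   adds(pattern)     -> checkstatus(repo, subset, pattern, 1)  # added
#   removes(pattern)  -> checkstatus(repo, subset, pattern, 2)  # removed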
558 558 def _children(repo, narrow, parentset):
559 559 cs = set()
560 560 if not parentset:
561 561 return baseset(cs)
562 562 pr = repo.changelog.parentrevs
563 563 minrev = min(parentset)
564 564 for r in narrow:
565 565 if r <= minrev:
566 566 continue
567 567 for p in pr(r):
568 568 if p in parentset:
569 569 cs.add(r)
570 570 return baseset(cs)
571 571
572 572 def children(repo, subset, x):
573 573 """``children(set)``
574 574 Child changesets of changesets in set.
575 575 """
576 576 s = getset(repo, baseset(repo), x).set()
577 577 cs = _children(repo, subset, s)
578 578 return subset & cs
579 579
580 580 def closed(repo, subset, x):
581 581 """``closed()``
582 582 Changeset is closed.
583 583 """
584 584 # i18n: "closed" is a keyword
585 585 getargs(x, 0, 0, _("closed takes no arguments"))
586 586 return subset.filter(lambda r: repo[r].closesbranch())
587 587
588 588 def contains(repo, subset, x):
589 589 """``contains(pattern)``
590 590 The revision's manifest contains a file matching pattern (but might not
591 591 modify it). See :hg:`help patterns` for information about file patterns.
592 592
593 593 The pattern without explicit kind like ``glob:`` is expected to be
594 594 relative to the current directory and match against a file exactly
595 595 for efficiency.
596 596 """
597 597 # i18n: "contains" is a keyword
598 598 pat = getstring(x, _("contains requires a pattern"))
599 599
600 600 def matches(x):
601 601 if not matchmod.patkind(pat):
602 602 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
603 603 if pats in repo[x]:
604 604 return True
605 605 else:
606 606 c = repo[x]
607 607 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
608 608 for f in c.manifest():
609 609 if m(f):
610 610 return True
611 611 return False
612 612
613 613 return subset.filter(matches)
614 614
615 615 def converted(repo, subset, x):
616 616 """``converted([id])``
617 617 Changesets converted from the given identifier in the old repository if
618 618 present, or all converted changesets if no identifier is specified.
619 619 """
620 620
621 621 # There is exactly no chance of resolving the revision, so do a simple
622 622 # string compare and hope for the best
623 623
624 624 rev = None
625 625 # i18n: "converted" is a keyword
626 626 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
627 627 if l:
628 628 # i18n: "converted" is a keyword
629 629 rev = getstring(l[0], _('converted requires a revision'))
630 630
631 631 def _matchvalue(r):
632 632 source = repo[r].extra().get('convert_revision', None)
633 633 return source is not None and (rev is None or source.startswith(rev))
634 634
635 635 return subset.filter(lambda r: _matchvalue(r))
636 636
637 637 def date(repo, subset, x):
638 638 """``date(interval)``
639 639 Changesets within the interval, see :hg:`help dates`.
640 640 """
641 641 # i18n: "date" is a keyword
642 642 ds = getstring(x, _("date requires a string"))
643 643 dm = util.matchdate(ds)
644 644 return subset.filter(lambda x: dm(repo[x].date()[0]))
645 645
646 646 def desc(repo, subset, x):
647 647 """``desc(string)``
648 648 Search commit message for string. The match is case-insensitive.
649 649 """
650 650 # i18n: "desc" is a keyword
651 651 ds = encoding.lower(getstring(x, _("desc requires a string")))
652 652
653 653 def matches(x):
654 654 c = repo[x]
655 655 return ds in encoding.lower(c.description())
656 656
657 657 return subset.filter(matches)
658 658
659 659 def _descendants(repo, subset, x, followfirst=False):
660 660 args = getset(repo, spanset(repo), x)
661 661 if not args:
662 662 return baseset()
663 663 s = _revdescendants(repo, args, followfirst)
664 664
665 665 # Both sets need to be ascending in order to lazily return the union
666 666 # in the correct order.
667 667 args.ascending()
668 668 result = (filteredset(s, subset.__contains__, ascending=True) +
669 669 filteredset(args, subset.__contains__, ascending=True))
670 670
671 671 # Wrap result in a filteredset since it's an addset, which doesn't
672 672 # implement all the necessary functions to be consumed by callers.
673 673 return filteredset(result, lambda r: True, ascending=True)
674 674
675 675 def descendants(repo, subset, x):
676 676 """``descendants(set)``
677 677 Changesets which are descendants of changesets in set.
678 678 """
679 679 return _descendants(repo, subset, x)
680 680
681 681 def _firstdescendants(repo, subset, x):
682 682 # ``_firstdescendants(set)``
683 683 # Like ``descendants(set)`` but follows only the first parents.
684 684 return _descendants(repo, subset, x, followfirst=True)
685 685
686 686 def destination(repo, subset, x):
687 687 """``destination([set])``
688 688 Changesets that were created by a graft, transplant or rebase operation,
689 689 with the given revisions specified as the source. Omitting the optional set
690 690 is the same as passing all().
691 691 """
692 692 if x is not None:
693 693 args = getset(repo, spanset(repo), x).set()
694 694 else:
695 695 args = getall(repo, spanset(repo), x).set()
696 696
697 697 dests = set()
698 698
699 699 # subset contains all of the possible destinations that can be returned, so
700 700 # iterate over them and see if their source(s) were provided in the args.
701 701 # Even if the immediate src of r is not in the args, src's source (or
702 702 # further back) may be. Scanning back further than the immediate src allows
703 703 # transitive transplants and rebases to yield the same results as transitive
704 704 # grafts.
705 705 for r in subset:
706 706 src = _getrevsource(repo, r)
707 707 lineage = None
708 708
709 709 while src is not None:
710 710 if lineage is None:
711 711 lineage = list()
712 712
713 713 lineage.append(r)
714 714
715 715 # The visited lineage is a match if the current source is in the arg
716 716 # set. Since every candidate dest is visited by way of iterating
717 717 # subset, any dests further back in the lineage will be tested by a
718 718 # different iteration over subset. Likewise, if the src was already
719 719 # selected, the current lineage can be selected without going back
720 720 # further.
721 721 if src in args or src in dests:
722 722 dests.update(lineage)
723 723 break
724 724
725 725 r = src
726 726 src = _getrevsource(repo, r)
727 727
728 728 return subset.filter(dests.__contains__)
729 729
730 730 def divergent(repo, subset, x):
731 731 """``divergent()``
732 732 Final successors of changesets with an alternative set of final successors.
733 733 """
734 734 # i18n: "divergent" is a keyword
735 735 getargs(x, 0, 0, _("divergent takes no arguments"))
736 736 divergent = obsmod.getrevs(repo, 'divergent')
737 737 return subset & divergent
738 738
739 739 def draft(repo, subset, x):
740 740 """``draft()``
741 741 Changeset in draft phase."""
742 742 # i18n: "draft" is a keyword
743 743 getargs(x, 0, 0, _("draft takes no arguments"))
744 744 pc = repo._phasecache
745 745 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
746 746
747 747 def extinct(repo, subset, x):
748 748 """``extinct()``
749 749 Obsolete changesets with obsolete descendants only.
750 750 """
751 751 # i18n: "extinct" is a keyword
752 752 getargs(x, 0, 0, _("extinct takes no arguments"))
753 753 extincts = obsmod.getrevs(repo, 'extinct')
754 754 return subset & extincts
755 755
756 756 def extra(repo, subset, x):
757 757 """``extra(label, [value])``
758 758 Changesets with the given label in the extra metadata, with the given
759 759 optional value.
760 760
761 761 If `value` starts with `re:`, the remainder of the value is treated as
762 762 a regular expression. To match a value that actually starts with `re:`,
763 763 use the prefix `literal:`.
764 764 """
765 765
766 766 # i18n: "extra" is a keyword
767 767 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
768 768 # i18n: "extra" is a keyword
769 769 label = getstring(l[0], _('first argument to extra must be a string'))
770 770 value = None
771 771
772 772 if len(l) > 1:
773 773 # i18n: "extra" is a keyword
774 774 value = getstring(l[1], _('second argument to extra must be a string'))
775 775 kind, value, matcher = _stringmatcher(value)
776 776
777 777 def _matchvalue(r):
778 778 extra = repo[r].extra()
779 779 return label in extra and (value is None or matcher(extra[label]))
780 780
781 781 return subset.filter(lambda r: _matchvalue(r))
782 782
783 783 def filelog(repo, subset, x):
784 784 """``filelog(pattern)``
785 785 Changesets connected to the specified filelog.
786 786
787 787 For performance reasons, visits only revisions mentioned in the file-level
788 788 filelog, rather than filtering through all changesets (much faster, but
789 789 doesn't include deletes or duplicate changes). For a slower, more accurate
790 790 result, use ``file()``.
791 791
792 792 The pattern without explicit kind like ``glob:`` is expected to be
793 793 relative to the current directory and match against a file exactly
794 794 for efficiency.
795 795 """
796 796
797 797 # i18n: "filelog" is a keyword
798 798 pat = getstring(x, _("filelog requires a pattern"))
799 799 s = set()
800 800
801 801 if not matchmod.patkind(pat):
802 802 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
803 803 fl = repo.file(f)
804 804 for fr in fl:
805 805 s.add(fl.linkrev(fr))
806 806 else:
807 807 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
808 808 for f in repo[None]:
809 809 if m(f):
810 810 fl = repo.file(f)
811 811 for fr in fl:
812 812 s.add(fl.linkrev(fr))
813 813
814 814 return subset & s
815 815
816 816 def first(repo, subset, x):
817 817 """``first(set, [n])``
818 818 An alias for limit().
819 819 """
820 820 return limit(repo, subset, x)
821 821
822 822 def _follow(repo, subset, x, name, followfirst=False):
823 823 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
824 824 c = repo['.']
825 825 if l:
826 826 x = getstring(l[0], _("%s expected a filename") % name)
827 827 if x in c:
828 828 cx = c[x]
829 829 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
830 830 # include the revision responsible for the most recent version
831 831 s.add(cx.linkrev())
832 832 else:
833 833 return baseset()
834 834 else:
835 835 s = _revancestors(repo, baseset([c.rev()]), followfirst)
836 836
837 837 return subset & s
838 838
839 839 def follow(repo, subset, x):
840 840 """``follow([file])``
841 841 An alias for ``::.`` (ancestors of the working copy's first parent).
842 842 If a filename is specified, the history of the given file is followed,
843 843 including copies.
844 844 """
845 845 return _follow(repo, subset, x, 'follow')
846 846
847 847 def _followfirst(repo, subset, x):
848 848 # ``followfirst([file])``
849 849 # Like ``follow([file])`` but follows only the first parent of
850 850 # every revision or file revision.
851 851 return _follow(repo, subset, x, '_followfirst', followfirst=True)
852 852
853 853 def getall(repo, subset, x):
854 854 """``all()``
855 855 All changesets, the same as ``0:tip``.
856 856 """
857 857 # i18n: "all" is a keyword
858 858 getargs(x, 0, 0, _("all takes no arguments"))
859 859 return subset
860 860
861 861 def grep(repo, subset, x):
862 862 """``grep(regex)``
863 863 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
864 864 to ensure special escape characters are handled correctly. Unlike
865 865 ``keyword(string)``, the match is case-sensitive.
866 866 """
867 867 try:
868 868 # i18n: "grep" is a keyword
869 869 gr = re.compile(getstring(x, _("grep requires a string")))
870 870 except re.error, e:
871 871 raise error.ParseError(_('invalid match pattern: %s') % e)
872 872
873 873 def matches(x):
874 874 c = repo[x]
875 875 for e in c.files() + [c.user(), c.description()]:
876 876 if gr.search(e):
877 877 return True
878 878 return False
879 879
880 880 return subset.filter(matches)
881 881
882 882 def _matchfiles(repo, subset, x):
883 883 # _matchfiles takes a revset list of prefixed arguments:
884 884 #
885 885 # [p:foo, i:bar, x:baz]
886 886 #
887 887 # builds a match object from them and filters subset. Allowed
888 888 # prefixes are 'p:' for regular patterns, 'i:' for include
889 889 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
890 890 # a revision identifier, or the empty string to reference the
891 891 # working directory, from which the match object is
892 892 # initialized. Use 'd:' to set the default matching mode, default
893 893 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
894 894
895 895 # i18n: "_matchfiles" is a keyword
896 896 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
897 897 pats, inc, exc = [], [], []
898 898 hasset = False
899 899 rev, default = None, None
900 900 for arg in l:
901 901 # i18n: "_matchfiles" is a keyword
902 902 s = getstring(arg, _("_matchfiles requires string arguments"))
903 903 prefix, value = s[:2], s[2:]
904 904 if prefix == 'p:':
905 905 pats.append(value)
906 906 elif prefix == 'i:':
907 907 inc.append(value)
908 908 elif prefix == 'x:':
909 909 exc.append(value)
910 910 elif prefix == 'r:':
911 911 if rev is not None:
912 912 # i18n: "_matchfiles" is a keyword
913 913 raise error.ParseError(_('_matchfiles expected at most one '
914 914 'revision'))
915 915 rev = value
916 916 elif prefix == 'd:':
917 917 if default is not None:
918 918 # i18n: "_matchfiles" is a keyword
919 919 raise error.ParseError(_('_matchfiles expected at most one '
920 920 'default mode'))
921 921 default = value
922 922 else:
923 923 # i18n: "_matchfiles" is a keyword
924 924 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
925 925 if not hasset and matchmod.patkind(value) == 'set':
926 926 hasset = True
927 927 if not default:
928 928 default = 'glob'
929 929
930 930 def matches(x):
931 931 m = None
932 932 c = repo[x]
933 933 if not m or (hasset and rev is None):
934 934 ctx = c
935 935 if rev is not None:
936 936 ctx = repo[rev or None]
937 937 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
938 938 exclude=exc, ctx=ctx, default=default)
939 939 for f in c.files():
940 940 if m(f):
941 941 return True
942 942 return False
943 943
944 944 return subset.filter(matches)
945 945
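# Editor's note: an illustrative (hypothetical) argument list for _matchfiles()
# above, using the prefixes described in its comment block: a pattern, an
# include, an exclude, a revision whose context seeds fileset evaluation, and
# the default pattern kind. hasfile() below shows the simplest real caller,
# which passes a single ('string', 'p:' + pat) argument.
#
#   _matchfiles(p:src/module.py, i:src/, x:src/vendor/, r:tip, d:glob)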
946 946 def hasfile(repo, subset, x):
947 947 """``file(pattern)``
948 948 Changesets affecting files matched by pattern.
949 949
950 950 For a faster but less accurate result, consider using ``filelog()``
951 951 instead.
952 952
953 953 This predicate uses ``glob:`` as the default kind of pattern.
954 954 """
955 955 # i18n: "file" is a keyword
956 956 pat = getstring(x, _("file requires a pattern"))
957 957 return _matchfiles(repo, subset, ('string', 'p:' + pat))
958 958
959 959 def head(repo, subset, x):
960 960 """``head()``
961 961 Changeset is a named branch head.
962 962 """
963 963 # i18n: "head" is a keyword
964 964 getargs(x, 0, 0, _("head takes no arguments"))
965 965 hs = set()
966 966 for b, ls in repo.branchmap().iteritems():
967 967 hs.update(repo[h].rev() for h in ls)
968 968 return baseset(hs).filter(subset.__contains__)
969 969
970 970 def heads(repo, subset, x):
971 971 """``heads(set)``
972 972 Members of set with no children in set.
973 973 """
974 974 s = getset(repo, subset, x)
975 975 ps = parents(repo, subset, x)
976 976 return s - ps
977 977
978 978 def hidden(repo, subset, x):
979 979 """``hidden()``
980 980 Hidden changesets.
981 981 """
982 982 # i18n: "hidden" is a keyword
983 983 getargs(x, 0, 0, _("hidden takes no arguments"))
984 984 hiddenrevs = repoview.filterrevs(repo, 'visible')
985 985 return subset & hiddenrevs
986 986
987 987 def keyword(repo, subset, x):
988 988 """``keyword(string)``
989 989 Search commit message, user name, and names of changed files for
990 990 string. The match is case-insensitive.
991 991 """
992 992 # i18n: "keyword" is a keyword
993 993 kw = encoding.lower(getstring(x, _("keyword requires a string")))
994 994
995 995 def matches(r):
996 996 c = repo[r]
997 997 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
998 998 c.description()])
999 999
1000 1000 return subset.filter(matches)
1001 1001
1002 1002 def limit(repo, subset, x):
1003 1003 """``limit(set, [n])``
1004 1004 First n members of set, defaulting to 1.
1005 1005 """
1006 1006 # i18n: "limit" is a keyword
1007 1007 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1008 1008 try:
1009 1009 lim = 1
1010 1010 if len(l) == 2:
1011 1011 # i18n: "limit" is a keyword
1012 1012 lim = int(getstring(l[1], _("limit requires a number")))
1013 1013 except (TypeError, ValueError):
1014 1014 # i18n: "limit" is a keyword
1015 1015 raise error.ParseError(_("limit expects a number"))
1016 1016 ss = subset.set()
1017 1017 os = getset(repo, spanset(repo), l[0])
1018 1018 result = []
1019 1019 it = iter(os)
1020 1020 for x in xrange(lim):
1021 1021 try:
1022 1022 y = it.next()
1023 1023 if y in ss:
1024 1024 result.append(y)
1025 1025 except (StopIteration):
1026 1026 break
1027 1027 return baseset(result)
1028 1028
1029 1029 def last(repo, subset, x):
1030 1030 """``last(set, [n])``
1031 1031 Last n members of set, defaulting to 1.
1032 1032 """
1033 1033 # i18n: "last" is a keyword
1034 1034 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1035 1035 try:
1036 1036 lim = 1
1037 1037 if len(l) == 2:
1038 1038 # i18n: "last" is a keyword
1039 1039 lim = int(getstring(l[1], _("last requires a number")))
1040 1040 except (TypeError, ValueError):
1041 1041 # i18n: "last" is a keyword
1042 1042 raise error.ParseError(_("last expects a number"))
1043 1043 ss = subset.set()
1044 1044 os = getset(repo, spanset(repo), l[0])
1045 1045 os.reverse()
1046 1046 result = []
1047 1047 it = iter(os)
1048 1048 for x in xrange(lim):
1049 1049 try:
1050 1050 y = it.next()
1051 1051 if y in ss:
1052 1052 result.append(y)
1053 1053 except (StopIteration):
1054 1054 break
1055 1055 return baseset(result)
1056 1056
1057 1057 def maxrev(repo, subset, x):
1058 1058 """``max(set)``
1059 1059 Changeset with highest revision number in set.
1060 1060 """
1061 1061 os = getset(repo, spanset(repo), x)
1062 1062 if os:
1063 1063 m = os.max()
1064 1064 if m in subset:
1065 1065 return baseset([m])
1066 1066 return baseset()
1067 1067
1068 1068 def merge(repo, subset, x):
1069 1069 """``merge()``
1070 1070 Changeset is a merge changeset.
1071 1071 """
1072 1072 # i18n: "merge" is a keyword
1073 1073 getargs(x, 0, 0, _("merge takes no arguments"))
1074 1074 cl = repo.changelog
1075 1075 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1076 1076
1077 1077 def branchpoint(repo, subset, x):
1078 1078 """``branchpoint()``
1079 1079 Changesets with more than one child.
1080 1080 """
1081 1081 # i18n: "branchpoint" is a keyword
1082 1082 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1083 1083 cl = repo.changelog
1084 1084 if not subset:
1085 1085 return baseset()
1086 1086 baserev = min(subset)
1087 1087 parentscount = [0]*(len(repo) - baserev)
1088 1088 for r in cl.revs(start=baserev + 1):
1089 1089 for p in cl.parentrevs(r):
1090 1090 if p >= baserev:
1091 1091 parentscount[p - baserev] += 1
1092 1092 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1093 1093
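# Editor's note: a self-contained sketch of the counting trick branchpoint()
# uses above: parentscount is indexed by (rev - baserev), so the array only
# covers revisions at or above the smallest member of the subset. The parent
# table below is hypothetical.
def toy_branchpoints(parentrevs, subset):
    if not subset:
        return []
    baserev = min(subset)
    tiprev = max(parentrevs)
    parentscount = [0] * (tiprev - baserev + 1)
    for r in range(baserev + 1, tiprev + 1):
        for p in parentrevs[r]:
            if p >= baserev:
                parentscount[p - baserev] += 1
    return [r for r in subset if parentscount[r - baserev] > 1]

# rev 0 has two children (1 and 2); every other rev has at most one
_toyparents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1)}
assert toy_branchpoints(_toyparents, [0, 1, 2, 3]) == [0]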
1094 1094 def minrev(repo, subset, x):
1095 1095 """``min(set)``
1096 1096 Changeset with lowest revision number in set.
1097 1097 """
1098 1098 os = getset(repo, spanset(repo), x)
1099 1099 if os:
1100 1100 m = os.min()
1101 1101 if m in subset:
1102 1102 return baseset([m])
1103 1103 return baseset()
1104 1104
1105 1105 def modifies(repo, subset, x):
1106 1106 """``modifies(pattern)``
1107 1107 Changesets modifying files matched by pattern.
1108 1108
1109 1109 The pattern without explicit kind like ``glob:`` is expected to be
1110 1110 relative to the current directory and match against a file or a
1111 1111 directory.
1112 1112 """
1113 1113 # i18n: "modifies" is a keyword
1114 1114 pat = getstring(x, _("modifies requires a pattern"))
1115 1115 return checkstatus(repo, subset, pat, 0)
1116 1116
1117 1117 def node_(repo, subset, x):
1118 1118 """``id(string)``
1119 1119 Revision non-ambiguously specified by the given hex string prefix.
1120 1120 """
1121 1121 # i18n: "id" is a keyword
1122 1122 l = getargs(x, 1, 1, _("id requires one argument"))
1123 1123 # i18n: "id" is a keyword
1124 1124 n = getstring(l[0], _("id requires a string"))
1125 1125 if len(n) == 40:
1126 1126 rn = repo[n].rev()
1127 1127 else:
1128 1128 rn = None
1129 1129 pm = repo.changelog._partialmatch(n)
1130 1130 if pm is not None:
1131 1131 rn = repo.changelog.rev(pm)
1132 1132
1133 1133 return subset.filter(lambda r: r == rn)
1134 1134
1135 1135 def obsolete(repo, subset, x):
1136 1136 """``obsolete()``
1137 1137 Mutable changeset with a newer version."""
1138 1138 # i18n: "obsolete" is a keyword
1139 1139 getargs(x, 0, 0, _("obsolete takes no arguments"))
1140 1140 obsoletes = obsmod.getrevs(repo, 'obsolete')
1141 1141 return subset & obsoletes
1142 1142
1143 1143 def origin(repo, subset, x):
1144 1144 """``origin([set])``
1145 1145 Changesets that were specified as a source for the grafts, transplants or
1146 1146 rebases that created the given revisions. Omitting the optional set is the
1147 1147 same as passing all(). If a changeset created by these operations is itself
1148 1148 specified as a source for one of these operations, only the source changeset
1149 1149 for the first operation is selected.
1150 1150 """
1151 1151 if x is not None:
1152 1152 args = getset(repo, spanset(repo), x).set()
1153 1153 else:
1154 1154 args = getall(repo, spanset(repo), x).set()
1155 1155
1156 1156 def _firstsrc(rev):
1157 1157 src = _getrevsource(repo, rev)
1158 1158 if src is None:
1159 1159 return None
1160 1160
1161 1161 while True:
1162 1162 prev = _getrevsource(repo, src)
1163 1163
1164 1164 if prev is None:
1165 1165 return src
1166 1166 src = prev
1167 1167
1168 1168 o = set([_firstsrc(r) for r in args])
1169 1169 o -= set([None])
1170 1170 return subset & o
1171 1171
1172 1172 def outgoing(repo, subset, x):
1173 1173 """``outgoing([path])``
1174 1174 Changesets not found in the specified destination repository, or the
1175 1175 default push location.
1176 1176 """
1177 1177 import hg # avoid start-up nasties
1178 1178 # i18n: "outgoing" is a keyword
1179 1179 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1180 1180 # i18n: "outgoing" is a keyword
1181 1181 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1182 1182 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1183 1183 dest, branches = hg.parseurl(dest)
1184 1184 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1185 1185 if revs:
1186 1186 revs = [repo.lookup(rev) for rev in revs]
1187 1187 other = hg.peer(repo, {}, dest)
1188 1188 repo.ui.pushbuffer()
1189 1189 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1190 1190 repo.ui.popbuffer()
1191 1191 cl = repo.changelog
1192 1192 o = set([cl.rev(r) for r in outgoing.missing])
1193 1193 return subset & o
1194 1194
1195 1195 def p1(repo, subset, x):
1196 1196 """``p1([set])``
1197 1197 First parent of changesets in set, or the working directory.
1198 1198 """
1199 1199 if x is None:
1200 1200 p = repo[x].p1().rev()
1201 1201 if p >= 0:
1202 1202 return subset & baseset([p])
1203 1203 return baseset()
1204 1204
1205 1205 ps = set()
1206 1206 cl = repo.changelog
1207 1207 for r in getset(repo, spanset(repo), x):
1208 1208 ps.add(cl.parentrevs(r)[0])
1209 1209 ps -= set([node.nullrev])
1210 1210 return subset & ps
1211 1211
1212 1212 def p2(repo, subset, x):
1213 1213 """``p2([set])``
1214 1214 Second parent of changesets in set, or the working directory.
1215 1215 """
1216 1216 if x is None:
1217 1217 ps = repo[x].parents()
1218 1218 try:
1219 1219 p = ps[1].rev()
1220 1220 if p >= 0:
1221 1221 return subset & baseset([p])
1222 1222 return baseset()
1223 1223 except IndexError:
1224 1224 return baseset()
1225 1225
1226 1226 ps = set()
1227 1227 cl = repo.changelog
1228 1228 for r in getset(repo, spanset(repo), x):
1229 1229 ps.add(cl.parentrevs(r)[1])
1230 1230 ps -= set([node.nullrev])
1231 1231 return subset & ps
1232 1232
1233 1233 def parents(repo, subset, x):
1234 1234 """``parents([set])``
1235 1235 The set of all parents for all changesets in set, or the working directory.
1236 1236 """
1237 1237 if x is None:
1238 1238 ps = set(p.rev() for p in repo[x].parents())
1239 1239 else:
1240 1240 ps = set()
1241 1241 cl = repo.changelog
1242 1242 for r in getset(repo, spanset(repo), x):
1243 1243 ps.update(cl.parentrevs(r))
1244 1244 ps -= set([node.nullrev])
1245 1245 return subset & ps
1246 1246
1247 1247 def parentspec(repo, subset, x, n):
1248 1248 """``set^0``
1249 1249 The set.
1250 1250 ``set^1`` (or ``set^``), ``set^2``
1251 1251 First or second parent, respectively, of all changesets in set.
1252 1252 """
1253 1253 try:
1254 1254 n = int(n[1])
1255 1255 if n not in (0, 1, 2):
1256 1256 raise ValueError
1257 1257 except (TypeError, ValueError):
1258 1258 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1259 1259 ps = set()
1260 1260 cl = repo.changelog
1261 1261 for r in getset(repo, baseset(cl), x):
1262 1262 if n == 0:
1263 1263 ps.add(r)
1264 1264 elif n == 1:
1265 1265 ps.add(cl.parentrevs(r)[0])
1266 1266 elif n == 2:
1267 1267 parents = cl.parentrevs(r)
1268 1268 if len(parents) > 1:
1269 1269 ps.add(parents[1])
1270 1270 return subset & ps
1271 1271
1272 1272 def present(repo, subset, x):
1273 1273 """``present(set)``
1274 1274 An empty set, if any revision in set isn't found; otherwise,
1275 1275 all revisions in set.
1276 1276
1277 1277 If any of specified revisions is not present in the local repository,
1278 1278 the query is normally aborted. But this predicate allows the query
1279 1279 to continue even in such cases.
1280 1280 """
1281 1281 try:
1282 1282 return getset(repo, subset, x)
1283 1283 except error.RepoLookupError:
1284 1284 return baseset()
1285 1285
1286 1286 def public(repo, subset, x):
1287 1287 """``public()``
1288 1288 Changeset in public phase."""
1289 1289 # i18n: "public" is a keyword
1290 1290 getargs(x, 0, 0, _("public takes no arguments"))
1291 1291 pc = repo._phasecache
1292 1292 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1293 1293
1294 1294 def remote(repo, subset, x):
1295 1295 """``remote([id [,path]])``
1296 1296 Local revision that corresponds to the given identifier in a
1297 1297 remote repository, if present. Here, the '.' identifier is a
1298 1298 synonym for the current local branch.
1299 1299 """
1300 1300
1301 1301 import hg # avoid start-up nasties
1302 1302 # i18n: "remote" is a keyword
1303 1303 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1304 1304
1305 1305 q = '.'
1306 1306 if len(l) > 0:
1307 1307 # i18n: "remote" is a keyword
1308 1308 q = getstring(l[0], _("remote requires a string id"))
1309 1309 if q == '.':
1310 1310 q = repo['.'].branch()
1311 1311
1312 1312 dest = ''
1313 1313 if len(l) > 1:
1314 1314 # i18n: "remote" is a keyword
1315 1315 dest = getstring(l[1], _("remote requires a repository path"))
1316 1316 dest = repo.ui.expandpath(dest or 'default')
1317 1317 dest, branches = hg.parseurl(dest)
1318 1318 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1319 1319 if revs:
1320 1320 revs = [repo.lookup(rev) for rev in revs]
1321 1321 other = hg.peer(repo, {}, dest)
1322 1322 n = other.lookup(q)
1323 1323 if n in repo:
1324 1324 r = repo[n].rev()
1325 1325 if r in subset:
1326 1326 return baseset([r])
1327 1327 return baseset()
1328 1328
1329 1329 def removes(repo, subset, x):
1330 1330 """``removes(pattern)``
1331 1331 Changesets which remove files matching pattern.
1332 1332
1333 1333 The pattern without explicit kind like ``glob:`` is expected to be
1334 1334 relative to the current directory and match against a file or a
1335 1335 directory.
1336 1336 """
1337 1337 # i18n: "removes" is a keyword
1338 1338 pat = getstring(x, _("removes requires a pattern"))
1339 1339 return checkstatus(repo, subset, pat, 2)
1340 1340
1341 1341 def rev(repo, subset, x):
1342 1342 """``rev(number)``
1343 1343 Revision with the given numeric identifier.
1344 1344 """
1345 1345 # i18n: "rev" is a keyword
1346 1346 l = getargs(x, 1, 1, _("rev requires one argument"))
1347 1347 try:
1348 1348 # i18n: "rev" is a keyword
1349 1349 l = int(getstring(l[0], _("rev requires a number")))
1350 1350 except (TypeError, ValueError):
1351 1351 # i18n: "rev" is a keyword
1352 1352 raise error.ParseError(_("rev expects a number"))
1353 1353 return subset & baseset([l])
1354 1354
1355 1355 def matching(repo, subset, x):
1356 1356 """``matching(revision [, field])``
1357 1357 Changesets in which a given set of fields match the set of fields in the
1358 1358 selected revision or set.
1359 1359
1360 1360 To match more than one field pass the list of fields to match separated
1361 1361 by spaces (e.g. ``author description``).
1362 1362
1363 1363 Valid fields are most regular revision fields and some special fields.
1364 1364
1365 1365 Regular revision fields are ``description``, ``author``, ``branch``,
1366 1366 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1367 1367 and ``diff``.
1368 1368 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1369 1369 contents of the revision. Two revisions matching their ``diff`` will
1370 1370 also match their ``files``.
1371 1371
1372 1372 Special fields are ``summary`` and ``metadata``:
1373 1373 ``summary`` matches the first line of the description.
1374 1374 ``metadata`` is equivalent to matching ``description user date``
1375 1375 (i.e. it matches the main metadata fields).
1376 1376
1377 1377 ``metadata`` is the default field which is used when no fields are
1378 1378 specified. You can match more than one field at a time.
1379 1379 """
1380 1380 # i18n: "matching" is a keyword
1381 1381 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1382 1382
1383 1383 revs = getset(repo, baseset(repo.changelog), l[0])
1384 1384
1385 1385 fieldlist = ['metadata']
1386 1386 if len(l) > 1:
1387 1387 fieldlist = getstring(l[1],
1388 1388 # i18n: "matching" is a keyword
1389 1389 _("matching requires a string "
1390 1390 "as its second argument")).split()
1391 1391
1392 1392 # Make sure that there are no repeated fields,
1393 1393 # expand the 'special' 'metadata' field type
1394 1394 # and check the 'files' whenever we check the 'diff'
1395 1395 fields = []
1396 1396 for field in fieldlist:
1397 1397 if field == 'metadata':
1398 1398 fields += ['user', 'description', 'date']
1399 1399 elif field == 'diff':
1400 1400 # a revision matching the diff must also match the files
1401 1401 # since matching the diff is very costly, make sure to
1402 1402 # also match the files first
1403 1403 fields += ['files', 'diff']
1404 1404 else:
1405 1405 if field == 'author':
1406 1406 field = 'user'
1407 1407 fields.append(field)
1408 1408 fields = set(fields)
1409 1409 if 'summary' in fields and 'description' in fields:
1410 1410 # If a revision matches its description it also matches its summary
1411 1411 fields.discard('summary')
1412 1412
1413 1413 # We may want to match more than one field
1414 1414 # Not all fields take the same amount of time to be matched
1415 1415 # Sort the selected fields in order of increasing matching cost
1416 1416 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1417 1417 'files', 'description', 'substate', 'diff']
1418 1418 def fieldkeyfunc(f):
1419 1419 try:
1420 1420 return fieldorder.index(f)
1421 1421 except ValueError:
1422 1422 # assume an unknown field is very costly
1423 1423 return len(fieldorder)
1424 1424 fields = list(fields)
1425 1425 fields.sort(key=fieldkeyfunc)
1426 1426
1427 1427 # Each field will be matched with its own "getfield" function
1428 1428 # which will be added to the getfieldfuncs array of functions
1429 1429 getfieldfuncs = []
1430 1430 _funcs = {
1431 1431 'user': lambda r: repo[r].user(),
1432 1432 'branch': lambda r: repo[r].branch(),
1433 1433 'date': lambda r: repo[r].date(),
1434 1434 'description': lambda r: repo[r].description(),
1435 1435 'files': lambda r: repo[r].files(),
1436 1436 'parents': lambda r: repo[r].parents(),
1437 1437 'phase': lambda r: repo[r].phase(),
1438 1438 'substate': lambda r: repo[r].substate,
1439 1439 'summary': lambda r: repo[r].description().splitlines()[0],
1440 1440 'diff': lambda r: list(repo[r].diff(git=True),)
1441 1441 }
1442 1442 for info in fields:
1443 1443 getfield = _funcs.get(info, None)
1444 1444 if getfield is None:
1445 1445 raise error.ParseError(
1446 1446 # i18n: "matching" is a keyword
1447 1447 _("unexpected field name passed to matching: %s") % info)
1448 1448 getfieldfuncs.append(getfield)
1449 1449 # convert the getfield array of functions into a "getinfo" function
1450 1450 # which returns an array of field values (or a single value if there
1451 1451 # is only one field to match)
1452 1452 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1453 1453
1454 1454 def matches(x):
1455 1455 for rev in revs:
1456 1456 target = getinfo(rev)
1457 1457 match = True
1458 1458 for n, f in enumerate(getfieldfuncs):
1459 1459 if target[n] != f(x):
1460 1460 match = False
1461 1461 if match:
1462 1462 return True
1463 1463 return False
1464 1464
1465 1465 return subset.filter(matches)
1466 1466
1467 1467 def reverse(repo, subset, x):
1468 1468 """``reverse(set)``
1469 1469 Reverse order of set.
1470 1470 """
1471 1471 l = getset(repo, subset, x)
1472 1472 l.reverse()
1473 1473 return l
1474 1474
1475 1475 def roots(repo, subset, x):
1476 1476 """``roots(set)``
1477 1477 Changesets in set with no parent changeset in set.
1478 1478 """
1479 1479 s = getset(repo, spanset(repo), x).set()
1480 1480 subset = baseset([r for r in s if r in subset.set()])
1481 1481 cs = _children(repo, subset, s)
1482 1482 return subset - cs
1483 1483
1484 1484 def secret(repo, subset, x):
1485 1485 """``secret()``
1486 1486 Changeset in secret phase."""
1487 1487 # i18n: "secret" is a keyword
1488 1488 getargs(x, 0, 0, _("secret takes no arguments"))
1489 1489 pc = repo._phasecache
1490 1490 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1491 1491
1492 1492 def sort(repo, subset, x):
1493 1493 """``sort(set[, [-]key...])``
1494 1494 Sort set by keys. The default sort order is ascending, specify a key
1495 1495 as ``-key`` to sort in descending order.
1496 1496
1497 1497 The keys can be:
1498 1498
1499 1499 - ``rev`` for the revision number,
1500 1500 - ``branch`` for the branch name,
1501 1501 - ``desc`` for the commit message (description),
1502 1502 - ``user`` for user name (``author`` can be used as an alias),
1503 1503 - ``date`` for the commit date
1504 1504 """
1505 1505 # i18n: "sort" is a keyword
1506 1506 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1507 1507 keys = "rev"
1508 1508 if len(l) == 2:
1509 1509 # i18n: "sort" is a keyword
1510 1510 keys = getstring(l[1], _("sort spec must be a string"))
1511 1511
1512 1512 s = l[0]
1513 1513 keys = keys.split()
1514 1514 l = []
1515 1515 def invert(s):
1516 1516 return "".join(chr(255 - ord(c)) for c in s)
1517 1517 revs = getset(repo, subset, s)
1518 1518 if keys == ["rev"]:
1519 1519 revs.sort()
1520 1520 return revs
1521 1521 elif keys == ["-rev"]:
1522 1522 revs.sort(reverse=True)
1523 1523 return revs
1524 1524 for r in revs:
1525 1525 c = repo[r]
1526 1526 e = []
1527 1527 for k in keys:
1528 1528 if k == 'rev':
1529 1529 e.append(r)
1530 1530 elif k == '-rev':
1531 1531 e.append(-r)
1532 1532 elif k == 'branch':
1533 1533 e.append(c.branch())
1534 1534 elif k == '-branch':
1535 1535 e.append(invert(c.branch()))
1536 1536 elif k == 'desc':
1537 1537 e.append(c.description())
1538 1538 elif k == '-desc':
1539 1539 e.append(invert(c.description()))
1540 1540 elif k in 'user author':
1541 1541 e.append(c.user())
1542 1542 elif k in '-user -author':
1543 1543 e.append(invert(c.user()))
1544 1544 elif k == 'date':
1545 1545 e.append(c.date()[0])
1546 1546 elif k == '-date':
1547 1547 e.append(-c.date()[0])
1548 1548 else:
1549 1549 raise error.ParseError(_("unknown sort key %r") % k)
1550 1550 e.append(r)
1551 1551 l.append(e)
1552 1552 l.sort()
1553 1553 return baseset([e[-1] for e in l])
1554 1554
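# Editor's note: a minimal demonstration of the invert() trick used by sort()
# above for descending string keys. Mapping every byte c to chr(255 - ord(c))
# flips byte-wise comparisons, so an ordinary ascending sort on the inverted
# key orders these (hypothetical) branch names the same way as a descending
# sort on the originals (prefix ties aside).
def _invert_demo(s):
    return "".join(chr(255 - ord(c)) for c in s)

_names = ["default", "stable", "beta"]
assert sorted(_names, key=_invert_demo) == sorted(_names, reverse=True)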
1555 1555 def _stringmatcher(pattern):
1556 1556 """
1557 1557 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1558 1558 returns the matcher name, pattern, and matcher function.
1559 1559 missing or unknown prefixes are treated as literal matches.
1560 1560
1561 1561 helper for tests:
1562 1562 >>> def test(pattern, *tests):
1563 1563 ... kind, pattern, matcher = _stringmatcher(pattern)
1564 1564 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1565 1565
1566 1566 exact matching (no prefix):
1567 1567 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1568 1568 ('literal', 'abcdefg', [False, False, True])
1569 1569
1570 1570 regex matching ('re:' prefix)
1571 1571 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1572 1572 ('re', 'a.+b', [False, False, True])
1573 1573
1574 1574 force exact matches ('literal:' prefix)
1575 1575 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1576 1576 ('literal', 're:foobar', [False, True])
1577 1577
1578 1578 unknown prefixes are ignored and treated as literals
1579 1579 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1580 1580 ('literal', 'foo:bar', [False, False, True])
1581 1581 """
1582 1582 if pattern.startswith('re:'):
1583 1583 pattern = pattern[3:]
1584 1584 try:
1585 1585 regex = re.compile(pattern)
1586 1586 except re.error, e:
1587 1587 raise error.ParseError(_('invalid regular expression: %s')
1588 1588 % e)
1589 1589 return 're', pattern, regex.search
1590 1590 elif pattern.startswith('literal:'):
1591 1591 pattern = pattern[8:]
1592 1592 return 'literal', pattern, pattern.__eq__
1593 1593
1594 1594 def _substringmatcher(pattern):
1595 1595 kind, pattern, matcher = _stringmatcher(pattern)
1596 1596 if kind == 'literal':
1597 1597 matcher = lambda s: pattern in s
1598 1598 return kind, pattern, matcher
1599 1599
1600 1600 def tag(repo, subset, x):
1601 1601 """``tag([name])``
1602 1602 The specified tag by name, or all tagged revisions if no name is given.
1603 1603
1604 1604 If `name` starts with `re:`, the remainder of the name is treated as
1605 1605 a regular expression. To match a tag that actually starts with `re:`,
1606 1606 use the prefix `literal:`.
1607 1607 """
1608 1608 # i18n: "tag" is a keyword
1609 1609 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1610 1610 cl = repo.changelog
1611 1611 if args:
1612 1612 pattern = getstring(args[0],
1613 1613 # i18n: "tag" is a keyword
1614 1614 _('the argument to tag must be a string'))
1615 1615 kind, pattern, matcher = _stringmatcher(pattern)
1616 1616 if kind == 'literal':
1617 1617 # avoid resolving all tags
1618 1618 tn = repo._tagscache.tags.get(pattern, None)
1619 1619 if tn is None:
1620 1620 raise util.Abort(_("tag '%s' does not exist") % pattern)
1621 1621 s = set([repo[tn].rev()])
1622 1622 else:
1623 1623 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1624 1624 else:
1625 1625 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1626 1626 return subset & s
1627 1627
1628 1628 def tagged(repo, subset, x):
1629 1629 return tag(repo, subset, x)
1630 1630
1631 1631 def unstable(repo, subset, x):
1632 1632 """``unstable()``
1633 1633 Non-obsolete changesets with obsolete ancestors.
1634 1634 """
1635 1635 # i18n: "unstable" is a keyword
1636 1636 getargs(x, 0, 0, _("unstable takes no arguments"))
1637 1637 unstables = obsmod.getrevs(repo, 'unstable')
1638 1638 return subset & unstables
1639 1639
1640 1640
1641 1641 def user(repo, subset, x):
1642 1642 """``user(string)``
1643 1643 User name contains string. The match is case-insensitive.
1644 1644
1645 1645 If `string` starts with `re:`, the remainder of the string is treated as
1646 1646 a regular expression. To match a user that actually contains `re:`, use
1647 1647 the prefix `literal:`.
1648 1648 """
1649 1649 return author(repo, subset, x)
1650 1650
1651 1651 # for internal use
1652 1652 def _list(repo, subset, x):
1653 1653 s = getstring(x, "internal error")
1654 1654 if not s:
1655 1655 return baseset()
1656 1656 ls = [repo[r].rev() for r in s.split('\0')]
1657 1657 s = subset.set()
1658 1658 return baseset([r for r in ls if r in s])
1659 1659
1660 1660 # for internal use
1661 1661 def _intlist(repo, subset, x):
1662 1662 s = getstring(x, "internal error")
1663 1663 if not s:
1664 1664 return baseset()
1665 1665 ls = [int(r) for r in s.split('\0')]
1666 1666 s = subset.set()
1667 1667 return baseset([r for r in ls if r in s])
1668 1668
1669 1669 # for internal use
1670 1670 def _hexlist(repo, subset, x):
1671 1671 s = getstring(x, "internal error")
1672 1672 if not s:
1673 1673 return baseset()
1674 1674 cl = repo.changelog
1675 1675 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1676 1676 s = subset.set()
1677 1677 return baseset([r for r in ls if r in s])
1678 1678
1679 1679 symbols = {
1680 1680 "adds": adds,
1681 1681 "all": getall,
1682 1682 "ancestor": ancestor,
1683 1683 "ancestors": ancestors,
1684 1684 "_firstancestors": _firstancestors,
1685 1685 "author": author,
1686 1686 "only": only,
1687 1687 "bisect": bisect,
1688 1688 "bisected": bisected,
1689 1689 "bookmark": bookmark,
1690 1690 "branch": branch,
1691 1691 "branchpoint": branchpoint,
1692 1692 "bumped": bumped,
1693 1693 "bundle": bundle,
1694 1694 "children": children,
1695 1695 "closed": closed,
1696 1696 "contains": contains,
1697 1697 "converted": converted,
1698 1698 "date": date,
1699 1699 "desc": desc,
1700 1700 "descendants": descendants,
1701 1701 "_firstdescendants": _firstdescendants,
1702 1702 "destination": destination,
1703 1703 "divergent": divergent,
1704 1704 "draft": draft,
1705 1705 "extinct": extinct,
1706 1706 "extra": extra,
1707 1707 "file": hasfile,
1708 1708 "filelog": filelog,
1709 1709 "first": first,
1710 1710 "follow": follow,
1711 1711 "_followfirst": _followfirst,
1712 1712 "grep": grep,
1713 1713 "head": head,
1714 1714 "heads": heads,
1715 1715 "hidden": hidden,
1716 1716 "id": node_,
1717 1717 "keyword": keyword,
1718 1718 "last": last,
1719 1719 "limit": limit,
1720 1720 "_matchfiles": _matchfiles,
1721 1721 "max": maxrev,
1722 1722 "merge": merge,
1723 1723 "min": minrev,
1724 1724 "modifies": modifies,
1725 1725 "obsolete": obsolete,
1726 1726 "origin": origin,
1727 1727 "outgoing": outgoing,
1728 1728 "p1": p1,
1729 1729 "p2": p2,
1730 1730 "parents": parents,
1731 1731 "present": present,
1732 1732 "public": public,
1733 1733 "remote": remote,
1734 1734 "removes": removes,
1735 1735 "rev": rev,
1736 1736 "reverse": reverse,
1737 1737 "roots": roots,
1738 1738 "sort": sort,
1739 1739 "secret": secret,
1740 1740 "matching": matching,
1741 1741 "tag": tag,
1742 1742 "tagged": tagged,
1743 1743 "user": user,
1744 1744 "unstable": unstable,
1745 1745 "_list": _list,
1746 1746 "_intlist": _intlist,
1747 1747 "_hexlist": _hexlist,
1748 1748 }
1749 1749
1750 1750 # symbols which can't be used for a DoS attack for any given input
1751 1751 # (e.g. those which accept regexes as plain strings shouldn't be included)
1752 1752 # functions that just return a lot of changesets (like all) don't count here
1753 1753 safesymbols = set([
1754 1754 "adds",
1755 1755 "all",
1756 1756 "ancestor",
1757 1757 "ancestors",
1758 1758 "_firstancestors",
1759 1759 "author",
1760 1760 "bisect",
1761 1761 "bisected",
1762 1762 "bookmark",
1763 1763 "branch",
1764 1764 "branchpoint",
1765 1765 "bumped",
1766 1766 "bundle",
1767 1767 "children",
1768 1768 "closed",
1769 1769 "converted",
1770 1770 "date",
1771 1771 "desc",
1772 1772 "descendants",
1773 1773 "_firstdescendants",
1774 1774 "destination",
1775 1775 "divergent",
1776 1776 "draft",
1777 1777 "extinct",
1778 1778 "extra",
1779 1779 "file",
1780 1780 "filelog",
1781 1781 "first",
1782 1782 "follow",
1783 1783 "_followfirst",
1784 1784 "head",
1785 1785 "heads",
1786 1786 "hidden",
1787 1787 "id",
1788 1788 "keyword",
1789 1789 "last",
1790 1790 "limit",
1791 1791 "_matchfiles",
1792 1792 "max",
1793 1793 "merge",
1794 1794 "min",
1795 1795 "modifies",
1796 1796 "obsolete",
1797 1797 "origin",
1798 1798 "outgoing",
1799 1799 "p1",
1800 1800 "p2",
1801 1801 "parents",
1802 1802 "present",
1803 1803 "public",
1804 1804 "remote",
1805 1805 "removes",
1806 1806 "rev",
1807 1807 "reverse",
1808 1808 "roots",
1809 1809 "sort",
1810 1810 "secret",
1811 1811 "matching",
1812 1812 "tag",
1813 1813 "tagged",
1814 1814 "user",
1815 1815 "unstable",
1816 1816 "_list",
1817 1817 "_intlist",
1818 1818 "_hexlist",
1819 1819 ])
1820 1820
1821 1821 methods = {
1822 1822 "range": rangeset,
1823 1823 "dagrange": dagrange,
1824 1824 "string": stringset,
1825 1825 "symbol": symbolset,
1826 1826 "and": andset,
1827 1827 "or": orset,
1828 1828 "not": notset,
1829 1829 "list": listset,
1830 1830 "func": func,
1831 1831 "ancestor": ancestorspec,
1832 1832 "parent": parentspec,
1833 1833 "parentpost": p1,
1834 1834 }
1835 1835
1836 1836 def optimize(x, small):
1837 1837 if x is None:
1838 1838 return 0, x
1839 1839
1840 1840 smallbonus = 1
1841 1841 if small:
1842 1842 smallbonus = .5
1843 1843
1844 1844 op = x[0]
1845 1845 if op == 'minus':
1846 1846 return optimize(('and', x[1], ('not', x[2])), small)
1847 1847 elif op == 'dagrangepre':
1848 1848 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1849 1849 elif op == 'dagrangepost':
1850 1850 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1851 1851 elif op == 'rangepre':
1852 1852 return optimize(('range', ('string', '0'), x[1]), small)
1853 1853 elif op == 'rangepost':
1854 1854 return optimize(('range', x[1], ('string', 'tip')), small)
1855 1855 elif op == 'negate':
1856 1856 return optimize(('string',
1857 1857 '-' + getstring(x[1], _("can't negate that"))), small)
1858 1858 elif op in 'string symbol negate':
1859 1859 return smallbonus, x # single revisions are small
1860 1860 elif op == 'and':
1861 1861 wa, ta = optimize(x[1], True)
1862 1862 wb, tb = optimize(x[2], True)
1863 1863
1864 1864 # (::x and not ::y)/(not ::y and ::x) have a fast path
1865 1865 def isonly(revs, bases):
1866 1866 return (
1867 1867 revs[0] == 'func'
1868 1868 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1869 1869 and bases[0] == 'not'
1870 1870 and bases[1][0] == 'func'
1871 1871 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1872 1872
1873 1873 w = min(wa, wb)
1874 1874 if isonly(ta, tb):
1875 1875 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
1876 1876 if isonly(tb, ta):
1877 1877 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
1878 1878
1879 1879 if wa > wb:
1880 1880 return w, (op, tb, ta)
1881 1881 return w, (op, ta, tb)
1882 1882 elif op == 'or':
1883 1883 wa, ta = optimize(x[1], False)
1884 1884 wb, tb = optimize(x[2], False)
1885 1885 if wb < wa:
1886 1886 wb, wa = wa, wb
1887 1887 return max(wa, wb), (op, ta, tb)
1888 1888 elif op == 'not':
1889 1889 o = optimize(x[1], not small)
1890 1890 return o[0], (op, o[1])
1891 1891 elif op == 'parentpost':
1892 1892 o = optimize(x[1], small)
1893 1893 return o[0], (op, o[1])
1894 1894 elif op == 'group':
1895 1895 return optimize(x[1], small)
1896 1896 elif op in 'dagrange range list parent ancestorspec':
1897 1897 if op == 'parent':
1898 1898 # x^:y means (x^) : y, not x ^ (:y)
1899 1899 post = ('parentpost', x[1])
1900 1900 if x[2][0] == 'dagrangepre':
1901 1901 return optimize(('dagrange', post, x[2][1]), small)
1902 1902 elif x[2][0] == 'rangepre':
1903 1903 return optimize(('range', post, x[2][1]), small)
1904 1904
1905 1905 wa, ta = optimize(x[1], small)
1906 1906 wb, tb = optimize(x[2], small)
1907 1907 return wa + wb, (op, ta, tb)
1908 1908 elif op == 'func':
1909 1909 f = getstring(x[1], _("not a symbol"))
1910 1910 wa, ta = optimize(x[2], small)
1911 1911 if f in ("author branch closed date desc file grep keyword "
1912 1912 "outgoing user"):
1913 1913 w = 10 # slow
1914 1914 elif f in "modifies adds removes":
1915 1915 w = 30 # slower
1916 1916 elif f == "contains":
1917 1917 w = 100 # very slow
1918 1918 elif f == "ancestor":
1919 1919 w = 1 * smallbonus
1920 1920 elif f in "reverse limit first _intlist":
1921 1921 w = 0
1922 1922 elif f in "sort":
1923 1923 w = 10 # assume most sorts look at changelog
1924 1924 else:
1925 1925 w = 1
1926 1926 return w + wa, (op, x[1], ta)
1927 1927 return 1, x
1928 1928
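A hedged sketch of how parsing and optimization fit together, assuming the helpers of this module are in scope (e.g. from mercurial.revset import parse, optimize, prettyformat); the query text is just an example:

    tree, pos = parse('::tip and not ::default')
    weight, opttree = optimize(tree, True)
    # the '::x and not ::y' pattern is rewritten to the faster only(x, y);
    # prettyformat() (defined further below) shows the rewritten tree
    print prettyformat(opttree)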
1929 1929 _aliasarg = ('func', ('symbol', '_aliasarg'))
1930 1930 def _getaliasarg(tree):
1931 1931 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1932 1932 return X, None otherwise.
1933 1933 """
1934 1934 if (len(tree) == 3 and tree[:2] == _aliasarg
1935 1935 and tree[2][0] == 'string'):
1936 1936 return tree[2][1]
1937 1937 return None
1938 1938
1939 1939 def _checkaliasarg(tree, known=None):
1940 1940 """Check tree contains no _aliasarg construct or only ones which
1941 1941 value is in known. Used to avoid alias placeholders injection.
1942 1942 """
1943 1943 if isinstance(tree, tuple):
1944 1944 arg = _getaliasarg(tree)
1945 1945 if arg is not None and (not known or arg not in known):
1946 1946 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1947 1947 for t in tree:
1948 1948 _checkaliasarg(t, known)
1949 1949
1950 1950 class revsetalias(object):
1951 1951 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1952 1952 args = None
1953 1953
1954 1954 def __init__(self, name, value):
1955 1955 '''Aliases like:
1956 1956
1957 1957 h = heads(default)
1958 1958 b($1) = ancestors($1) - ancestors(default)
1959 1959 '''
1960 1960 m = self.funcre.search(name)
1961 1961 if m:
1962 1962 self.name = m.group(1)
1963 1963 self.tree = ('func', ('symbol', m.group(1)))
1964 1964 self.args = [x.strip() for x in m.group(2).split(',')]
1965 1965 for arg in self.args:
1966 1966 # _aliasarg() is an unknown symbol, only used to separate
1967 1967 # alias argument placeholders from regular strings.
1968 1968 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1969 1969 else:
1970 1970 self.name = name
1971 1971 self.tree = ('symbol', name)
1972 1972
1973 1973 self.replacement, pos = parse(value)
1974 1974 if pos != len(value):
1975 1975 raise error.ParseError(_('invalid token'), pos)
1976 1976 # Check for placeholder injection
1977 1977 _checkaliasarg(self.replacement, self.args)
1978 1978
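A short sketch of the two alias shapes the constructor accepts, mirroring the docstring above; the alias bodies are examples only:

    plain = revsetalias('h', 'heads(default)')
    plain.name, plain.args       # ('h', None)

    func = revsetalias('b($1)', 'ancestors($1) - ancestors(default)')
    func.name, func.args         # ('b', ['$1'])
    func.tree                    # ('func', ('symbol', 'b'))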
1979 1979 def _getalias(aliases, tree):
1980 1980 """If tree looks like an unexpanded alias, return it. Return None
1981 1981 otherwise.
1982 1982 """
1983 1983 if isinstance(tree, tuple) and tree:
1984 1984 if tree[0] == 'symbol' and len(tree) == 2:
1985 1985 name = tree[1]
1986 1986 alias = aliases.get(name)
1987 1987 if alias and alias.args is None and alias.tree == tree:
1988 1988 return alias
1989 1989 if tree[0] == 'func' and len(tree) > 1:
1990 1990 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1991 1991 name = tree[1][1]
1992 1992 alias = aliases.get(name)
1993 1993 if alias and alias.args is not None and alias.tree == tree[:2]:
1994 1994 return alias
1995 1995 return None
1996 1996
1997 1997 def _expandargs(tree, args):
1998 1998 """Replace _aliasarg instances with the substitution value of the
1999 1999 same name in args, recursively.
2000 2000 """
2001 2001 if not tree or not isinstance(tree, tuple):
2002 2002 return tree
2003 2003 arg = _getaliasarg(tree)
2004 2004 if arg is not None:
2005 2005 return args[arg]
2006 2006 return tuple(_expandargs(t, args) for t in tree)
2007 2007
2008 2008 def _expandaliases(aliases, tree, expanding, cache):
2009 2009 """Expand aliases in tree, recursively.
2010 2010
2011 2011 'aliases' is a dictionary mapping user defined aliases to
2012 2012 revsetalias objects.
2013 2013 """
2014 2014 if not isinstance(tree, tuple):
2015 2015 # Do not expand raw strings
2016 2016 return tree
2017 2017 alias = _getalias(aliases, tree)
2018 2018 if alias is not None:
2019 2019 if alias in expanding:
2020 2020 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2021 2021 'detected') % alias.name)
2022 2022 expanding.append(alias)
2023 2023 if alias.name not in cache:
2024 2024 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2025 2025 expanding, cache)
2026 2026 result = cache[alias.name]
2027 2027 expanding.pop()
2028 2028 if alias.args is not None:
2029 2029 l = getlist(tree[2])
2030 2030 if len(l) != len(alias.args):
2031 2031 raise error.ParseError(
2032 2032 _('invalid number of arguments: %s') % len(l))
2033 2033 l = [_expandaliases(aliases, a, [], cache) for a in l]
2034 2034 result = _expandargs(result, dict(zip(alias.args, l)))
2035 2035 else:
2036 2036 result = tuple(_expandaliases(aliases, t, expanding, cache)
2037 2037 for t in tree)
2038 2038 return result
2039 2039
2040 2040 def findaliases(ui, tree):
2041 2041 _checkaliasarg(tree)
2042 2042 aliases = {}
2043 2043 for k, v in ui.configitems('revsetalias'):
2044 2044 alias = revsetalias(k, v)
2045 2045 aliases[alias.name] = alias
2046 2046 return _expandaliases(aliases, tree, [], {})
2047 2047
2048 2048 def parse(spec, lookup=None):
2049 2049 p = parser.parser(tokenize, elements)
2050 2050 return p.parse(spec, lookup=lookup)
2051 2051
2052 2052 def match(ui, spec, repo=None):
2053 2053 if not spec:
2054 2054 raise error.ParseError(_("empty query"))
2055 2055 lookup = None
2056 2056 if repo:
2057 2057 lookup = repo.__contains__
2058 2058 tree, pos = parse(spec, lookup)
2059 2059 if (pos != len(spec)):
2060 2060 raise error.ParseError(_("invalid token"), pos)
2061 2061 if ui:
2062 2062 tree = findaliases(ui, tree)
2063 2063 weight, tree = optimize(tree, True)
2064 2064 def mfunc(repo, subset):
2065 2065 if util.safehasattr(subset, 'set'):
2066 2066 result = getset(repo, subset, tree)
2067 2067 else:
2068 2068 result = getset(repo, baseset(subset), tree)
2069 2069 return result
2070 2070 return mfunc
2071 2071
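A hedged sketch of the usual calling sequence; ui and repo stand for already-constructed Mercurial objects that are not built here, and the query is arbitrary:

    m = match(ui, 'head() and not public()', repo)
    revs = m(repo, spanset(repo))    # a smartset of matching revision numbers
    for r in revs:
        print r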
2072 2072 def formatspec(expr, *args):
2073 2073 '''
2074 2074 This is a convenience function for using revsets internally, and
2075 2075 escapes arguments appropriately. Aliases are intentionally ignored
2076 2076 so that intended expression behavior isn't accidentally subverted.
2077 2077
2078 2078 Supported arguments:
2079 2079
2080 2080 %r = revset expression, parenthesized
2081 2081 %d = int(arg), no quoting
2082 2082 %s = string(arg), escaped and single-quoted
2083 2083 %b = arg.branch(), escaped and single-quoted
2084 2084 %n = hex(arg), single-quoted
2085 2085 %% = a literal '%'
2086 2086
2087 2087 Prefixing the type with 'l' specifies a parenthesized list of that type.
2088 2088
2089 2089 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2090 2090 '(10 or 11):: and ((this()) or (that()))'
2091 2091 >>> formatspec('%d:: and not %d::', 10, 20)
2092 2092 '10:: and not 20::'
2093 2093 >>> formatspec('%ld or %ld', [], [1])
2094 2094 "_list('') or 1"
2095 2095 >>> formatspec('keyword(%s)', 'foo\\xe9')
2096 2096 "keyword('foo\\\\xe9')"
2097 2097 >>> b = lambda: 'default'
2098 2098 >>> b.branch = b
2099 2099 >>> formatspec('branch(%b)', b)
2100 2100 "branch('default')"
2101 2101 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2102 2102 "root(_list('a\\x00b\\x00c\\x00d'))"
2103 2103 '''
2104 2104
2105 2105 def quote(s):
2106 2106 return repr(str(s))
2107 2107
2108 2108 def argtype(c, arg):
2109 2109 if c == 'd':
2110 2110 return str(int(arg))
2111 2111 elif c == 's':
2112 2112 return quote(arg)
2113 2113 elif c == 'r':
2114 2114 parse(arg) # make sure syntax errors are confined
2115 2115 return '(%s)' % arg
2116 2116 elif c == 'n':
2117 2117 return quote(node.hex(arg))
2118 2118 elif c == 'b':
2119 2119 return quote(arg.branch())
2120 2120
2121 2121 def listexp(s, t):
2122 2122 l = len(s)
2123 2123 if l == 0:
2124 2124 return "_list('')"
2125 2125 elif l == 1:
2126 2126 return argtype(t, s[0])
2127 2127 elif t == 'd':
2128 2128 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2129 2129 elif t == 's':
2130 2130 return "_list('%s')" % "\0".join(s)
2131 2131 elif t == 'n':
2132 2132 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2133 2133 elif t == 'b':
2134 2134 return "_list('%s')" % "\0".join(a.branch() for a in s)
2135 2135
2136 2136 m = l // 2
2137 2137 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2138 2138
2139 2139 ret = ''
2140 2140 pos = 0
2141 2141 arg = 0
2142 2142 while pos < len(expr):
2143 2143 c = expr[pos]
2144 2144 if c == '%':
2145 2145 pos += 1
2146 2146 d = expr[pos]
2147 2147 if d == '%':
2148 2148 ret += d
2149 2149 elif d in 'dsnbr':
2150 2150 ret += argtype(d, args[arg])
2151 2151 arg += 1
2152 2152 elif d == 'l':
2153 2153 # a list of some type
2154 2154 pos += 1
2155 2155 d = expr[pos]
2156 2156 ret += listexp(list(args[arg]), d)
2157 2157 arg += 1
2158 2158 else:
2159 2159 raise util.Abort('unexpected revspec format character %s' % d)
2160 2160 else:
2161 2161 ret += c
2162 2162 pos += 1
2163 2163
2164 2164 return ret
2165 2165
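One more usage sketch tying formatspec() back to match(); ui and repo are again hypothetical caller-supplied objects:

    spec = formatspec('%ld and file(%s)', [10, 11, 12], 'path:foo')
    # spec is "_intlist('10\x0011\x0012') and file('path:foo')"
    revs = match(ui, spec, repo)(repo, spanset(repo))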
2166 2166 def prettyformat(tree):
2167 2167 def _prettyformat(tree, level, lines):
2168 2168 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2169 2169 lines.append((level, str(tree)))
2170 2170 else:
2171 2171 lines.append((level, '(%s' % tree[0]))
2172 2172 for s in tree[1:]:
2173 2173 _prettyformat(s, level + 1, lines)
2174 2174 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2175 2175
2176 2176 lines = []
2177 2177 _prettyformat(tree, 0, lines)
2178 2178 output = '\n'.join((' '*l + s) for l, s in lines)
2179 2179 return output
2180 2180
2181 2181 def depth(tree):
2182 2182 if isinstance(tree, tuple):
2183 2183 return max(map(depth, tree)) + 1
2184 2184 else:
2185 2185 return 0
2186 2186
2187 2187 def funcsused(tree):
2188 2188 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2189 2189 return set()
2190 2190 else:
2191 2191 funcs = set()
2192 2192 for s in tree[1:]:
2193 2193 funcs |= funcsused(s)
2194 2194 if tree[0] == 'func':
2195 2195 funcs.add(tree[1][1])
2196 2196 return funcs
2197 2197
2198 2198 class abstractsmartset(object):
2199 2199
2200 2200 def __nonzero__(self):
2201 2201 """True if the smartset is not empty"""
2202 2202 raise NotImplementedError()
2203 2203
2204 2204 def __contains__(self, rev):
2205 2205 """provide fast membership testing"""
2206 2206 raise NotImplementedError()
2207 2207
2208 2208 def __set__(self):
2209 2209 """Returns a set or a smartset containing all the elements.
2210 2210
2211 2211 The returned structure should be the fastest option for membership
2212 2212 testing.
2213 2213
2214 2214 This is part of the mandatory API for smartset."""
2215 2215 raise NotImplementedError()
2216 2216
2217 2217 def __iter__(self):
2218 2218 """iterate the set in the order it is supposed to be iterated"""
2219 2219 raise NotImplementedError()
2220 2220
2221 2221 # Attributes containing a function to perform a fast iteration in a given
2222 2222 # direction. A smartset can have none, one, or both defined.
2223 2223 #
2224 2224 # Default value is None instead of a function returning None to avoid
2225 2225 # initializing an iterator just for testing if a fast method exists.
2226 2226 fastasc = None
2227 2227 fastdesc = None
2228 2228
2229 2229 def isascending(self):
2230 2230 """True if the set will iterate in ascending order"""
2231 2231 raise NotImplementedError()
2232 2232
2233 2233 def ascending(self):
2234 2234 """Sorts the set in ascending order (in place).
2235 2235
2236 2236 This is part of the mandatory API for smartset."""
2237 2237 self.sort()
2238 2238
2239 2239 def isdescending(self):
2240 2240 """True if the set will iterate in descending order"""
2241 2241 raise NotImplementedError()
2242 2242
2243 2243 def descending(self):
2244 2244 """Sorts the set in descending order (in place).
2245 2245
2246 2246 This is part of the mandatory API for smartset."""
2247 2247 self.sort(reverse=True)
2248 2248
2249 2249 def min(self):
2250 2250 """return the minimum element in the set"""
2251 2251 if self.fastasc is not None:
2252 2252 for r in self.fastasc():
2253 2253 return r
2254 2254 raise ValueError('arg is an empty sequence')
2255 2255 return min(self)
2256 2256
2257 2257 def max(self):
2258 2258 """return the maximum element in the set"""
2259 2259 if self.fastdesc is not None:
2260 2260 for r in self.fastdesc():
2261 2261 return r
2262 2262 raise ValueError('arg is an empty sequence')
2263 2263 return max(self)
2264 2264
2265 2265 def first(self):
2266 2266 """return the first element in the set (user iteration perspective)
2267 2267
2268 2268 Return None if the set is empty"""
2269 2269 raise NotImplementedError()
2270 2270
2271 2271 def last(self):
2272 2272 """return the last element in the set (user iteration perspective)
2273 2273
2274 2274 Return None if the set is empty"""
2275 2275 raise NotImplementedError()
2276 2276
2277 2277 def reverse(self):
2278 2278 """reverse the expected iteration order"""
2279 2279 raise NotImplementedError()
2280 2280
2281 2281 def sort(self, reverse=True):
2282 2282 """get the set to iterate in an ascending or descending order"""
2283 2283 raise NotImplementedError()
2284 2284
2285 2285 def __and__(self, other):
2286 2286 """Returns a new object with the intersection of the two collections.
2287 2287
2288 2288 This is part of the mandatory API for smartset."""
2289 2289 return self.filter(other.__contains__)
2290 2290
2291 2291 def __add__(self, other):
2292 2292 """Returns a new object with the union of the two collections.
2293 2293
2294 2294 This is part of the mandatory API for smartset."""
2295 2295 kwargs = {}
2296 2296 if self.isascending() and other.isascending():
2297 2297 kwargs['ascending'] = True
2298 2298 if self.isdescending() and other.isdescending():
2299 2299 kwargs['ascending'] = False
2300 2300 return addset(self, other, **kwargs)
2301 2301
2302 2302 def __sub__(self, other):
2303 2303 """Returns a new object with the substraction of the two collections.
2304 2304
2305 2305 This is part of the mandatory API for smartset."""
2306 2306 c = other.__contains__
2307 2307 return self.filter(lambda r: not c(r))
2308 2308
2309 2309 def filter(self, condition):
2310 2310 """Returns this smartset filtered by condition as a new smartset.
2311 2311
2312 2312 `condition` is a callable which takes a revision number and returns a
2313 2313 boolean.
2314 2314
2315 2315 This is part of the mandatory API for smartset."""
2316 2316 kwargs = {}
2317 2317 if self.isascending():
2318 2318 kwargs['ascending'] = True
2319 2319 elif self.isdescending():
2320 2320 kwargs['ascending'] = False
2321 2321 return filteredset(self, condition, **kwargs)
2322 2322
2323 2323 class baseset(abstractsmartset):
2324 2324 """Basic data structure that represents a revset and contains the basic
2325 2325 operations that it should be able to perform.
2326 2326
2327 2327 Every method in this class should be implemented by any smartset class.
2328 2328 """
2329 2329 def __init__(self, data=()):
2330 2330 if not isinstance(data, list):
2331 2331 data = list(data)
2332 2332 self._list = data
2333 2333 self._set = None
2334 2334 self._ascending = None
2335 2335
2336 2336 @util.propertycache
2337 2337 def _asclist(self):
2338 2338 asclist = self._list[:]
2339 2339 asclist.sort()
2340 2340 return asclist
2341 2341
2342 2342 def __iter__(self):
2343 2343 if self._ascending is None:
2344 2344 return iter(self._list)
2345 2345 elif self._ascending:
2346 2346 return iter(self._asclist)
2347 2347 else:
2348 2348 return reversed(self._asclist)
2349 2349
2350 2350 def fastasc(self):
2351 2351 return iter(self._asclist)
2352 2352
2353 2353 def fastdesc(self):
2354 2354 return reversed(self._asclist)
2355 2355
2356 2356 def set(self):
2357 2357 """Returns a set or a smartset containing all the elements.
2358 2358
2359 2359 The returned structure should be the fastest option for membership
2360 2360 testing.
2361 2361
2362 2362 This is part of the mandatory API for smartset."""
2363 2363 if not self._set:
2364 2364 self._set = set(self)
2365 2365 return self._set
2366 2366
2367 2367 @util.propertycache
2368 2368 def __contains__(self):
2369 2369 return self.set().__contains__
2370 2370
2371 2371 def __nonzero__(self):
2372 2372 return bool(self._list)
2373 2373
2374 2374 def sort(self, reverse=False):
2375 2375 self._list.sort(reverse=reverse)
2376 2376
2377 2377 def reverse(self):
2378 2378 self._list.reverse()
2379 2379
2380 2380 def __len__(self):
2381 2381 return len(self._list)
2382 2382
2383 2383 def __sub__(self, other):
2384 2384 """Returns a new object with the substraction of the two collections.
2385 2385
2386 2386 This is part of the mandatory API for smartset."""
2387 2387 # If we are operating on 2 baseset, do the computation now since all
2388 2388 # data is available. The alternative is to involve a filteredset, which
2389 2389 # may be slow.
2390 2390 if isinstance(other, baseset):
2391 2391 other = other.set()
2392 2392 return baseset([x for x in self if x not in other])
2393 2393
2394 2394 return self.filter(lambda x: x not in other)
2395 2395
2396 2396 def __and__(self, other):
2397 2397 """Returns a new object with the intersection of the two collections.
2398 2398
2399 2399 This is part of the mandatory API for smartset."""
2400 2400 return baseset([y for y in self if y in other])
2401 2401
2402 2402 def __add__(self, other):
2403 2403 """Returns a new object with the union of the two collections.
2404 2404
2405 2405 This is part of the mandatory API for smartset."""
2406 2406 s = self.set()
2407 2407 l = [r for r in other if r not in s]
2408 2408 return baseset(list(self) + l)
2409 2409
2410 2410 def isascending(self):
2411 2411 """Returns True if the collection is ascending order, False if not.
2412 2412
2413 2413 This is part of the mandatory API for smartset."""
2414 return False
2414 return self._ascending is not None and self._ascending
2415 2415
2416 2416 def isdescending(self):
2417 2417 """Returns True if the collection is descending order, False if not.
2418 2418
2419 2419 This is part of the mandatory API for smartset."""
2420 return False
2420 return self._ascending is not None and not self._ascending
2421 2421
2422 2422 def first(self):
2423 2423 if self:
2424 2424 return self._list[0]
2425 2425 return None
2426 2426
2427 2427 def last(self):
2428 2428 if self:
2429 2429 return self._list[-1]
2430 2430 return None
2431 2431
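An interactive-style sketch of how baseset combines with the operators inherited from abstractsmartset; the revision numbers are arbitrary:

    a = baseset([4, 0, 2])
    b = baseset([1, 2, 3])

    a & b                        # baseset([2]), computed eagerly
    a - b                        # baseset([4, 0]), fast path for two basesets
    a + b                        # addset wrapping both operands lazily
    a.filter(lambda r: r > 1)    # filteredset, evaluated lazily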
2432 2432 class filteredset(abstractsmartset):
2433 2433 """Duck type for baseset class which iterates lazily over the revisions in
2434 2434 the subset and contains a function which tests for membership in the
2435 2435 revset
2436 2436 """
2437 2437 def __init__(self, subset, condition=lambda x: True, ascending=None):
2438 2438 """
2439 2439 condition: a function that decides whether a revision in the subset
2440 2440 belongs to the revset or not.
2441 2441 """
2442 2442 self._subset = subset
2443 2443 self._condition = condition
2444 2444 self._cache = {}
2445 2445 if ascending is not None:
2446 2446 ascending = bool(ascending)
2447 2447 self._ascending = ascending
2448 2448
2449 2449 def __contains__(self, x):
2450 2450 c = self._cache
2451 2451 if x not in c:
2452 2452 v = c[x] = x in self._subset and self._condition(x)
2453 2453 return v
2454 2454 return c[x]
2455 2455
2456 2456 def __iter__(self):
2457 2457 return self._iterfilter(self._subset)
2458 2458
2459 2459 def _iterfilter(self, it):
2460 2460 cond = self._condition
2461 2461 for x in it:
2462 2462 if cond(x):
2463 2463 yield x
2464 2464
2465 2465 @property
2466 2466 def fastasc(self):
2467 2467 it = self._subset.fastasc
2468 2468 if it is None:
2469 2469 return None
2470 2470 return lambda: self._iterfilter(it())
2471 2471
2472 2472 @property
2473 2473 def fastdesc(self):
2474 2474 it = self._subset.fastdesc
2475 2475 if it is None:
2476 2476 return None
2477 2477 return lambda: self._iterfilter(it())
2478 2478
2479 2479 def __nonzero__(self):
2480 2480 for r in self:
2481 2481 return True
2482 2482 return False
2483 2483
2484 2484 def __len__(self):
2485 2485 # Basic implementation to be changed in future patches.
2486 2486 l = baseset([r for r in self])
2487 2487 return len(l)
2488 2488
2489 2489 def __getitem__(self, x):
2490 2490 # Basic implementation to be changed in future patches.
2491 2491 l = baseset([r for r in self])
2492 2492 return l[x]
2493 2493
2494 2494 def sort(self, reverse=False):
2495 2495 if self._ascending is None:
2496 2496 if not util.safehasattr(self._subset, 'sort'):
2497 2497 self._subset = baseset(self._subset)
2498 2498 self._subset.sort(reverse=reverse)
2499 2499 self._ascending = not reverse
2500 2500 elif bool(reverse) == self._ascending:
2501 2501 self.reverse()
2502 2502
2503 2503 def reverse(self):
2504 2504 self._subset.reverse()
2505 2505 if self._ascending is not None:
2506 2506 self._ascending = not self._ascending
2507 2507
2508 2508 def set(self):
2509 2509 return set([r for r in self])
2510 2510
2511 2511 def isascending(self):
2512 2512 return self._ascending is not None and self._ascending
2513 2513
2514 2514 def isdescending(self):
2515 2515 return self._ascending is not None and not self._ascending
2516 2516
2517 2517 def first(self):
2518 2518 for x in self:
2519 2519 return x
2520 2520 return None
2521 2521
2522 2522 def last(self):
2523 2523 it = None
2524 2524 if self._ascending is not None:
2525 2525 if self._ascending:
2526 2526 it = self.fastdesc
2527 2527 else:
2528 2528 it = self.fastasc
2529 2529 if it is None:
2530 2530 # slowly consume everything. This needs improvement
2531 2531 it = lambda: reversed(list(self))
2532 2532 for x in it():
2533 2533 return x
2534 2534 return None
2535 2535
2536 2536 class addset(abstractsmartset):
2537 2537 """Represent the addition of two sets
2538 2538
2539 2539 Wrapper structure for lazily adding two structures without losing much
2540 2540 performance on the __contains__ method
2541 2541
2542 2542 If the ascending attribute is set, that means the two structures are
2543 2543 ordered in either an ascending or descending way. Therefore, we can add
2544 2544 them while maintaining the order by iterating over both at the same time.
2545 2545 """
2546 2546 def __init__(self, revs1, revs2, ascending=None):
2547 2547 self._r1 = revs1
2548 2548 self._r2 = revs2
2549 2549 self._iter = None
2550 2550 self._ascending = ascending
2551 2551 self._genlist = None
2552 2552
2553 2553 def __len__(self):
2554 2554 return len(self._list)
2555 2555
2556 2556 def __nonzero__(self):
2557 2557 return bool(self._r1 or self._r2)
2558 2558
2559 2559 @util.propertycache
2560 2560 def _list(self):
2561 2561 if not self._genlist:
2562 2562 self._genlist = baseset(self._iterator())
2563 2563 return self._genlist
2564 2564
2565 2565 def _iterator(self):
2566 2566 """Iterate over both collections without repeating elements
2567 2567
2568 2568 If the ascending attribute is not set, iterate over the first one and
2569 2569 then over the second one, checking for membership in the first, so we
2570 2570 don't yield any duplicates.
2571 2571
2572 2572 If the ascending attribute is set, iterate over both collections at the
2573 2573 same time, yielding only one value at a time in the given order.
2574 2574 """
2575 2575 if self._ascending is None:
2576 2576 def gen():
2577 2577 for r in self._r1:
2578 2578 yield r
2579 2579 s = self._r1.set()
2580 2580 for r in self._r2:
2581 2581 if r not in s:
2582 2582 yield r
2583 2583 gen = gen()
2584 2584 else:
2585 2585 iter1 = iter(self._r1)
2586 2586 iter2 = iter(self._r2)
2587 2587 gen = self._iterordered(self._ascending, iter1, iter2)
2588 2588 return gen
2589 2589
2590 2590 def __iter__(self):
2591 2591 if self._genlist:
2592 2592 return iter(self._genlist)
2593 2593 return iter(self._iterator())
2594 2594
2595 2595 @property
2596 2596 def fastasc(self):
2597 2597 iter1 = self._r1.fastasc
2598 2598 iter2 = self._r2.fastasc
2599 2599 if None in (iter1, iter2):
2600 2600 return None
2601 2601 return lambda: self._iterordered(True, iter1(), iter2())
2602 2602
2603 2603 @property
2604 2604 def fastdesc(self):
2605 2605 iter1 = self._r1.fastdesc
2606 2606 iter2 = self._r2.fastdesc
2607 2607 if None in (iter1, iter2):
2608 2608 return None
2609 2609 return lambda: self._iterordered(False, iter1(), iter2())
2610 2610
2611 2611 def _iterordered(self, ascending, iter1, iter2):
2612 2612 """produce an ordered iteration from two iterators with the same order
2613 2613
2614 2614 The ascending parameter indicates the iteration direction.
2615 2615 """
2616 2616 choice = max
2617 2617 if ascending:
2618 2618 choice = min
2619 2619
2620 2620 val1 = None
2621 2621 val2 = None
2622 2622
2626 2626 try:
2627 2627 # Consume both iterators in an ordered way until one is
2628 2628 # empty
2629 2629 while True:
2630 2630 if val1 is None:
2631 2631 val1 = iter1.next()
2632 2632 if val2 is None:
2633 2633 val2 = iter2.next()
2634 2634 next = choice(val1, val2)
2635 2635 yield next
2636 2636 if val1 == next:
2637 2637 val1 = None
2638 2638 if val2 == next:
2639 2639 val2 = None
2640 2640 except StopIteration:
2641 2641 # Flush any remaining values and consume the other one
2642 2642 it = iter2
2643 2643 if val1 is not None:
2644 2644 yield val1
2645 2645 it = iter1
2646 2646 elif val2 is not None:
2647 2647 # might have been equality and both are empty
2648 2648 yield val2
2649 2649 for val in it:
2650 2650 yield val
2651 2651
2652 2652 def __contains__(self, x):
2653 2653 return x in self._r1 or x in self._r2
2654 2654
2655 2655 def set(self):
2656 2656 return self
2657 2657
2658 2658 def sort(self, reverse=False):
2659 2659 """Sort the added set
2660 2660
2661 2661 For this we use the cached list with all the generated values and if we
2662 2662 know they are ascending or descending we can sort them in a smart way.
2663 2663 """
2664 2664 if self._ascending is None:
2665 2665 self._list.sort(reverse=reverse)
2666 2666 self._ascending = not reverse
2667 2667 else:
2668 2668 if bool(self._ascending) == bool(reverse):
2669 2669 self.reverse()
2670 2670
2671 2671 def isascending(self):
2672 2672 return self._ascending is not None and self._ascending
2673 2673
2674 2674 def isdescending(self):
2675 2675 return self._ascending is not None and not self._ascending
2676 2676
2677 2677 def reverse(self):
2678 2678 self._list.reverse()
2679 2679 if self._ascending is not None:
2680 2680 self._ascending = not self._ascending
2681 2681
2682 2682 def first(self):
2683 2683 if self:
2684 2684 return self._list.first()
2685 2685 return None
2686 2686
2687 2687 def last(self):
2688 2688 if self:
2689 2689 return self._list.last()
2690 2690 return None
2691 2691
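A minimal sketch of the ordered merge performed by _iterordered(); both inputs are already ascending here, so the union streams out in order and the shared value is yielded only once:

    a = baseset([1, 3, 5])
    b = baseset([2, 3, 4])
    merged = addset(a, b, ascending=True)
    list(merged)      # [1, 2, 3, 4, 5]
    3 in merged       # True, checked against either operand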
2692 2692 class generatorset(abstractsmartset):
2693 2693 """Wrap a generator for lazy iteration
2694 2694
2695 2695 Wrapper structure for generators that provides lazy membership and can
2696 2696 be iterated more than once.
2697 2697 When asked for membership it generates values until either it finds the
2698 2698 requested one or has gone through all the elements in the generator
2699 2699 """
2700 2700 def __init__(self, gen, iterasc=None):
2701 2701 """
2702 2702 gen: a generator producing the values for the generatorset.
2703 2703 """
2704 2704 self._gen = gen
2705 2705 self._asclist = None
2706 2706 self._cache = {}
2707 2707 self._genlist = []
2708 2708 self._finished = False
2709 2709 self._ascending = True
2710 2710 if iterasc is not None:
2711 2711 if iterasc:
2712 2712 self.fastasc = self._iterator
2713 2713 self.__contains__ = self._asccontains
2714 2714 else:
2715 2715 self.fastdesc = self._iterator
2716 2716 self.__contains__ = self._desccontains
2717 2717
2718 2718 def __nonzero__(self):
2719 2719 for r in self:
2720 2720 return True
2721 2721 return False
2722 2722
2723 2723 def __contains__(self, x):
2724 2724 if x in self._cache:
2725 2725 return self._cache[x]
2726 2726
2727 2727 # Use new values only, as existing values would be cached.
2728 2728 for l in self._consumegen():
2729 2729 if l == x:
2730 2730 return True
2731 2731
2732 2732 self._cache[x] = False
2733 2733 return False
2734 2734
2735 2735 def _asccontains(self, x):
2736 2736 """version of contains optimised for ascending generator"""
2737 2737 if x in self._cache:
2738 2738 return self._cache[x]
2739 2739
2740 2740 # Use new values only, as existing values would be cached.
2741 2741 for l in self._consumegen():
2742 2742 if l == x:
2743 2743 return True
2744 2744 if l > x:
2745 2745 break
2746 2746
2747 2747 self._cache[x] = False
2748 2748 return False
2749 2749
2750 2750 def _desccontains(self, x):
2751 2751 """version of contains optimised for descending generator"""
2752 2752 if x in self._cache:
2753 2753 return self._cache[x]
2754 2754
2755 2755 # Use new values only, as existing values would be cached.
2756 2756 for l in self._consumegen():
2757 2757 if l == x:
2758 2758 return True
2759 2759 if l < x:
2760 2760 break
2761 2761
2762 2762 self._cache[x] = False
2763 2763 return False
2764 2764
2765 2765 def __iter__(self):
2766 2766 if self._ascending:
2767 2767 it = self.fastasc
2768 2768 else:
2769 2769 it = self.fastdesc
2770 2770 if it is not None:
2771 2771 return it()
2772 2772 # we need to consume the iterator
2773 2773 for x in self._consumegen():
2774 2774 pass
2775 2775 # recall the same code
2776 2776 return iter(self)
2777 2777
2778 2778 def _iterator(self):
2779 2779 if self._finished:
2780 2780 return iter(self._genlist)
2781 2781
2782 2782 # We have to use this complex iteration strategy to allow multiple
2783 2783 # iterations at the same time. We need to be able to catch revisions
2784 2784 # removed from `consumegen` and added to genlist by another iterator instance.
2785 2785 #
2786 2786 # Getting rid of it would provide about a 15% speed-up on this
2787 2787 # iteration.
2788 2788 genlist = self._genlist
2789 2789 nextrev = self._consumegen().next
2790 2790 _len = len # cache global lookup
2791 2791 def gen():
2792 2792 i = 0
2793 2793 while True:
2794 2794 if i < _len(genlist):
2795 2795 yield genlist[i]
2796 2796 else:
2797 2797 yield nextrev()
2798 2798 i += 1
2799 2799 return gen()
2800 2800
2801 2801 def _consumegen(self):
2802 2802 cache = self._cache
2803 2803 genlist = self._genlist.append
2804 2804 for item in self._gen:
2805 2805 cache[item] = True
2806 2806 genlist(item)
2807 2807 yield item
2808 2808 if not self._finished:
2809 2809 self._finished = True
2810 2810 asc = self._genlist[:]
2811 2811 asc.sort()
2812 2812 self._asclist = asc
2813 2813 self.fastasc = asc.__iter__
2814 2814 self.fastdesc = asc.__reversed__
2815 2815
2816 2816 def set(self):
2817 2817 return self
2818 2818
2819 2819 def sort(self, reverse=False):
2820 2820 self._ascending = not reverse
2821 2821
2822 2822 def reverse(self):
2823 2823 self._ascending = not self._ascending
2824 2824
2825 2825 def isascending(self):
2826 2826 return self._ascending
2827 2827
2828 2828 def isdescending(self):
2829 2829 return not self._ascending
2830 2830
2831 2831 def first(self):
2832 2832 if self._ascending:
2833 2833 it = self.fastasc
2834 2834 else:
2835 2835 it = self.fastdesc
2836 2836 if it is None:
2837 2837 # we need to consume all and try again
2838 2838 for x in self._consumegen():
2839 2839 pass
2840 2840 return self.first()
2841 2841 if self:
2842 2842 return it.next()
2843 2843 return None
2844 2844
2845 2845 def last(self):
2846 2846 if self._ascending:
2847 2847 it = self.fastdesc
2848 2848 else:
2849 2849 it = self.fastasc
2850 2850 if it is None:
2851 2851 # we need to consume all and try again
2852 2852 for x in self._consumegen():
2853 2853 pass
2854 2854 return self.first()
2855 2855 if self:
2856 2856 return it.next()
2857 2857 return None
2858 2858
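A sketch of the lazy behaviour described above; the wrapped generator is just an iterator over a small ascending list:

    gs = generatorset(iter([0, 1, 2, 5]), iterasc=True)
    2 in gs       # True; only 0, 1 and 2 are pulled from the generator so far
    list(gs)      # [0, 1, 2, 5]; already-seen values come from the cache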
2859 2859 def spanset(repo, start=None, end=None):
2860 2860 """factory function to dispatch between fullreposet and actual spanset
2861 2861
2862 2862 Feel free to update all spanset call sites and kill this function at some
2863 2863 point.
2864 2864 """
2865 2865 if start is None and end is None:
2866 2866 return fullreposet(repo)
2867 2867 return _spanset(repo, start, end)
2868 2868
2869 2869
2870 2870 class _spanset(abstractsmartset):
2871 2871 """Duck type for baseset class which represents a range of revisions and
2872 2872 can work lazily and without having all the range in memory
2873 2873
2874 2874 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
2875 2875 notable points:
2876 2876 - when x > y the set will automatically iterate in descending order,
2877 2877 - revisions filtered by this repoview will be skipped.
2878 2878
2879 2879 """
2880 2880 def __init__(self, repo, start=0, end=None):
2881 2881 """
2882 2882 start: first revision included in the set
2883 2883 (defaults to 0)
2884 2884 end: first revision excluded (last+1)
2885 2885 (defaults to len(repo))
2886 2886
2887 2887 Spanset will be descending if `end` < `start`.
2888 2888 """
2889 2889 if end is None:
2890 2890 end = len(repo)
2891 2891 self._ascending = start <= end
2892 2892 if not self._ascending:
2893 2893 start, end = end + 1, start +1
2894 2894 self._start = start
2895 2895 self._end = end
2896 2896 self._hiddenrevs = repo.changelog.filteredrevs
2897 2897
2898 2898 def sort(self, reverse=False):
2899 2899 self._ascending = not reverse
2900 2900
2901 2901 def reverse(self):
2902 2902 self._ascending = not self._ascending
2903 2903
2904 2904 def _iterfilter(self, iterrange):
2905 2905 s = self._hiddenrevs
2906 2906 for r in iterrange:
2907 2907 if r not in s:
2908 2908 yield r
2909 2909
2910 2910 def __iter__(self):
2911 2911 if self._ascending:
2912 2912 return self.fastasc()
2913 2913 else:
2914 2914 return self.fastdesc()
2915 2915
2916 2916 def fastasc(self):
2917 2917 iterrange = xrange(self._start, self._end)
2918 2918 if self._hiddenrevs:
2919 2919 return self._iterfilter(iterrange)
2920 2920 return iter(iterrange)
2921 2921
2922 2922 def fastdesc(self):
2923 2923 iterrange = xrange(self._end - 1, self._start - 1, -1)
2924 2924 if self._hiddenrevs:
2925 2925 return self._iterfilter(iterrange)
2926 2926 return iter(iterrange)
2927 2927
2928 2928 def __contains__(self, rev):
2929 2929 hidden = self._hiddenrevs
2930 2930 return ((self._start <= rev < self._end)
2931 2931 and not (hidden and rev in hidden))
2932 2932
2933 2933 def __nonzero__(self):
2934 2934 for r in self:
2935 2935 return True
2936 2936 return False
2937 2937
2938 2938 def __len__(self):
2939 2939 if not self._hiddenrevs:
2940 2940 return abs(self._end - self._start)
2941 2941 else:
2942 2942 count = 0
2943 2943 start = self._start
2944 2944 end = self._end
2945 2945 for rev in self._hiddenrevs:
2946 2946 if (end < rev <= start) or (start <= rev < end):
2947 2947 count += 1
2948 2948 return abs(self._end - self._start) - count
2949 2949
2950 2950 def __getitem__(self, x):
2951 2951 # Basic implementation to be changed in future patches.
2952 2952 l = baseset([r for r in self])
2953 2953 return l[x]
2954 2954
2955 2955 def set(self):
2956 2956 return self
2957 2957
2958 2958 def isascending(self):
2959 2959 return self._start <= self._end
2960 2960
2961 2961 def isdescending(self):
2962 2962 return self._start >= self._end
2963 2963
2964 2964 def first(self):
2965 2965 if self._ascending:
2966 2966 it = self.fastasc
2967 2967 else:
2968 2968 it = self.fastdesc
2969 2969 for x in it():
2970 2970 return x
2971 2971 return None
2972 2972
2973 2973 def last(self):
2974 2974 if self._ascending:
2975 2975 it = self.fastdesc
2976 2976 else:
2977 2977 it = self.fastasc
2978 2978 for x in it():
2979 2979 return x
2980 2980 return None
2981 2981
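A hedged sketch of the range semantics; repo is a hypothetical repository with at least five revisions and no hidden ones:

    list(spanset(repo, 0, 5))    # [0, 1, 2, 3, 4]  ascending, end excluded
    list(spanset(repo, 4, 0))    # [4, 3, 2, 1]     start > end, so descending
    len(spanset(repo))           # len(repo), via the fullreposet special case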
2982 2982 class fullreposet(_spanset):
2983 2983 """a set containing all revisions in the repo
2984 2984
2985 2985 This class exists to host special optimisation.
2986 2986 """
2987 2987
2988 2988 def __init__(self, repo):
2989 2989 super(fullreposet, self).__init__(repo)
2990 2990
2991 2991 def __and__(self, other):
2992 2992 """fullrepo & other -> other
2993 2993
2994 2994 As self contains the whole repo, all of the other set should also be in
2995 2995 self. Therefore `self & other = other`.
2996 2996
2997 2997 This boldly assumes the other contains valid revs only.
2998 2998 """
2999 2999 # other is not a smartset, make it so
3000 3000 if not util.safehasattr(other, 'set'):
3001 3001 # filter out hidden revisions
3002 3002 # (this boldly assumes all smartsets are pure)
3003 3003 #
3004 3004 # `other` was used with "&", let's assume this is a set like
3005 3005 # object.
3006 3006 other = baseset(other - self._hiddenrevs)
3007 3007 elif not util.safehasattr(other, 'ascending'):
3008 3008 # "other" is generatorset not a real smart set
3009 3009 # we fallback to the old way (sad kitten)
3010 3010 return super(fullreposet, self).__and__(other)
3011 3011
3012 3012 # preserve order:
3013 3013 #
3014 3014 # this is probably useless and harmful in multiple cases but matches
3015 3015 # the current behavior.
3016 3016 if self.isascending():
3017 3017 other.ascending()
3018 3018 else:
3019 3019 other.descending()
3020 3020 return other
3021 3021
3022 3022 # tell hggettext to extract docstrings from these functions:
3023 3023 i18nfunctions = symbols.values()