##// END OF EJS Templates
filelog: remove unused variable 'lkr'...
Martin von Zweigbergk -
r23820:60178888 default
parent child Browse files
Show More
@@ -1,3112 +1,3112 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # cut == 1 limits parentrevs() slices to the first parent; None keeps both.
    cut = followfirst and 1 or None
    cl = repo.changelog

    def iterate():
        revqueue, revsnode = None, None
        # h is a min-heap of *negated* revs, so popping yields the highest
        # revision first (descending iteration).
        h = []

        revs.sort(reverse=True)
        revqueue = util.deque(revs)
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        seen = set([node.nullrev])
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                # When we reach the next seed rev, feed the following seed
                # into the heap so all inputs are merged into one stream.
                if revsnode and current == revsnode:
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
49 49
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # cut == 1 restricts parentrevs() to the first parent; None keeps both.
    cut = followfirst and 1 or None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending sweep: a rev is a descendant iff one of its
            # (possibly cut) parents is already in 'seen'.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
73 73
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    # seen maps each visited rev to its parent tuple, doubling as the
    # visited-set and as a parent cache for the second pass below.
    seen = {}
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # revs below the lowest root cannot lie on a root->head path
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # Ascending pass: mark a rev reachable when a parent already is,
    # propagating membership from roots up toward the heads.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
103 103
# Operator table for the revset parser: token -> (binding strength,
# prefix spec, infix spec[, suffix spec]).  Each spec is a tuple of
# (parse-tree node name, strength[, closing token]) consumed by parser.py.
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# Words that are operators even though they lex like symbols.
keywords = set(['and', 'or', 'not'])
131 131
def tokenize(program, lookup=None):
    '''
    Parse a revset statement into a stream of tokens

    Yields (type, value, position) triples; 'lookup', when given, is a
    callable used to decide whether a dash-containing word is a real
    symbol (e.g. a branch name) or an expression like "foo-bar".

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                # py2 codec that interprets backslash escapes
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c.isalnum() or c in '._@' or ord(c) > 127:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in "-._/@" or ord(d) > 127):
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
217 217
218 218 # helpers
219 219
def getstring(x, err):
    """Return the text of a ('string'|'symbol', value) parse node.

    Any other node shape raises error.ParseError(err).
    """
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
224 224
def getlist(x):
    """Flatten a left-nested ('list', rest, item) parse tree into a list.

    A falsy node yields []; any non-'list' node is a single-element list.
    """
    items = []
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
231 231
def getargs(x, min, max, err):
    """Return the flattened argument list of x, enforcing arity.

    max < 0 means "no upper bound".  Raises error.ParseError(err) when the
    argument count falls outside [min, max].
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if 0 <= max < nargs:
        raise error.ParseError(err)
    return args
237 237
def getset(repo, subset, x):
    """Evaluate parse tree x against subset; always return a smartset."""
    if not x:
        raise error.ParseError(_("missing argument"))
    # dispatch on the node type via the module-level 'methods' table
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset-like object
        return s
    return baseset(s)
245 245
def _getrevsource(repo, r):
    """Return the rev recorded as r's source by graft/transplant/rebase.

    Returns None when no source marker exists or the recorded node cannot
    be resolved in this repository.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # stale marker pointing outside this repo; keep looking
            pass
    return None
255 255
256 256 # operator methods
257 257
def stringset(repo, subset, x):
    # Resolve a revision identifier and intersect it with subset.
    x = repo[x].rev()
    # nullrev is implicitly in every full subset even if not listed
    if x == -1 and len(subset) == len(repo):
        return baseset([-1])
    if x in subset:
        return baseset([x])
    return baseset()
265 265
def symbolset(repo, subset, x):
    # A bare symbol: reject function names used without parentheses,
    # otherwise treat it as a revision identifier.
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)
270 270
def rangeset(repo, subset, x, y):
    # 'x:y' — span from the first rev of x to the last rev of y,
    # descending when x starts above y.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range; spanset end is exclusive, hence n - 1
        r = spanset(repo, m, n - 1)
    return r & subset
284 284
def dagrange(repo, subset, x, y):
    # 'x::y' — all revs on DAG paths from x down to y, endpoints included.
    r = spanset(repo)
    xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    return xs & subset
289 289
def andset(repo, subset, x, y):
    # 'x and y' — evaluate y within the result of x.
    return getset(repo, getset(repo, subset, x), y)
292 292
def orset(repo, subset, x, y):
    # 'x or y' — evaluate y only over what x did not already match.
    xl = getset(repo, subset, x)
    yl = getset(repo, subset - xl, y)
    return xl + yl
297 297
def notset(repo, subset, x):
    # 'not x' — complement of x within subset.
    return subset - getset(repo, subset, x)
300 300
def listset(repo, subset, a, b):
    # Bare 'a, b' lists are only valid as function arguments.
    raise error.ParseError(_("can't use a list in this context"))
303 303
def func(repo, subset, a, b):
    # 'name(args)' — dispatch to the registered predicate in 'symbols'.
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.ParseError(_("not a function: %s") % a[1])
308 308
309 309 # functions
310 310
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status() is the list of added files
    return checkstatus(repo, subset, pat, 1)
322 322
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = spanset(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold the pairwise ancestor operation over every rev of every argument
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
347 347
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation of ancestors()/_firstancestors().
    heads = getset(repo, spanset(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
354 354
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
360 360
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
365 365
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a ('symbol'|'string', value) parse node
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
382 382
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # comparison is case-insensitive: lower both pattern and user names
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
391 391
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state
408 408
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
413 413
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise util.Abort(_("bookmark '%s' does not exist") % bm)
            bms.add(repo[bmrev].rev())
        else:
            # 're:' pattern: collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise util.Abort(_("no bookmarks exist that match '%s'")
                                 % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
450 450
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # branch info is read from the unfiltered repo so the cache covers
    # hidden revisions too
    import branchmap
    urepo = repo.unfiltered()
    ucl = urepo.changelog
    getbi = branchmap.revbranchcache(urepo).branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))

    # argument is a revset: select every rev on any branch that a rev of
    # the argument set belongs to
    s = getset(repo, spanset(repo), x)
    b = set()
    for r in s:
        b.add(getbi(ucl, r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
486 486
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
497 497
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo objects expose bundlerevs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
509 509
def checkstatus(repo, subset, pat, field):
    """Filter subset to csets whose status()[field] matches pat.

    field indexes repo.status() output (0=modified, 1=added, 2=removed).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # cache the match object across revisions; fileset patterns depend on
    # the changectx and must be rebuilt every time
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # single literal file name: cheap pre-check against c.files()
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
541 541
def _children(repo, narrow, parentset):
    # Return members of 'narrow' that have a parent in 'parentset'.
    cs = set()
    if not parentset:
        return baseset(cs)
    pr = repo.changelog.parentrevs
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            # children are always numbered above their parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)
555 555
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
563 563
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
571 571
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test, no scan needed
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest of the revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
598 598
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' extra is written by the convert extension
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
620 620
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the unix timestamp of the changeset
    return subset.filter(lambda x: dm(repo[x].date()[0]))
629 629
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        # plain case-folded substring search, not a regex
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
642 642
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation of descendants()/_firstdescendants().
    roots = getset(repo, spanset(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to set intersection
        result = subset & result
    return result
661 661
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
667 667
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
672 672
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, spanset(repo), x)
    else:
        sources = getall(repo, spanset(repo), x)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
716 716
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
725 725
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.draft
    condition = lambda r: phase(repo, r) == target
    # cache=False: phases can move while the revset is alive
    return subset.filter(condition, cache=False)
735 735
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
744 744
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # value is None when only the label was given: any value matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
771 771
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single file, no working-directory walk needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: changerev -> filerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = []     # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            # NOTE: former "lkr = rev = ..." — 'lkr' was unused here (it is
            # only needed in the delayed-processing loop below).
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            # remember the original (filtered) linkrev to detect failure below
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr:  # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
886 886
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    return limit(repo, subset, x)
892 892
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst().
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        # follow the history of a single file from the working parent
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            return baseset()
    else:
        # no file: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
909 909
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    return _follow(repo, subset, x, 'follow')
917 917
def _followfirst(repo, subset, x):
    # ``_followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
923 923
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # The incoming subset is already restricted to valid revisions, so it
    # is itself the answer.
    return subset
931 931
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        rx = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # scan the changed files, the user name and the description
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if rx.search(text):
                return True
        return False

    return subset.filter(matches)
952 952
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # Sort each argument into its bucket by its two-character prefix.
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # Build a single match object from the accumulated patterns; note that
    # repo[None] (rev is None) means the working directory context.
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches if any of its changed files does
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1008 1008
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # Delegate to the _matchfiles machinery with a single plain pattern.
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1021 1021
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    # Collect the revision of every head on every branch.
    hs = set()
    for bname, nodes in repo.branchmap().iteritems():
        for n in nodes:
            hs.add(repo[n].rev())
    return baseset(hs).filter(subset.__contains__)
1032 1032
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # A head of the set is any member that is not a parent of another member.
    return members - parents(repo, subset, x)
1040 1040
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # Everything filtered out of the 'visible' view is hidden.
    return subset & repoview.filterrevs(repo, 'visible')
1049 1049
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # case-insensitive scan of files, user and description
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1064 1064
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
        except (TypeError, ValueError):
            # i18n: "limit" is a keyword
            raise error.ParseError(_("limit expects a number"))
    os = getset(repo, spanset(repo), l[0])
    result = []
    it = iter(os)
    # Look at the first lim revisions of the ordered set, keeping only
    # those that are also in the input subset.
    for dummy in xrange(lim):
        try:
            y = next(it)
        except StopIteration:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1091 1091
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    os = getset(repo, spanset(repo), l[0])
    # Walk the ordered set from its end by reversing it first.
    os.reverse()
    result = []
    it = iter(os)
    for dummy in xrange(lim):
        try:
            y = next(it)
        except StopIteration:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1119 1119
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, spanset(repo), x)
    if not os:
        return baseset()
    m = os.max()
    # only keep the maximum if it survives the subset restriction
    if m in subset:
        return baseset([m])
    return baseset()
1130 1130
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a real (non-nullrev) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1139 1139
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    baserev = min(subset)
    # childcount[r - baserev] counts the children of revision r; only
    # revisions at or above the smallest member of subset can matter.
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1156 1156
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, spanset(repo), x)
    if not os:
        return baseset()
    m = os.min()
    # only keep the minimum if it survives the subset restriction
    if m in subset:
        return baseset([m])
    return baseset()
1167 1167
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == modified files
    return checkstatus(repo, subset, pat, 0)
1179 1179
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # a full 40-char hex node: direct lookup
        rn = repo[n].rev()
    else:
        # a prefix: resolve it through the changelog, None if unknown
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1200 1200
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1208 1208
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0])
    if len(args) == 2:
        exclude = getset(repo, spanset(repo), args[1])
    else:
        if not include:
            return baseset()
        # With one argument, exclude the ancestors of every repo head that
        # neither belongs to nor descends from the include set.
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1232 1232
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = getall(repo, spanset(repo), x)
    else:
        dests = getset(repo, spanset(repo), x)

    def _firstsrc(rev):
        # Walk the graft/transplant/rebase source chain all the way back
        # to the original source; None if rev has no recorded source.
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    origins = set(_firstsrc(r) for r in dests)
    origins.discard(None)
    return subset & origins
1261 1261
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # resolve the path through the configured aliases (default-push first)
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence remote discovery chatter while we compute the outgoing set
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1284 1284
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0] for r in getset(repo, spanset(repo), x))
    ps.discard(node.nullrev)
    return subset & ps
1301 1301
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # no second parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, spanset(repo), x):
        ps.add(cl.parentrevs(r)[1])
    ps.discard(node.nullrev)
    return subset & ps
1322 1322
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, spanset(repo), x):
            ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1336 1336
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                ps.add(prevs[1])
    return subset & ps
1361 1361
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and return nothing instead
        return baseset()
1375 1375
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    # phases may change, so don't cache the filter result
    return subset.filter(ispublic, cache=False)
1385 1385
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    # default identifier is '.', which resolves to the current branch name
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1420 1420
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 == removed files
    return checkstatus(repo, subset, pat, 2)
1432 1432
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # unknown revision numbers yield the empty set rather than an error
    if r not in fullreposet(repo):
        return baseset()
    return subset & baseset([r])
1448 1448
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is an alias for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # x matches if all selected fields agree with at least one of the
        # reference revisions in revs
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1560 1560
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    revs = getset(repo, subset, x)
    # in-place reversal preserves the smartset type
    revs.reverse()
    return revs
1568 1568
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, spanset(repo), x)
    # restrict to members actually present in the input subset
    subset = baseset([r for r in s if r in subset])
    # a root is any member that is not a child of another member
    return subset - _children(repo, subset, s)
1577 1577
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phase = repo._phasecache.phase

    def issecret(r):
        return phase(repo, r) == phases.secret

    # phases may change, so don't cache the filter result
    return subset.filter(issecret, cache=False)
1587 1587
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map a string to its ordering complement so that an ascending
        # sort on the result yields descending string order
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: plain revision-number sorts need no per-key decoration
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # decorate-sort-undecorate: build a sort-key tuple per revision
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing revision number breaks ties and is recovered below
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1650 1650
1651 1651 def _stringmatcher(pattern):
1652 1652 """
1653 1653 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1654 1654 returns the matcher name, pattern, and matcher function.
1655 1655 missing or unknown prefixes are treated as literal matches.
1656 1656
1657 1657 helper for tests:
1658 1658 >>> def test(pattern, *tests):
1659 1659 ... kind, pattern, matcher = _stringmatcher(pattern)
1660 1660 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1661 1661
1662 1662 exact matching (no prefix):
1663 1663 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1664 1664 ('literal', 'abcdefg', [False, False, True])
1665 1665
1666 1666 regex matching ('re:' prefix)
1667 1667 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1668 1668 ('re', 'a.+b', [False, False, True])
1669 1669
1670 1670 force exact matches ('literal:' prefix)
1671 1671 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1672 1672 ('literal', 're:foobar', [False, True])
1673 1673
1674 1674 unknown prefixes are ignored and treated as literals
1675 1675 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1676 1676 ('literal', 'foo:bar', [False, False, True])
1677 1677 """
1678 1678 if pattern.startswith('re:'):
1679 1679 pattern = pattern[3:]
1680 1680 try:
1681 1681 regex = re.compile(pattern)
1682 1682 except re.error, e:
1683 1683 raise error.ParseError(_('invalid regular expression: %s')
1684 1684 % e)
1685 1685 return 're', pattern, regex.search
1686 1686 elif pattern.startswith('literal:'):
1687 1687 pattern = pattern[8:]
1688 1688 return 'literal', pattern, pattern.__eq__
1689 1689
def _substringmatcher(pattern):
    # Like _stringmatcher(), but literal patterns match as substrings
    # instead of whole strings.
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
1695 1695
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tag except the special 'tip' tag
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & s
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise util.Abort(_("tag '%s' does not exist") % pattern)
        s = set([repo[tn].rev()])
    else:
        s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & s
1723 1723
def tagged(repo, subset, x):
    # Alias of tag(), kept for backwards compatibility.
    return tag(repo, subset, x)
1726 1726
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
1735 1735
1736 1736
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # identical semantics to author(); this is just the alternate name
    return author(repo, subset, x)
1746 1746
1747 1747 # for internal use
def _list(repo, subset, x):
    # Expand a '\0'-separated list of revision identifiers.
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    revs = [repo[r].rev() for r in spec.split('\0')]
    return baseset([r for r in revs if r in subset])
1755 1755
1756 1756 # for internal use
def _intlist(repo, subset, x):
    # Expand a '\0'-separated list of integer revision numbers.
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    revs = [int(r) for r in spec.split('\0')]
    return baseset([r for r in revs if r in subset])
1764 1764
1765 1765 # for internal use
def _hexlist(repo, subset, x):
    # Expand a '\0'-separated list of hex-encoded binary nodes.
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    cl = repo.changelog
    revs = [cl.rev(node.bin(r)) for r in spec.split('\0')]
    return baseset([r for r in revs if r in subset])
1774 1774
# Map of revset predicate names to their implementations; names with a
# leading underscore are internal-only and not exposed in the revset
# grammar documentation.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
1845 1845
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# (names here must match keys of the `symbols` table above)
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "_list",
    "_intlist",
    "_hexlist",
])
1917 1917
# mapping of parse-tree node types to the functions that evaluate them;
# consulted for every non-leaf node while walking a parsed revset tree
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}
1934 1934
def optimize(x, small):
    """Rewrite parse tree `x` into a cheaper-to-evaluate equivalent.

    Returns a (weight, tree) pair.  The weight is a rough cost estimate
    used to put the cheaper operand of an 'and' first, so the expensive
    side only has to consider an already-narrowed subset.  `small` hints
    that the result is expected to be a small set (e.g. an operand of
    'and'), which makes single revisions slightly cheaper.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5  # favor cheap single-revision operands when narrowing

    op = x[0]
    # rewrite syntactic sugar into its canonical form, then recurse
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first so the expensive one only
        # sees the narrowed subset
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        # unlike 'and', 'or' preserves operand order (it affects the
        # iteration order of the result), so the operands are not
        # reordered; the union costs as much as its most expensive side.
        # (a weight swap formerly performed here had no observable
        # effect, since max() is symmetric, and was removed.)
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses are purely syntactic at this point
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # rough per-predicate cost estimates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2030 2030
2031 2031 _aliasarg = ('func', ('symbol', '_aliasarg'))
2032 2032 def _getaliasarg(tree):
2033 2033 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2034 2034 return X, None otherwise.
2035 2035 """
2036 2036 if (len(tree) == 3 and tree[:2] == _aliasarg
2037 2037 and tree[2][0] == 'string'):
2038 2038 return tree[2][1]
2039 2039 return None
2040 2040
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.ParseError(_("not a function: %s") % '_aliasarg')
    for subtree in tree:
        _checkaliasarg(subtree, known)
2051 2051
class revsetalias(object):
    # matches function-style alias declarations: "name(arg1, arg2, ...)"
    funcre = re.compile('^([^(]+)\(([^)]+)\)$')
    # list of formal argument names, or None for argument-less aliases
    args = None

    # error message at parsing, or None
    error = None
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        m = self.funcre.search(name)
        if m:
            # function-style alias: expands with arguments
            self.name = m.group(1)
            self.tree = ('func', ('symbol', m.group(1)))
            self.args = [x.strip() for x in m.group(2).split(',')]
            for arg in self.args:
                # _aliasarg() is an unknown symbol only used separate
                # alias argument placeholders from regular strings.
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))
        else:
            # symbol-style alias: expands with no arguments
            self.name = name
            self.tree = ('symbol', name)

        try:
            self.replacement, pos = parse(value)
            if pos != len(value):
                raise error.ParseError(_('invalid token'), pos)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            # remember the failure; it is reported later by findaliases()
            if len(inst.args) > 1:
                self.error = _('at %s: %s') % (inst.args[1], inst.args[0])
            else:
                self.error = inst.args[0]
2092 2092
2093 2093 def _getalias(aliases, tree):
2094 2094 """If tree looks like an unexpanded alias, return it. Return None
2095 2095 otherwise.
2096 2096 """
2097 2097 if isinstance(tree, tuple) and tree:
2098 2098 if tree[0] == 'symbol' and len(tree) == 2:
2099 2099 name = tree[1]
2100 2100 alias = aliases.get(name)
2101 2101 if alias and alias.args is None and alias.tree == tree:
2102 2102 return alias
2103 2103 if tree[0] == 'func' and len(tree) > 1:
2104 2104 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2105 2105 name = tree[1][1]
2106 2106 alias = aliases.get(name)
2107 2107 if alias and alias.args is not None and alias.tree == tree[:2]:
2108 2108 return alias
2109 2109 return None
2110 2110
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is not None:
        return args[name]
    return tuple(_expandargs(subtree, args) for subtree in tree)
2121 2121
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used
    to detect cyclic definitions.  'cache' memoizes fully expanded
    replacement trees by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(_('failed to parse revset alias "%s": %s') %
                             (alias.name, alias.error))
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # substitute the actual arguments for the placeholders left
            # in the (cached) replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2156 2156
def findaliases(ui, tree, showwarning=None):
    """Expand all user-configured revset aliases in tree.

    When showwarning is provided, report (once per alias) aliases whose
    definition failed to parse, even if they were not referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        aliases[entry.name] = entry
    tree = _expandaliases(aliases, tree, [], {})
    if not showwarning:
        return tree
    # warn about problematic (but not referred) aliases
    for name, alias in sorted(aliases.iteritems()):
        if not alias.error or alias.warned:
            continue
        msg = _('failed to parse revset alias "%s": %s'
                ) % (name, alias.error)
        showwarning(_('warning: %s\n') % (msg))
        alias.warned = True
    return tree
2173 2173
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] == '_concat':
        # collect leaf values depth-first, left to right
        pieces = []
        def collect(element):
            if element[0] == '_concat':
                for child in element[1:]:
                    collect(child)
            elif element[0] in ('string', 'symbol'):
                pieces.append(element[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (element[0])
                raise error.ParseError(msg)
        collect(tree)
        return ('string', ''.join(pieces))
    return tuple(foldconcat(subtree) for subtree in tree)
2194 2194
def parse(spec, lookup=None):
    """Parse spec and return a (tree, pos) pair.

    lookup is an optional callable passed through to the parser (e.g.
    repo.__contains__) used to decide whether a name is a known revision.
    """
    p = parser.parser(tokenize, elements)
    return p.parse(spec, lookup=lookup)
2198 2198
def match(ui, spec, repo=None):
    """Compile the revset expression `spec` into a callable.

    The returned function takes (repo, subset) and returns the smartset
    of revisions from subset matching the expression.  When ui is given,
    user-configured revset aliases are expanded first.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    tree, pos = parse(spec, lookup)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        # normalize plain collections into a smartset before evaluation
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2219 2219
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a quoted, backslash-escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of arguments of type t; lists of plain ints,
        # strings and nodes use the compact _intlist/_list/_hexlist form
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # other types: fall back to a balanced tree of 'or' expressions
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2313 2313
def prettyformat(tree):
    """Render a parsed revset tree as an indented multi-line string."""
    def walk(node, level, out):
        if not isinstance(node, tuple) or node[0] in ('string', 'symbol'):
            out.append((level, str(node)))
        else:
            out.append((level, '(%s' % node[0]))
            for child in node[1:]:
                walk(child, level + 1, out)
            # close the parenthesis on the last emitted line
            out[-1:] = [(out[-1][0], out[-1][1] + ')')]

    collected = []
    walk(tree, 0, collected)
    return '\n'.join((' '*lvl + text) for lvl, text in collected)
2328 2328
def depth(tree):
    """Return the nesting depth of a parse tree node (0 for leaves)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2334 2334
def funcsused(tree):
    """Return the set of function names referenced in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    for subtree in tree[1:]:
        names.update(funcsused(subtree))
    if tree[0] == 'func':
        names.add(tree[1][1])
    return names
2345 2345
class abstractsmartset(object):
    """Base class of the smartset hierarchy.

    A smartset represents an ordered set of revision numbers with lazy
    iteration, fast membership testing and set arithmetic.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # the first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # the first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2448 2448
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # data is kept in the order given; _ascending stays None until
        # sort() or reverse() imposes an explicit direction
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        self._ascending = None

    @util.propertycache
    def _set(self):
        # set view of the data, for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy of the data, shared by fastasc/fastdesc
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the underlying set's __contains__ bound method on the
        # instance so membership tests bypass this property afterwards
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no explicit direction yet: reverse the raw insertion order
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None
2539 2539
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes membership tests; the condition may be expensive
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of 'it' that satisfy the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast ascending iteration exists iff the subset provides one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast descending iteration exists iff the subset provides one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self._subset.isascending():
            # ascending order: the last element is the largest matching
            # revision, found first when walking backwards
            it = self.fastdesc
        elif self._subset.isdescending():
            # descending order: the last element is the smallest matching
            # revision, found first when walking forwards
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None
2623 2623
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the whole union (see _list)
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialized union of both sets, in iteration order
        if not self._genlist:
            self._genlist = baseset(self._iterator())
        return self._genlist

    def _iterator(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            def gen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            gen = gen()
        else:
            iter1 = iter(self._r1)
            iter2 = iter(self._r2)
            gen = self._iterordered(self._ascending, iter1, iter2)
        return gen

    def __iter__(self):
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            return iter(self._iterator())
        self._trysetasclist()
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # consume the gen and try again
            self._list
            return iter(self)
        return it()

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # one side has no fast ascending iteration: no fast path
            return None
        return lambda: self._iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # one side has no fast descending iteration: no fast path
            return None
        return lambda: self._iterordered(False, iter1(), iter2())

    def _iterordered(self, ascending, iter1, iter2):
        """produce an ordered iteration from two iterators with the same order

        The ascending is used to indicated the iteration direction.
        """
        # pick the lower value first when ascending, the higher when
        # descending (this was previously computed twice; the duplicate
        # assignment has been removed)
        choice = max
        if ascending:
            choice = min

        val1 = None
        val2 = None
        try:
            # Consume both iterators in an ordered way until one is
            # empty
            while True:
                if val1 is None:
                    val1 = iter1.next()
                if val2 is None:
                    val2 = iter2.next()
                value = choice(val1, val2)
                yield value
                # drop the yielded value from whichever side(s) held it,
                # which also skips duplicates present in both iterators
                if val1 == value:
                    val1 = None
                if val2 == value:
                    val2 = None
        except StopIteration:
            # Flush any remaining values and consume the other one
            it = iter2
            if val1 is not None:
                yield val1
                it = iter1
            elif val2 is not None:
                # might have been equality and both are empty
                yield val2
            for val in it:
                yield val

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # first element of the reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val
2796 2796
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: optional hint about the order `gen` yields its values:
            True for ascending, False for descending, None (default) for
            unknown. When given, membership tests stop consuming the
            generator as soon as the requested value has been passed, and
            iteration in that direction streams straight from it.
        """
        self._gen = gen
        # sorted list of all values; built once the generator is exhausted
        self._asclist = None
        # membership results for values generated or probed so far
        self._cache = {}
        # values generated so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # non-empty as soon as a single value can be generated
        for r in self:
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending generator: x cannot appear any more
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending generator: x cannot appear any more
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code (fastasc/fastdesc are set by _consumegen now)
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            # generator exhausted: freeze a sorted copy and switch to the
            # fast list-backed iteration paths
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # requires generating every remaining value
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return it().next()
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; fastasc/fastdesc are
            # set once the generator is exhausted.
            for x in self._consumegen():
                pass
            # BUGFIX: this previously recursed into self.first(), which
            # returns the *first* element of the set instead of the last.
            return self.last()
        if self:
            return it().next()
        return None
2965 2965
def spanset(repo, start=None, end=None):
    """Build a smartset covering a revision range of ``repo``.

    With no explicit bounds, the whole repository is represented by the
    optimized ``fullreposet``; otherwise an actual ``_spanset`` is built.

    This is only a dispatch helper -- call sites could use the concrete
    classes directly, at which point this function can be removed.
    """
    unbounded = start is None and end is None
    if unbounded:
        return fullreposet(repo)
    return _spanset(repo, start, end)
2975 2975
2976 2976
class _spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        # Normalize the bounds so that _start <= _end always holds; the
        # iteration direction is carried solely by _ascending.
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # generator dropping the revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # _start <= _end after __init__, so only this test can
                # match (the previous extra `end < rev <= start` arm was
                # dead code under that invariant)
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        # BUGFIX: _start <= _end always holds after __init__'s
        # normalization, so comparing the bounds -- as this used to do --
        # unconditionally returned True and ignored sort()/reverse().
        # The direction lives in _ascending, as __iter__/first/last assume.
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
3080 3080
class fullreposet(_spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization.
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # match the iteration order of the intersected set to our own
        if self.isascending():
            other.sort()
        else:
            # BUGFIX: this previously called other.sort(reverse) with a
            # bare, undefined name `reverse`, raising NameError whenever
            # the set was in descending order.
            other.sort(reverse=True)
        return other
3110 3110
# tell hggettext to extract docstrings from these functions:
# (presumably `symbols` maps revset predicate names to their implementing
# functions, defined earlier in this file -- their docstrings are the
# user-visible help text and must be translatable; verify against the
# full module)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now