revset: added isascending and isdescending methods to _addset...
Lucas Moscovicz
r20733:adf4ec7e default
@@ -1,2734 +1,2740 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 import ancestor as ancestormod
14 14 from i18n import _
15 15 import encoding
16 16 import obsolete as obsmod
17 17 import pathutil
18 18 import repoview
19 19
20 20 def _revancestors(repo, revs, followfirst):
21 21 """Like revlog.ancestors(), but supports followfirst."""
22 22 cut = followfirst and 1 or None
23 23 cl = repo.changelog
24 24
25 25 def iterate():
26 26 revqueue, revsnode = None, None
27 27 h = []
28 28
29 29 revs.descending()
30 30 revqueue = util.deque(revs)
31 31 if revqueue:
32 32 revsnode = revqueue.popleft()
33 33 heapq.heappush(h, -revsnode)
34 34
35 35 seen = set([node.nullrev])
36 36 while h:
37 37 current = -heapq.heappop(h)
38 38 if current not in seen:
39 39 if revsnode and current == revsnode:
40 40 if revqueue:
41 41 revsnode = revqueue.popleft()
42 42 heapq.heappush(h, -revsnode)
43 43 seen.add(current)
44 44 yield current
45 45 for parent in cl.parentrevs(current)[:cut]:
46 46 if parent != node.nullrev:
47 47 heapq.heappush(h, -parent)
48 48
49 49 return _descgeneratorset(iterate())
50 50
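# The generator above yields ancestors in descending revision order by
# pushing negated revision numbers onto a min-heap (heapq provides no
# max-heap). A minimal, self-contained sketch of that technique, assuming a
# toy parentrevs() table instead of the real changelog (all names below are
# illustrative, not Mercurial API):
def toy_ancestors(parentrevs, revs):
    """Yield revs and their ancestors in descending order."""
    h = [-r for r in revs]
    heapq.heapify(h)                  # heapq is imported at the top of this module
    seen = set([-1])                  # -1 plays the role of node.nullrev
    while h:
        current = -heapq.heappop(h)   # negate again to recover the revision
        if current not in seen:
            seen.add(current)
            yield current
            for p in parentrevs(current):
                if p != -1:
                    heapq.heappush(h, -p)

# Example: linear history 0 <- 1 <- 2 plus a merge 3 of (1, 2):
#   parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2)}
#   list(toy_ancestors(lambda r: parents[r], [3])) == [3, 2, 1, 0]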
51 51 def _revdescendants(repo, revs, followfirst):
52 52 """Like revlog.descendants() but supports followfirst."""
53 53 cut = followfirst and 1 or None
54 54
55 55 def iterate():
56 56 cl = repo.changelog
57 57 first = min(revs)
58 58 nullrev = node.nullrev
59 59 if first == nullrev:
60 60 # Are there nodes with a null first parent and a non-null
61 61 # second one? Maybe. Do we care? Probably not.
62 62 for i in cl:
63 63 yield i
64 64 else:
65 65 seen = set(revs)
66 66 for i in cl.revs(first + 1):
67 67 for x in cl.parentrevs(i)[:cut]:
68 68 if x != nullrev and x in seen:
69 69 seen.add(i)
70 70 yield i
71 71 break
72 72
73 73 return _ascgeneratorset(iterate())
74 74
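# _revdescendants() relies on the opposite property: a parent always has a
# lower revision number than its children, so one ascending scan starting
# just above min(revs) finds every descendant. A self-contained sketch under
# the same toy-parent-table assumption (illustrative names only):
def toy_descendants(parentrevs, allrevs, revs):
    """Yield descendants of revs (revs themselves excluded), ascending."""
    first = min(revs)
    seen = set(revs)
    for i in allrevs:
        if i <= first:
            continue
        for p in parentrevs(i):
            if p != -1 and p in seen:
                seen.add(i)
                yield i
                break

# With parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, 2)}:
#   list(toy_descendants(lambda r: parents[r], range(4), [1])) == [2, 3]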
75 75 def _revsbetween(repo, roots, heads):
76 76 """Return all paths between roots and heads, inclusive of both endpoint
77 77 sets."""
78 78 if not roots:
79 79 return baseset([])
80 80 parentrevs = repo.changelog.parentrevs
81 81 visit = baseset(heads)
82 82 reachable = set()
83 83 seen = {}
84 84 minroot = min(roots)
85 85 roots = set(roots)
86 86 # open-code the post-order traversal due to the tiny size of
87 87 # sys.getrecursionlimit()
88 88 while visit:
89 89 rev = visit.pop()
90 90 if rev in roots:
91 91 reachable.add(rev)
92 92 parents = parentrevs(rev)
93 93 seen[rev] = parents
94 94 for parent in parents:
95 95 if parent >= minroot and parent not in seen:
96 96 visit.append(parent)
97 97 if not reachable:
98 98 return baseset([])
99 99 for rev in sorted(seen):
100 100 for parent in seen[rev]:
101 101 if parent in reachable:
102 102 reachable.add(rev)
103 103 return baseset(sorted(reachable))
104 104
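# _revsbetween() makes two passes: an explicit-stack walk from the heads down
# to min(roots), recording each visited revision's parents, then an ascending
# sweep that marks a revision reachable once one of its recorded parents is.
# A self-contained sketch of the same two-pass idea (toy parent table,
# illustrative names, not the real baseset class):
def toy_revsbetween(parentrevs, roots, heads):
    roots = set(roots)
    minroot = min(roots)
    visit = list(heads)
    seen = {}
    reachable = set()
    while visit:                      # walk down from the heads
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for p in parents:
            if p >= minroot and p not in seen:
                visit.append(p)
    for rev in sorted(seen):          # propagate reachability upwards
        for p in seen[rev]:
            if p in reachable:
                reachable.add(rev)
    return sorted(reachable)

# With parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, -1), 4: (2, -1)}:
#   toy_revsbetween(lambda r: parents[r], [1], [3, 4]) == [1, 3]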
105 105 elements = {
106 106 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
107 107 "~": (18, None, ("ancestor", 18)),
108 108 "^": (18, None, ("parent", 18), ("parentpost", 18)),
109 109 "-": (5, ("negate", 19), ("minus", 5)),
110 110 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
111 111 ("dagrangepost", 17)),
112 112 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
113 113 ("dagrangepost", 17)),
114 114 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
115 115 "not": (10, ("not", 10)),
116 116 "!": (10, ("not", 10)),
117 117 "and": (5, None, ("and", 5)),
118 118 "&": (5, None, ("and", 5)),
119 119 "or": (4, None, ("or", 4)),
120 120 "|": (4, None, ("or", 4)),
121 121 "+": (4, None, ("or", 4)),
122 122 ",": (2, None, ("list", 2)),
123 123 ")": (0, None, None),
124 124 "symbol": (0, ("symbol",), None),
125 125 "string": (0, ("string",), None),
126 126 "end": (0, None, None),
127 127 }
128 128
129 129 keywords = set(['and', 'or', 'not'])
130 130
131 131 def tokenize(program):
132 132 '''
133 133 Parse a revset statement into a stream of tokens
134 134
135 135 Check that @ is a valid unquoted token character (issue3686):
136 136 >>> list(tokenize("@::"))
137 137 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
138 138
139 139 '''
140 140
141 141 pos, l = 0, len(program)
142 142 while pos < l:
143 143 c = program[pos]
144 144 if c.isspace(): # skip inter-token whitespace
145 145 pass
146 146 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
147 147 yield ('::', None, pos)
148 148 pos += 1 # skip ahead
149 149 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
150 150 yield ('..', None, pos)
151 151 pos += 1 # skip ahead
152 152 elif c in "():,-|&+!~^": # handle simple operators
153 153 yield (c, None, pos)
154 154 elif (c in '"\'' or c == 'r' and
155 155 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
156 156 if c == 'r':
157 157 pos += 1
158 158 c = program[pos]
159 159 decode = lambda x: x
160 160 else:
161 161 decode = lambda x: x.decode('string-escape')
162 162 pos += 1
163 163 s = pos
164 164 while pos < l: # find closing quote
165 165 d = program[pos]
166 166 if d == '\\': # skip over escaped characters
167 167 pos += 2
168 168 continue
169 169 if d == c:
170 170 yield ('string', decode(program[s:pos]), s)
171 171 break
172 172 pos += 1
173 173 else:
174 174 raise error.ParseError(_("unterminated string"), s)
175 175 # gather up a symbol/keyword
176 176 elif c.isalnum() or c in '._@' or ord(c) > 127:
177 177 s = pos
178 178 pos += 1
179 179 while pos < l: # find end of symbol
180 180 d = program[pos]
181 181 if not (d.isalnum() or d in "._/@" or ord(d) > 127):
182 182 break
183 183 if d == '.' and program[pos - 1] == '.': # special case for ..
184 184 pos -= 1
185 185 break
186 186 pos += 1
187 187 sym = program[s:pos]
188 188 if sym in keywords: # operator keywords
189 189 yield (sym, None, s)
190 190 else:
191 191 yield ('symbol', sym, s)
192 192 pos -= 1
193 193 else:
194 194 raise error.ParseError(_("syntax error"), pos)
195 195 pos += 1
196 196 yield ('end', None, pos)
197 197
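# A slightly larger token stream than the doctest above, traced by hand from
# the code (illustrative check, not part of the module):
assert list(tokenize("tip and not merge()")) == [
    ('symbol', 'tip', 0), ('and', None, 4), ('not', None, 8),
    ('symbol', 'merge', 12), ('(', None, 17), (')', None, 18),
    ('end', None, 19)]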
198 198 # helpers
199 199
200 200 def getstring(x, err):
201 201 if x and (x[0] == 'string' or x[0] == 'symbol'):
202 202 return x[1]
203 203 raise error.ParseError(err)
204 204
205 205 def getlist(x):
206 206 if not x:
207 207 return []
208 208 if x[0] == 'list':
209 209 return getlist(x[1]) + [x[2]]
210 210 return [x]
211 211
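# getlist() flattens the left-nested ('list', ...) nodes that the parser
# builds for comma-separated arguments. Hand-built parse nodes, shown only
# as an illustration:
assert getlist(('list', ('list', ('symbol', 'a'), ('symbol', 'b')),
                ('symbol', 'c'))) == [
    ('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]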
212 212 def getargs(x, min, max, err):
213 213 l = getlist(x)
214 214 if len(l) < min or (max >= 0 and len(l) > max):
215 215 raise error.ParseError(err)
216 216 return l
217 217
218 218 def getset(repo, subset, x):
219 219 if not x:
220 220 raise error.ParseError(_("missing argument"))
221 221 s = methods[x[0]](repo, subset, *x[1:])
222 222 if util.safehasattr(s, 'set'):
223 223 return s
224 224 return baseset(s)
225 225
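# getset() recursively dispatches on the first element of each parse tree
# node via the methods table (the operator methods below: andset, orset,
# notset, ...). A toy illustration of that dispatch-and-recurse pattern,
# using plain sets instead of the real revset classes:
def toy_eval(subset, tree):
    op = tree[0]
    if op == 'symbol':
        return set([tree[1]]) & subset
    if op == 'and':
        return toy_eval(toy_eval(subset, tree[1]), tree[2])
    if op == 'or':
        return toy_eval(subset, tree[1]) | toy_eval(subset, tree[2])
    if op == 'not':
        return subset - toy_eval(subset, tree[1])
    raise ValueError('unknown node: %r' % (op,))

# toy_eval(set([1, 2, 3]),
#          ('and', ('or', ('symbol', 1), ('symbol', 2)),
#                  ('not', ('symbol', 2)))) == set([1])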
226 226 def _getrevsource(repo, r):
227 227 extra = repo[r].extra()
228 228 for label in ('source', 'transplant_source', 'rebase_source'):
229 229 if label in extra:
230 230 try:
231 231 return repo[extra[label]].rev()
232 232 except error.RepoLookupError:
233 233 pass
234 234 return None
235 235
236 236 # operator methods
237 237
238 238 def stringset(repo, subset, x):
239 239 x = repo[x].rev()
240 240 if x == -1 and len(subset) == len(repo):
241 241 return baseset([-1])
242 242 if len(subset) == len(repo) or x in subset:
243 243 return baseset([x])
244 244 return baseset([])
245 245
246 246 def symbolset(repo, subset, x):
247 247 if x in symbols:
248 248 raise error.ParseError(_("can't use %s here") % x)
249 249 return stringset(repo, subset, x)
250 250
251 251 def rangeset(repo, subset, x, y):
252 252 cl = baseset(repo.changelog)
253 253 m = getset(repo, cl, x)
254 254 n = getset(repo, cl, y)
255 255
256 256 if not m or not n:
257 257 return baseset([])
258 258 m, n = m[0], n[-1]
259 259
260 260 if m < n:
261 261 r = spanset(repo, m, n + 1)
262 262 else:
263 263 r = spanset(repo, m, n - 1)
264 264 return r & subset
265 265
266 266 def dagrange(repo, subset, x, y):
267 267 r = spanset(repo)
268 268 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
269 269 s = subset.set()
270 270 return xs.filter(lambda r: r in s)
271 271
272 272 def andset(repo, subset, x, y):
273 273 return getset(repo, getset(repo, subset, x), y)
274 274
275 275 def orset(repo, subset, x, y):
276 276 xl = getset(repo, subset, x)
277 277 yl = getset(repo, subset - xl, y)
278 278 return xl + yl
279 279
280 280 def notset(repo, subset, x):
281 281 return subset - getset(repo, subset, x)
282 282
283 283 def listset(repo, subset, a, b):
284 284 raise error.ParseError(_("can't use a list in this context"))
285 285
286 286 def func(repo, subset, a, b):
287 287 if a[0] == 'symbol' and a[1] in symbols:
288 288 return symbols[a[1]](repo, subset, b)
289 289 raise error.ParseError(_("not a function: %s") % a[1])
290 290
291 291 # functions
292 292
293 293 def adds(repo, subset, x):
294 294 """``adds(pattern)``
295 295 Changesets that add a file matching pattern.
296 296
297 297 The pattern without explicit kind like ``glob:`` is expected to be
298 298 relative to the current directory and match against a file or a
299 299 directory.
300 300 """
301 301 # i18n: "adds" is a keyword
302 302 pat = getstring(x, _("adds requires a pattern"))
303 303 return checkstatus(repo, subset, pat, 1)
304 304
305 305 def ancestor(repo, subset, x):
306 306 """``ancestor(*changeset)``
307 307 Greatest common ancestor of the changesets.
308 308
309 309 Accepts 0 or more changesets.
310 310 Will return an empty set when passed no arguments.
311 311 Greatest common ancestor of a single changeset is that changeset.
312 312 """
313 313 # i18n: "ancestor" is a keyword
314 314 l = getlist(x)
315 315 rl = spanset(repo)
316 316 anc = None
317 317
318 318 # (getset(repo, rl, i) for i in l) generates a list of lists
319 319 rev = repo.changelog.rev
320 320 ancestor = repo.changelog.ancestor
321 321 node = repo.changelog.node
322 322 for revs in (getset(repo, rl, i) for i in l):
323 323 for r in revs:
324 324 if anc is None:
325 325 anc = r
326 326 else:
327 327 anc = rev(ancestor(node(anc), node(r)))
328 328
329 329 if anc is not None and anc in subset:
330 330 return baseset([anc])
331 331 return baseset([])
332 332
333 333 def _ancestors(repo, subset, x, followfirst=False):
334 334 args = getset(repo, spanset(repo), x)
335 335 if not args:
336 336 return baseset([])
337 337 s = _revancestors(repo, args, followfirst)
338 338 return subset.filter(lambda r: r in s)
339 339
340 340 def ancestors(repo, subset, x):
341 341 """``ancestors(set)``
342 342 Changesets that are ancestors of a changeset in set.
343 343 """
344 344 return _ancestors(repo, subset, x)
345 345
346 346 def _firstancestors(repo, subset, x):
347 347 # ``_firstancestors(set)``
348 348 # Like ``ancestors(set)`` but follows only the first parents.
349 349 return _ancestors(repo, subset, x, followfirst=True)
350 350
351 351 def ancestorspec(repo, subset, x, n):
352 352 """``set~n``
353 353 Changesets that are the Nth ancestor (first parents only) of a changeset
354 354 in set.
355 355 """
356 356 try:
357 357 n = int(n[1])
358 358 except (TypeError, ValueError):
359 359 raise error.ParseError(_("~ expects a number"))
360 360 ps = set()
361 361 cl = repo.changelog
362 362 for r in getset(repo, baseset(cl), x):
363 363 for i in range(n):
364 364 r = cl.parentrevs(r)[0]
365 365 ps.add(r)
366 366 return subset.filter(lambda r: r in ps)
367 367
368 368 def author(repo, subset, x):
369 369 """``author(string)``
370 370 Alias for ``user(string)``.
371 371 """
372 372 # i18n: "author" is a keyword
373 373 n = encoding.lower(getstring(x, _("author requires a string")))
374 374 kind, pattern, matcher = _substringmatcher(n)
375 375 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
376 376
377 377 def only(repo, subset, x):
378 378 """``only(set, [set])``
379 379 Changesets that are ancestors of the first set that are not ancestors
380 380 of any other head in the repo. If a second set is specified, the result
381 381 is ancestors of the first set that are not ancestors of the second set
382 382 (i.e. ::<set1> - ::<set2>).
383 383 """
384 384 cl = repo.changelog
385 385 args = getargs(x, 1, 2, _('only takes one or two arguments'))
386 386 include = getset(repo, spanset(repo), args[0]).set()
387 387 if len(args) == 1:
388 388 descendants = set(_revdescendants(repo, include, False))
389 389 exclude = [rev for rev in cl.headrevs()
390 390 if not rev in descendants and not rev in include]
391 391 else:
392 392 exclude = getset(repo, spanset(repo), args[1])
393 393
394 394 results = set(ancestormod.missingancestors(include, exclude, cl.parentrevs))
395 395 return lazyset(subset, lambda x: x in results)
396 396
397 397 def bisect(repo, subset, x):
398 398 """``bisect(string)``
399 399 Changesets marked in the specified bisect status:
400 400
401 401 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
402 402 - ``goods``, ``bads`` : csets topologically good/bad
403 403 - ``range`` : csets taking part in the bisection
404 404 - ``pruned`` : csets that are goods, bads or skipped
405 405 - ``untested`` : csets whose fate is yet unknown
406 406 - ``ignored`` : csets ignored due to DAG topology
407 407 - ``current`` : the cset currently being bisected
408 408 """
409 409 # i18n: "bisect" is a keyword
410 410 status = getstring(x, _("bisect requires a string")).lower()
411 411 state = set(hbisect.get(repo, status))
412 412 return subset.filter(lambda r: r in state)
413 413
414 414 # Backward-compatibility
415 415 # - no help entry so that we do not advertise it any more
416 416 def bisected(repo, subset, x):
417 417 return bisect(repo, subset, x)
418 418
419 419 def bookmark(repo, subset, x):
420 420 """``bookmark([name])``
421 421 The named bookmark or all bookmarks.
422 422
423 423 If `name` starts with `re:`, the remainder of the name is treated as
424 424 a regular expression. To match a bookmark that actually starts with `re:`,
425 425 use the prefix `literal:`.
426 426 """
427 427 # i18n: "bookmark" is a keyword
428 428 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
429 429 if args:
430 430 bm = getstring(args[0],
431 431 # i18n: "bookmark" is a keyword
432 432 _('the argument to bookmark must be a string'))
433 433 kind, pattern, matcher = _stringmatcher(bm)
434 434 if kind == 'literal':
435 435 bmrev = repo._bookmarks.get(bm, None)
436 436 if not bmrev:
437 437 raise util.Abort(_("bookmark '%s' does not exist") % bm)
438 438 bmrev = repo[bmrev].rev()
439 439 return subset.filter(lambda r: r == bmrev)
440 440 else:
441 441 matchrevs = set()
442 442 for name, bmrev in repo._bookmarks.iteritems():
443 443 if matcher(name):
444 444 matchrevs.add(bmrev)
445 445 if not matchrevs:
446 446 raise util.Abort(_("no bookmarks exist that match '%s'")
447 447 % pattern)
448 448 bmrevs = set()
449 449 for bmrev in matchrevs:
450 450 bmrevs.add(repo[bmrev].rev())
451 451 return subset & bmrevs
452 452
453 453 bms = set([repo[r].rev()
454 454 for r in repo._bookmarks.values()])
455 455 return subset.filter(lambda r: r in bms)
456 456
457 457 def branch(repo, subset, x):
458 458 """``branch(string or set)``
459 459 All changesets belonging to the given branch or the branches of the given
460 460 changesets.
461 461
462 462 If `string` starts with `re:`, the remainder of the name is treated as
463 463 a regular expression. To match a branch that actually starts with `re:`,
464 464 use the prefix `literal:`.
465 465 """
466 466 try:
467 467 b = getstring(x, '')
468 468 except error.ParseError:
469 469 # not a string, but another revspec, e.g. tip()
470 470 pass
471 471 else:
472 472 kind, pattern, matcher = _stringmatcher(b)
473 473 if kind == 'literal':
474 474 # note: falls through to the revspec case if no branch with
475 475 # this name exists
476 476 if pattern in repo.branchmap():
477 477 return subset.filter(lambda r: matcher(repo[r].branch()))
478 478 else:
479 479 return subset.filter(lambda r: matcher(repo[r].branch()))
480 480
481 481 s = getset(repo, spanset(repo), x)
482 482 b = set()
483 483 for r in s:
484 484 b.add(repo[r].branch())
485 485 s = s.set()
486 486 return subset.filter(lambda r: r in s or repo[r].branch() in b)
487 487
488 488 def bumped(repo, subset, x):
489 489 """``bumped()``
490 490 Mutable changesets marked as successors of public changesets.
491 491
492 492 Only non-public and non-obsolete changesets can be `bumped`.
493 493 """
494 494 # i18n: "bumped" is a keyword
495 495 getargs(x, 0, 0, _("bumped takes no arguments"))
496 496 bumped = obsmod.getrevs(repo, 'bumped')
497 497 return subset & bumped
498 498
499 499 def bundle(repo, subset, x):
500 500 """``bundle()``
501 501 Changesets in the bundle.
502 502
503 503 Bundle must be specified by the -R option."""
504 504
505 505 try:
506 506 bundlerevs = repo.changelog.bundlerevs
507 507 except AttributeError:
508 508 raise util.Abort(_("no bundle provided - specify with -R"))
509 509 return subset & bundlerevs
510 510
511 511 def checkstatus(repo, subset, pat, field):
512 512 hasset = matchmod.patkind(pat) == 'set'
513 513
514 514 def matches(x):
515 515 m = None
516 516 fname = None
517 517 c = repo[x]
518 518 if not m or hasset:
519 519 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
520 520 if not m.anypats() and len(m.files()) == 1:
521 521 fname = m.files()[0]
522 522 if fname is not None:
523 523 if fname not in c.files():
524 524 return False
525 525 else:
526 526 for f in c.files():
527 527 if m(f):
528 528 break
529 529 else:
530 530 return False
531 531 files = repo.status(c.p1().node(), c.node())[field]
532 532 if fname is not None:
533 533 if fname in files:
534 534 return True
535 535 else:
536 536 for f in files:
537 537 if m(f):
538 538 return True
539 539
540 540 return subset.filter(matches)
541 541
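# checkstatus() is shared by adds() above and modifies()/removes() below;
# `field` selects a slot of the repo.status() tuple, i.e. 0 = modified,
# 1 = added, 2 = removed, matching the checkstatus(..., 1), (..., 0) and
# (..., 2) calls in those predicates.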
542 542 def _children(repo, narrow, parentset):
543 543 cs = set()
544 544 if not parentset:
545 545 return baseset(cs)
546 546 pr = repo.changelog.parentrevs
547 547 minrev = min(parentset)
548 548 for r in narrow:
549 549 if r <= minrev:
550 550 continue
551 551 for p in pr(r):
552 552 if p in parentset:
553 553 cs.add(r)
554 554 return baseset(cs)
555 555
556 556 def children(repo, subset, x):
557 557 """``children(set)``
558 558 Child changesets of changesets in set.
559 559 """
560 560 s = getset(repo, baseset(repo), x).set()
561 561 cs = _children(repo, subset, s)
562 562 return subset & cs
563 563
564 564 def closed(repo, subset, x):
565 565 """``closed()``
566 566 Changeset is closed.
567 567 """
568 568 # i18n: "closed" is a keyword
569 569 getargs(x, 0, 0, _("closed takes no arguments"))
570 570 return subset.filter(lambda r: repo[r].closesbranch())
571 571
572 572 def contains(repo, subset, x):
573 573 """``contains(pattern)``
574 574 Revision contains a file matching pattern. See :hg:`help patterns`
575 575 for information about file patterns.
576 576
577 577 The pattern without explicit kind like ``glob:`` is expected to be
578 578 relative to the current directory and match against a file exactly
579 579 for efficiency.
580 580 """
581 581 # i18n: "contains" is a keyword
582 582 pat = getstring(x, _("contains requires a pattern"))
583 583
584 584 def matches(x):
585 585 if not matchmod.patkind(pat):
586 586 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
587 587 if pats in repo[x]:
588 588 return True
589 589 else:
590 590 c = repo[x]
591 591 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
592 592 for f in c.manifest():
593 593 if m(f):
594 594 return True
595 595 return False
596 596
597 597 return subset.filter(matches)
598 598
599 599 def converted(repo, subset, x):
600 600 """``converted([id])``
601 601 Changesets converted from the given identifier in the old repository if
602 602 present, or all converted changesets if no identifier is specified.
603 603 """
604 604
605 605 # There is exactly no chance of resolving the revision, so do a simple
606 606 # string compare and hope for the best
607 607
608 608 rev = None
609 609 # i18n: "converted" is a keyword
610 610 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
611 611 if l:
612 612 # i18n: "converted" is a keyword
613 613 rev = getstring(l[0], _('converted requires a revision'))
614 614
615 615 def _matchvalue(r):
616 616 source = repo[r].extra().get('convert_revision', None)
617 617 return source is not None and (rev is None or source.startswith(rev))
618 618
619 619 return subset.filter(lambda r: _matchvalue(r))
620 620
621 621 def date(repo, subset, x):
622 622 """``date(interval)``
623 623 Changesets within the interval, see :hg:`help dates`.
624 624 """
625 625 # i18n: "date" is a keyword
626 626 ds = getstring(x, _("date requires a string"))
627 627 dm = util.matchdate(ds)
628 628 return subset.filter(lambda x: dm(repo[x].date()[0]))
629 629
630 630 def desc(repo, subset, x):
631 631 """``desc(string)``
632 632 Search commit message for string. The match is case-insensitive.
633 633 """
634 634 # i18n: "desc" is a keyword
635 635 ds = encoding.lower(getstring(x, _("desc requires a string")))
636 636
637 637 def matches(x):
638 638 c = repo[x]
639 639 return ds in encoding.lower(c.description())
640 640
641 641 return subset.filter(matches)
642 642
643 643 def _descendants(repo, subset, x, followfirst=False):
644 644 args = getset(repo, spanset(repo), x)
645 645 if not args:
646 646 return baseset([])
647 647 s = _revdescendants(repo, args, followfirst)
648 648 a = set(args)
649 649 return subset.filter(lambda r: r in s or r in a)
650 650
651 651 def descendants(repo, subset, x):
652 652 """``descendants(set)``
653 653 Changesets which are descendants of changesets in set.
654 654 """
655 655 return _descendants(repo, subset, x)
656 656
657 657 def _firstdescendants(repo, subset, x):
658 658 # ``_firstdescendants(set)``
659 659 # Like ``descendants(set)`` but follows only the first parents.
660 660 return _descendants(repo, subset, x, followfirst=True)
661 661
662 662 def destination(repo, subset, x):
663 663 """``destination([set])``
664 664 Changesets that were created by a graft, transplant or rebase operation,
665 665 with the given revisions specified as the source. Omitting the optional set
666 666 is the same as passing all().
667 667 """
668 668 if x is not None:
669 669 args = getset(repo, spanset(repo), x).set()
670 670 else:
671 671 args = getall(repo, spanset(repo), x).set()
672 672
673 673 dests = set()
674 674
675 675 # subset contains all of the possible destinations that can be returned, so
676 676 # iterate over them and see if their source(s) were provided in the args.
677 677 # Even if the immediate src of r is not in the args, src's source (or
678 678 # further back) may be. Scanning back further than the immediate src allows
679 679 # transitive transplants and rebases to yield the same results as transitive
680 680 # grafts.
681 681 for r in subset:
682 682 src = _getrevsource(repo, r)
683 683 lineage = None
684 684
685 685 while src is not None:
686 686 if lineage is None:
687 687 lineage = list()
688 688
689 689 lineage.append(r)
690 690
691 691 # The visited lineage is a match if the current source is in the arg
692 692 # set. Since every candidate dest is visited by way of iterating
693 693 # subset, any dests further back in the lineage will be tested by a
694 694 # different iteration over subset. Likewise, if the src was already
695 695 # selected, the current lineage can be selected without going back
696 696 # further.
697 697 if src in args or src in dests:
698 698 dests.update(lineage)
699 699 break
700 700
701 701 r = src
702 702 src = _getrevsource(repo, r)
703 703
704 704 return subset.filter(lambda r: r in dests)
705 705
706 706 def divergent(repo, subset, x):
707 707 """``divergent()``
708 708 Final successors of changesets with an alternative set of final successors.
709 709 """
710 710 # i18n: "divergent" is a keyword
711 711 getargs(x, 0, 0, _("divergent takes no arguments"))
712 712 divergent = obsmod.getrevs(repo, 'divergent')
713 713 return subset.filter(lambda r: r in divergent)
714 714
715 715 def draft(repo, subset, x):
716 716 """``draft()``
717 717 Changeset in draft phase."""
718 718 # i18n: "draft" is a keyword
719 719 getargs(x, 0, 0, _("draft takes no arguments"))
720 720 pc = repo._phasecache
721 721 return subset.filter(lambda r: pc.phase(repo, r) == phases.draft)
722 722
723 723 def extinct(repo, subset, x):
724 724 """``extinct()``
725 725 Obsolete changesets with obsolete descendants only.
726 726 """
727 727 # i18n: "extinct" is a keyword
728 728 getargs(x, 0, 0, _("extinct takes no arguments"))
729 729 extincts = obsmod.getrevs(repo, 'extinct')
730 730 return subset & extincts
731 731
732 732 def extra(repo, subset, x):
733 733 """``extra(label, [value])``
734 734 Changesets with the given label in the extra metadata, with the given
735 735 optional value.
736 736
737 737 If `value` starts with `re:`, the remainder of the value is treated as
738 738 a regular expression. To match a value that actually starts with `re:`,
739 739 use the prefix `literal:`.
740 740 """
741 741
742 742 # i18n: "extra" is a keyword
743 743 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
744 744 # i18n: "extra" is a keyword
745 745 label = getstring(l[0], _('first argument to extra must be a string'))
746 746 value = None
747 747
748 748 if len(l) > 1:
749 749 # i18n: "extra" is a keyword
750 750 value = getstring(l[1], _('second argument to extra must be a string'))
751 751 kind, value, matcher = _stringmatcher(value)
752 752
753 753 def _matchvalue(r):
754 754 extra = repo[r].extra()
755 755 return label in extra and (value is None or matcher(extra[label]))
756 756
757 757 return subset.filter(lambda r: _matchvalue(r))
758 758
759 759 def filelog(repo, subset, x):
760 760 """``filelog(pattern)``
761 761 Changesets connected to the specified filelog.
762 762
763 763 For performance reasons, ``filelog()`` does not show every changeset
764 764 that affects the requested file(s). See :hg:`help log` for details. For
765 765 a slower, more accurate result, use ``file()``.
766 766
767 767 The pattern without explicit kind like ``glob:`` is expected to be
768 768 relative to the current directory and match against a file exactly
769 769 for efficiency.
770 770 """
771 771
772 772 # i18n: "filelog" is a keyword
773 773 pat = getstring(x, _("filelog requires a pattern"))
774 774 s = set()
775 775
776 776 if not matchmod.patkind(pat):
777 777 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
778 778 fl = repo.file(f)
779 779 for fr in fl:
780 780 s.add(fl.linkrev(fr))
781 781 else:
782 782 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
783 783 for f in repo[None]:
784 784 if m(f):
785 785 fl = repo.file(f)
786 786 for fr in fl:
787 787 s.add(fl.linkrev(fr))
788 788
789 789 return subset.filter(lambda r: r in s)
790 790
791 791 def first(repo, subset, x):
792 792 """``first(set, [n])``
793 793 An alias for limit().
794 794 """
795 795 return limit(repo, subset, x)
796 796
797 797 def _follow(repo, subset, x, name, followfirst=False):
798 798 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
799 799 c = repo['.']
800 800 if l:
801 801 x = getstring(l[0], _("%s expected a filename") % name)
802 802 if x in c:
803 803 cx = c[x]
804 804 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
805 805 # include the revision responsible for the most recent version
806 806 s.add(cx.linkrev())
807 807 else:
808 808 return baseset([])
809 809 else:
810 810 s = _revancestors(repo, baseset([c.rev()]), followfirst)
811 811
812 812 return subset.filter(lambda r: r in s)
813 813
814 814 def follow(repo, subset, x):
815 815 """``follow([file])``
816 816 An alias for ``::.`` (ancestors of the working copy's first parent).
817 817 If a filename is specified, the history of the given file is followed,
818 818 including copies.
819 819 """
820 820 return _follow(repo, subset, x, 'follow')
821 821
822 822 def _followfirst(repo, subset, x):
823 823 # ``followfirst([file])``
824 824 # Like ``follow([file])`` but follows only the first parent of
825 825 # every revision or file revision.
826 826 return _follow(repo, subset, x, '_followfirst', followfirst=True)
827 827
828 828 def getall(repo, subset, x):
829 829 """``all()``
830 830 All changesets, the same as ``0:tip``.
831 831 """
832 832 # i18n: "all" is a keyword
833 833 getargs(x, 0, 0, _("all takes no arguments"))
834 834 return subset
835 835
836 836 def grep(repo, subset, x):
837 837 """``grep(regex)``
838 838 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
839 839 to ensure special escape characters are handled correctly. Unlike
840 840 ``keyword(string)``, the match is case-sensitive.
841 841 """
842 842 try:
843 843 # i18n: "grep" is a keyword
844 844 gr = re.compile(getstring(x, _("grep requires a string")))
845 845 except re.error, e:
846 846 raise error.ParseError(_('invalid match pattern: %s') % e)
847 847
848 848 def matches(x):
849 849 c = repo[x]
850 850 for e in c.files() + [c.user(), c.description()]:
851 851 if gr.search(e):
852 852 return True
853 853 return False
854 854
855 855 return subset.filter(matches)
856 856
857 857 def _matchfiles(repo, subset, x):
858 858 # _matchfiles takes a revset list of prefixed arguments:
859 859 #
860 860 # [p:foo, i:bar, x:baz]
861 861 #
862 862 # builds a match object from them and filters subset. Allowed
863 863 # prefixes are 'p:' for regular patterns, 'i:' for include
864 864 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
865 865 # a revision identifier, or the empty string to reference the
866 866 # working directory, from which the match object is
867 867 # initialized. Use 'd:' to set the default matching mode, default
868 868 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
869 869
870 870 # i18n: "_matchfiles" is a keyword
871 871 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
872 872 pats, inc, exc = [], [], []
873 873 hasset = False
874 874 rev, default = None, None
875 875 for arg in l:
876 876 # i18n: "_matchfiles" is a keyword
877 877 s = getstring(arg, _("_matchfiles requires string arguments"))
878 878 prefix, value = s[:2], s[2:]
879 879 if prefix == 'p:':
880 880 pats.append(value)
881 881 elif prefix == 'i:':
882 882 inc.append(value)
883 883 elif prefix == 'x:':
884 884 exc.append(value)
885 885 elif prefix == 'r:':
886 886 if rev is not None:
887 887 # i18n: "_matchfiles" is a keyword
888 888 raise error.ParseError(_('_matchfiles expected at most one '
889 889 'revision'))
890 890 rev = value
891 891 elif prefix == 'd:':
892 892 if default is not None:
893 893 # i18n: "_matchfiles" is a keyword
894 894 raise error.ParseError(_('_matchfiles expected at most one '
895 895 'default mode'))
896 896 default = value
897 897 else:
898 898 # i18n: "_matchfiles" is a keyword
899 899 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
900 900 if not hasset and matchmod.patkind(value) == 'set':
901 901 hasset = True
902 902 if not default:
903 903 default = 'glob'
904 904
905 905 def matches(x):
906 906 m = None
907 907 c = repo[x]
908 908 if not m or (hasset and rev is None):
909 909 ctx = c
910 910 if rev is not None:
911 911 ctx = repo[rev or None]
912 912 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
913 913 exclude=exc, ctx=ctx, default=default)
914 914 for f in c.files():
915 915 if m(f):
916 916 return True
917 917 return False
918 918
919 919 return subset.filter(matches)
920 920
921 921 def hasfile(repo, subset, x):
922 922 """``file(pattern)``
923 923 Changesets affecting files matched by pattern.
924 924
925 925 For a faster but less accurate result, consider using ``filelog()``
926 926 instead.
927 927
928 928 This predicate uses ``glob:`` as the default kind of pattern.
929 929 """
930 930 # i18n: "file" is a keyword
931 931 pat = getstring(x, _("file requires a pattern"))
932 932 return _matchfiles(repo, subset, ('string', 'p:' + pat))
933 933
934 934 def head(repo, subset, x):
935 935 """``head()``
936 936 Changeset is a named branch head.
937 937 """
938 938 # i18n: "head" is a keyword
939 939 getargs(x, 0, 0, _("head takes no arguments"))
940 940 hs = set()
941 941 for b, ls in repo.branchmap().iteritems():
942 942 hs.update(repo[h].rev() for h in ls)
943 943 return baseset(hs).filter(subset.__contains__)
944 944
945 945 def heads(repo, subset, x):
946 946 """``heads(set)``
947 947 Members of set with no children in set.
948 948 """
949 949 s = getset(repo, subset, x)
950 950 ps = parents(repo, subset, x)
951 951 return s - ps
952 952
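# heads(set) is, roughly, "set minus the parents of the members of set":
# a member that is some other member's parent cannot be a head. Toy
# illustration (illustrative parent table, plain sets):
def toy_heads(parentrevs, s):
    ps = set()
    for r in s:
        ps.update(parentrevs(r))
    return set(s) - ps

# With parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1)}:
#   toy_heads(lambda r: parents[r], set([1, 2, 3])) == set([2, 3])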
953 953 def hidden(repo, subset, x):
954 954 """``hidden()``
955 955 Hidden changesets.
956 956 """
957 957 # i18n: "hidden" is a keyword
958 958 getargs(x, 0, 0, _("hidden takes no arguments"))
959 959 hiddenrevs = repoview.filterrevs(repo, 'visible')
960 960 return subset & hiddenrevs
961 961
962 962 def keyword(repo, subset, x):
963 963 """``keyword(string)``
964 964 Search commit message, user name, and names of changed files for
965 965 string. The match is case-insensitive.
966 966 """
967 967 # i18n: "keyword" is a keyword
968 968 kw = encoding.lower(getstring(x, _("keyword requires a string")))
969 969
970 970 def matches(r):
971 971 c = repo[r]
972 972 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
973 973 c.description()])
974 974
975 975 return subset.filter(matches)
976 976
977 977 def limit(repo, subset, x):
978 978 """``limit(set, [n])``
979 979 First n members of set, defaulting to 1.
980 980 """
981 981 # i18n: "limit" is a keyword
982 982 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
983 983 try:
984 984 lim = 1
985 985 if len(l) == 2:
986 986 # i18n: "limit" is a keyword
987 987 lim = int(getstring(l[1], _("limit requires a number")))
988 988 except (TypeError, ValueError):
989 989 # i18n: "limit" is a keyword
990 990 raise error.ParseError(_("limit expects a number"))
991 991 ss = subset.set()
992 992 os = getset(repo, spanset(repo), l[0])
993 993 bs = baseset([])
994 994 it = iter(os)
995 995 for x in xrange(lim):
996 996 try:
997 997 y = it.next()
998 998 if y in ss:
999 999 bs.append(y)
1000 1000 except (StopIteration):
1001 1001 break
1002 1002 return bs
1003 1003
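# limit() examines only the first `lim` elements of the evaluated argument
# set and keeps those that are also in `subset`. The same behaviour,
# sketched with itertools over plain containers (illustrative only):
import itertools

def toy_limit(subset, candidates, lim=1):
    return [r for r in itertools.islice(candidates, lim) if r in subset]

# toy_limit(set([2, 3, 5]), [1, 2, 3, 4, 5], 2) == [2]
# (1 is inspected but dropped because it is not in the subset)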
1004 1004 def last(repo, subset, x):
1005 1005 """``last(set, [n])``
1006 1006 Last n members of set, defaulting to 1.
1007 1007 """
1008 1008 # i18n: "last" is a keyword
1009 1009 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1010 1010 try:
1011 1011 lim = 1
1012 1012 if len(l) == 2:
1013 1013 # i18n: "last" is a keyword
1014 1014 lim = int(getstring(l[1], _("last requires a number")))
1015 1015 except (TypeError, ValueError):
1016 1016 # i18n: "last" is a keyword
1017 1017 raise error.ParseError(_("last expects a number"))
1018 1018 ss = subset.set()
1019 1019 os = getset(repo, spanset(repo), l[0])
1020 1020 os.reverse()
1021 1021 bs = baseset([])
1022 1022 it = iter(os)
1023 1023 for x in xrange(lim):
1024 1024 try:
1025 1025 y = it.next()
1026 1026 if y in ss:
1027 1027 bs.append(y)
1028 1028 except (StopIteration):
1029 1029 break
1030 1030 return bs
1031 1031
1032 1032 def maxrev(repo, subset, x):
1033 1033 """``max(set)``
1034 1034 Changeset with highest revision number in set.
1035 1035 """
1036 1036 os = getset(repo, spanset(repo), x)
1037 1037 if os:
1038 1038 m = max(os)
1039 1039 if m in subset:
1040 1040 return baseset([m])
1041 1041 return baseset([])
1042 1042
1043 1043 def merge(repo, subset, x):
1044 1044 """``merge()``
1045 1045 Changeset is a merge changeset.
1046 1046 """
1047 1047 # i18n: "merge" is a keyword
1048 1048 getargs(x, 0, 0, _("merge takes no arguments"))
1049 1049 cl = repo.changelog
1050 1050 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1051 1051
1052 1052 def branchpoint(repo, subset, x):
1053 1053 """``branchpoint()``
1054 1054 Changesets with more than one child.
1055 1055 """
1056 1056 # i18n: "branchpoint" is a keyword
1057 1057 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1058 1058 cl = repo.changelog
1059 1059 if not subset:
1060 1060 return baseset([])
1061 1061 baserev = min(subset)
1062 1062 parentscount = [0]*(len(repo) - baserev)
1063 1063 for r in cl.revs(start=baserev + 1):
1064 1064 for p in cl.parentrevs(r):
1065 1065 if p >= baserev:
1066 1066 parentscount[p - baserev] += 1
1067 1067 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1068 1068
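# branchpoint() counts, for every revision at or above min(subset), how many
# times it occurs as a parent; a count above one means the revision has more
# than one child. A self-contained sketch of the same counting idea (toy
# parent table, a dict instead of the offset-indexed list used above):
def toy_branchpoints(parentrevs, allrevs):
    counts = dict((r, 0) for r in allrevs)
    for r in allrevs:
        for p in parentrevs(r):
            if p in counts:
                counts[p] += 1
    return [r for r in allrevs if counts[r] > 1]

# With parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}:
#   toy_branchpoints(lambda r: parents[r], range(4)) == [0]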
1069 1069 def minrev(repo, subset, x):
1070 1070 """``min(set)``
1071 1071 Changeset with lowest revision number in set.
1072 1072 """
1073 1073 os = getset(repo, spanset(repo), x)
1074 1074 if os:
1075 1075 m = min(os)
1076 1076 if m in subset:
1077 1077 return baseset([m])
1078 1078 return baseset([])
1079 1079
1080 1080 def _missingancestors(repo, subset, x):
1081 1081 # i18n: "_missingancestors" is a keyword
1082 1082 revs, bases = getargs(x, 2, 2,
1083 1083 _("_missingancestors requires two arguments"))
1084 1084 rs = baseset(repo)
1085 1085 revs = getset(repo, rs, revs)
1086 1086 bases = getset(repo, rs, bases)
1087 1087 missing = set(repo.changelog.findmissingrevs(bases, revs))
1088 1088 return baseset([r for r in subset if r in missing])
1089 1089
1090 1090 def modifies(repo, subset, x):
1091 1091 """``modifies(pattern)``
1092 1092 Changesets modifying files matched by pattern.
1093 1093
1094 1094 The pattern without explicit kind like ``glob:`` is expected to be
1095 1095 relative to the current directory and match against a file or a
1096 1096 directory.
1097 1097 """
1098 1098 # i18n: "modifies" is a keyword
1099 1099 pat = getstring(x, _("modifies requires a pattern"))
1100 1100 return checkstatus(repo, subset, pat, 0)
1101 1101
1102 1102 def node_(repo, subset, x):
1103 1103 """``id(string)``
1104 1104 Revision non-ambiguously specified by the given hex string prefix.
1105 1105 """
1106 1106 # i18n: "id" is a keyword
1107 1107 l = getargs(x, 1, 1, _("id requires one argument"))
1108 1108 # i18n: "id" is a keyword
1109 1109 n = getstring(l[0], _("id requires a string"))
1110 1110 if len(n) == 40:
1111 1111 rn = repo[n].rev()
1112 1112 else:
1113 1113 rn = None
1114 1114 pm = repo.changelog._partialmatch(n)
1115 1115 if pm is not None:
1116 1116 rn = repo.changelog.rev(pm)
1117 1117
1118 1118 return subset.filter(lambda r: r == rn)
1119 1119
1120 1120 def obsolete(repo, subset, x):
1121 1121 """``obsolete()``
1122 1122 Mutable changeset with a newer version."""
1123 1123 # i18n: "obsolete" is a keyword
1124 1124 getargs(x, 0, 0, _("obsolete takes no arguments"))
1125 1125 obsoletes = obsmod.getrevs(repo, 'obsolete')
1126 1126 return subset & obsoletes
1127 1127
1128 1128 def origin(repo, subset, x):
1129 1129 """``origin([set])``
1130 1130 Changesets that were specified as a source for the grafts, transplants or
1131 1131 rebases that created the given revisions. Omitting the optional set is the
1132 1132 same as passing all(). If a changeset created by these operations is itself
1133 1133 specified as a source for one of these operations, only the source changeset
1134 1134 for the first operation is selected.
1135 1135 """
1136 1136 if x is not None:
1137 1137 args = getset(repo, spanset(repo), x).set()
1138 1138 else:
1139 1139 args = getall(repo, spanset(repo), x).set()
1140 1140
1141 1141 def _firstsrc(rev):
1142 1142 src = _getrevsource(repo, rev)
1143 1143 if src is None:
1144 1144 return None
1145 1145
1146 1146 while True:
1147 1147 prev = _getrevsource(repo, src)
1148 1148
1149 1149 if prev is None:
1150 1150 return src
1151 1151 src = prev
1152 1152
1153 1153 o = set([_firstsrc(r) for r in args])
1154 1154 return subset.filter(lambda r: r in o)
1155 1155
1156 1156 def outgoing(repo, subset, x):
1157 1157 """``outgoing([path])``
1158 1158 Changesets not found in the specified destination repository, or the
1159 1159 default push location.
1160 1160 """
1161 1161 import hg # avoid start-up nasties
1162 1162 # i18n: "outgoing" is a keyword
1163 1163 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1164 1164 # i18n: "outgoing" is a keyword
1165 1165 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1166 1166 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1167 1167 dest, branches = hg.parseurl(dest)
1168 1168 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1169 1169 if revs:
1170 1170 revs = [repo.lookup(rev) for rev in revs]
1171 1171 other = hg.peer(repo, {}, dest)
1172 1172 repo.ui.pushbuffer()
1173 1173 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1174 1174 repo.ui.popbuffer()
1175 1175 cl = repo.changelog
1176 1176 o = set([cl.rev(r) for r in outgoing.missing])
1177 1177 return subset.filter(lambda r: r in o)
1178 1178
1179 1179 def p1(repo, subset, x):
1180 1180 """``p1([set])``
1181 1181 First parent of changesets in set, or the working directory.
1182 1182 """
1183 1183 if x is None:
1184 1184 p = repo[x].p1().rev()
1185 1185 return subset.filter(lambda r: r == p)
1186 1186
1187 1187 ps = set()
1188 1188 cl = repo.changelog
1189 1189 for r in getset(repo, spanset(repo), x):
1190 1190 ps.add(cl.parentrevs(r)[0])
1191 1191 return subset & ps
1192 1192
1193 1193 def p2(repo, subset, x):
1194 1194 """``p2([set])``
1195 1195 Second parent of changesets in set, or the working directory.
1196 1196 """
1197 1197 if x is None:
1198 1198 ps = repo[x].parents()
1199 1199 try:
1200 1200 p = ps[1].rev()
1201 1201 return subset.filter(lambda r: r == p)
1202 1202 except IndexError:
1203 1203 return baseset([])
1204 1204
1205 1205 ps = set()
1206 1206 cl = repo.changelog
1207 1207 for r in getset(repo, spanset(repo), x):
1208 1208 ps.add(cl.parentrevs(r)[1])
1209 1209 return subset & ps
1210 1210
1211 1211 def parents(repo, subset, x):
1212 1212 """``parents([set])``
1213 1213 The set of all parents for all changesets in set, or the working directory.
1214 1214 """
1215 1215 if x is None:
1216 1216 ps = tuple(p.rev() for p in repo[x].parents())
1217 1217 return subset & ps
1218 1218
1219 1219 ps = set()
1220 1220 cl = repo.changelog
1221 1221 for r in getset(repo, spanset(repo), x):
1222 1222 ps.update(cl.parentrevs(r))
1223 1223 return subset & ps
1224 1224
1225 1225 def parentspec(repo, subset, x, n):
1226 1226 """``set^0``
1227 1227 The set.
1228 1228 ``set^1`` (or ``set^``), ``set^2``
1229 1229 First or second parent, respectively, of all changesets in set.
1230 1230 """
1231 1231 try:
1232 1232 n = int(n[1])
1233 1233 if n not in (0, 1, 2):
1234 1234 raise ValueError
1235 1235 except (TypeError, ValueError):
1236 1236 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1237 1237 ps = set()
1238 1238 cl = repo.changelog
1239 1239 for r in getset(repo, baseset(cl), x):
1240 1240 if n == 0:
1241 1241 ps.add(r)
1242 1242 elif n == 1:
1243 1243 ps.add(cl.parentrevs(r)[0])
1244 1244 elif n == 2:
1245 1245 parents = cl.parentrevs(r)
1246 1246 if len(parents) > 1:
1247 1247 ps.add(parents[1])
1248 1248 return subset & ps
1249 1249
1250 1250 def present(repo, subset, x):
1251 1251 """``present(set)``
1252 1252 An empty set, if any revision in set isn't found; otherwise,
1253 1253 all revisions in set.
1254 1254
1255 1255 If any of the specified revisions is not present in the local repository,
1256 1256 the query is normally aborted. But this predicate allows the query
1257 1257 to continue even in such cases.
1258 1258 """
1259 1259 try:
1260 1260 return getset(repo, subset, x)
1261 1261 except error.RepoLookupError:
1262 1262 return baseset([])
1263 1263
1264 1264 def public(repo, subset, x):
1265 1265 """``public()``
1266 1266 Changeset in public phase."""
1267 1267 # i18n: "public" is a keyword
1268 1268 getargs(x, 0, 0, _("public takes no arguments"))
1269 1269 pc = repo._phasecache
1270 1270 return subset.filter(lambda r: pc.phase(repo, r) == phases.public)
1271 1271
1272 1272 def remote(repo, subset, x):
1273 1273 """``remote([id [,path]])``
1274 1274 Local revision that corresponds to the given identifier in a
1275 1275 remote repository, if present. Here, the '.' identifier is a
1276 1276 synonym for the current local branch.
1277 1277 """
1278 1278
1279 1279 import hg # avoid start-up nasties
1280 1280 # i18n: "remote" is a keyword
1281 1281 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1282 1282
1283 1283 q = '.'
1284 1284 if len(l) > 0:
1285 1285 # i18n: "remote" is a keyword
1286 1286 q = getstring(l[0], _("remote requires a string id"))
1287 1287 if q == '.':
1288 1288 q = repo['.'].branch()
1289 1289
1290 1290 dest = ''
1291 1291 if len(l) > 1:
1292 1292 # i18n: "remote" is a keyword
1293 1293 dest = getstring(l[1], _("remote requires a repository path"))
1294 1294 dest = repo.ui.expandpath(dest or 'default')
1295 1295 dest, branches = hg.parseurl(dest)
1296 1296 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1297 1297 if revs:
1298 1298 revs = [repo.lookup(rev) for rev in revs]
1299 1299 other = hg.peer(repo, {}, dest)
1300 1300 n = other.lookup(q)
1301 1301 if n in repo:
1302 1302 r = repo[n].rev()
1303 1303 if r in subset:
1304 1304 return baseset([r])
1305 1305 return baseset([])
1306 1306
1307 1307 def removes(repo, subset, x):
1308 1308 """``removes(pattern)``
1309 1309 Changesets which remove files matching pattern.
1310 1310
1311 1311 The pattern without explicit kind like ``glob:`` is expected to be
1312 1312 relative to the current directory and match against a file or a
1313 1313 directory.
1314 1314 """
1315 1315 # i18n: "removes" is a keyword
1316 1316 pat = getstring(x, _("removes requires a pattern"))
1317 1317 return checkstatus(repo, subset, pat, 2)
1318 1318
1319 1319 def rev(repo, subset, x):
1320 1320 """``rev(number)``
1321 1321 Revision with the given numeric identifier.
1322 1322 """
1323 1323 # i18n: "rev" is a keyword
1324 1324 l = getargs(x, 1, 1, _("rev requires one argument"))
1325 1325 try:
1326 1326 # i18n: "rev" is a keyword
1327 1327 l = int(getstring(l[0], _("rev requires a number")))
1328 1328 except (TypeError, ValueError):
1329 1329 # i18n: "rev" is a keyword
1330 1330 raise error.ParseError(_("rev expects a number"))
1331 1331 return subset.filter(lambda r: r == l)
1332 1332
1333 1333 def matching(repo, subset, x):
1334 1334 """``matching(revision [, field])``
1335 1335 Changesets in which a given set of fields match the set of fields in the
1336 1336 selected revision or set.
1337 1337
1338 1338 To match more than one field pass the list of fields to match separated
1339 1339 by spaces (e.g. ``author description``).
1340 1340
1341 1341 Valid fields are most regular revision fields and some special fields.
1342 1342
1343 1343 Regular revision fields are ``description``, ``author``, ``branch``,
1344 1344 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1345 1345 and ``diff``.
1346 1346 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1347 1347 contents of the revision. Two revisions matching their ``diff`` will
1348 1348 also match their ``files``.
1349 1349
1350 1350 Special fields are ``summary`` and ``metadata``:
1351 1351 ``summary`` matches the first line of the description.
1352 1352 ``metadata`` is equivalent to matching ``description user date``
1353 1353 (i.e. it matches the main metadata fields).
1354 1354
1355 1355 ``metadata`` is the default field which is used when no fields are
1356 1356 specified. You can match more than one field at a time.
1357 1357 """
1358 1358 # i18n: "matching" is a keyword
1359 1359 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1360 1360
1361 1361 revs = getset(repo, baseset(repo.changelog), l[0])
1362 1362
1363 1363 fieldlist = ['metadata']
1364 1364 if len(l) > 1:
1365 1365 fieldlist = getstring(l[1],
1366 1366 # i18n: "matching" is a keyword
1367 1367 _("matching requires a string "
1368 1368 "as its second argument")).split()
1369 1369
1370 1370 # Make sure that there are no repeated fields,
1371 1371 # expand the 'special' 'metadata' field type
1372 1372 # and check the 'files' whenever we check the 'diff'
1373 1373 fields = []
1374 1374 for field in fieldlist:
1375 1375 if field == 'metadata':
1376 1376 fields += ['user', 'description', 'date']
1377 1377 elif field == 'diff':
1378 1378 # a revision matching the diff must also match the files
1379 1379 # since matching the diff is very costly, make sure to
1380 1380 # also match the files first
1381 1381 fields += ['files', 'diff']
1382 1382 else:
1383 1383 if field == 'author':
1384 1384 field = 'user'
1385 1385 fields.append(field)
1386 1386 fields = set(fields)
1387 1387 if 'summary' in fields and 'description' in fields:
1388 1388 # If a revision matches its description it also matches its summary
1389 1389 fields.discard('summary')
1390 1390
1391 1391 # We may want to match more than one field
1392 1392 # Not all fields take the same amount of time to be matched
1393 1393 # Sort the selected fields in order of increasing matching cost
1394 1394 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1395 1395 'files', 'description', 'substate', 'diff']
1396 1396 def fieldkeyfunc(f):
1397 1397 try:
1398 1398 return fieldorder.index(f)
1399 1399 except ValueError:
1400 1400 # assume an unknown field is very costly
1401 1401 return len(fieldorder)
1402 1402 fields = list(fields)
1403 1403 fields.sort(key=fieldkeyfunc)
1404 1404
1405 1405 # Each field will be matched with its own "getfield" function
1406 1406 # which will be added to the getfieldfuncs array of functions
1407 1407 getfieldfuncs = []
1408 1408 _funcs = {
1409 1409 'user': lambda r: repo[r].user(),
1410 1410 'branch': lambda r: repo[r].branch(),
1411 1411 'date': lambda r: repo[r].date(),
1412 1412 'description': lambda r: repo[r].description(),
1413 1413 'files': lambda r: repo[r].files(),
1414 1414 'parents': lambda r: repo[r].parents(),
1415 1415 'phase': lambda r: repo[r].phase(),
1416 1416 'substate': lambda r: repo[r].substate,
1417 1417 'summary': lambda r: repo[r].description().splitlines()[0],
1418 1418 'diff': lambda r: list(repo[r].diff(git=True),)
1419 1419 }
1420 1420 for info in fields:
1421 1421 getfield = _funcs.get(info, None)
1422 1422 if getfield is None:
1423 1423 raise error.ParseError(
1424 1424 # i18n: "matching" is a keyword
1425 1425 _("unexpected field name passed to matching: %s") % info)
1426 1426 getfieldfuncs.append(getfield)
1427 1427 # convert the getfield array of functions into a "getinfo" function
1428 1428 # which returns an array of field values (or a single value if there
1429 1429 # is only one field to match)
1430 1430 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1431 1431
1432 1432 def matches(x):
1433 1433 for rev in revs:
1434 1434 target = getinfo(rev)
1435 1435 match = True
1436 1436 for n, f in enumerate(getfieldfuncs):
1437 1437 if target[n] != f(x):
1438 1438 match = False
1439 1439 if match:
1440 1440 return True
1441 1441 return False
1442 1442
1443 1443 return subset.filter(matches)
1444 1444
1445 1445 def reverse(repo, subset, x):
1446 1446 """``reverse(set)``
1447 1447 Reverse order of set.
1448 1448 """
1449 1449 l = getset(repo, subset, x)
1450 1450 l.reverse()
1451 1451 return l
1452 1452
1453 1453 def roots(repo, subset, x):
1454 1454 """``roots(set)``
1455 1455 Changesets in set with no parent changeset in set.
1456 1456 """
1457 1457 s = getset(repo, baseset(repo.changelog), x).set()
1458 1458 subset = baseset([r for r in subset if r in s])
1459 1459 cs = _children(repo, subset, s)
1460 1460 return subset - cs
1461 1461
1462 1462 def secret(repo, subset, x):
1463 1463 """``secret()``
1464 1464 Changeset in secret phase."""
1465 1465 # i18n: "secret" is a keyword
1466 1466 getargs(x, 0, 0, _("secret takes no arguments"))
1467 1467 pc = repo._phasecache
1468 1468 return subset.filter(lambda x: pc.phase(repo, x) == phases.secret)
1469 1469
1470 1470 def sort(repo, subset, x):
1471 1471 """``sort(set[, [-]key...])``
1472 1472 Sort set by keys. The default sort order is ascending, specify a key
1473 1473 as ``-key`` to sort in descending order.
1474 1474
1475 1475 The keys can be:
1476 1476
1477 1477 - ``rev`` for the revision number,
1478 1478 - ``branch`` for the branch name,
1479 1479 - ``desc`` for the commit message (description),
1480 1480 - ``user`` for user name (``author`` can be used as an alias),
1481 1481 - ``date`` for the commit date
1482 1482 """
1483 1483 # i18n: "sort" is a keyword
1484 1484 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1485 1485 keys = "rev"
1486 1486 if len(l) == 2:
1487 1487 # i18n: "sort" is a keyword
1488 1488 keys = getstring(l[1], _("sort spec must be a string"))
1489 1489
1490 1490 s = l[0]
1491 1491 keys = keys.split()
1492 1492 l = []
1493 1493 def invert(s):
1494 1494 return "".join(chr(255 - ord(c)) for c in s)
1495 1495 revs = getset(repo, subset, s)
1496 1496 if keys == ["rev"]:
1497 1497 revs.sort()
1498 1498 return revs
1499 1499 elif keys == ["-rev"]:
1500 1500 revs.sort(reverse=True)
1501 1501 return revs
1502 1502 for r in revs:
1503 1503 c = repo[r]
1504 1504 e = []
1505 1505 for k in keys:
1506 1506 if k == 'rev':
1507 1507 e.append(r)
1508 1508 elif k == '-rev':
1509 1509 e.append(-r)
1510 1510 elif k == 'branch':
1511 1511 e.append(c.branch())
1512 1512 elif k == '-branch':
1513 1513 e.append(invert(c.branch()))
1514 1514 elif k == 'desc':
1515 1515 e.append(c.description())
1516 1516 elif k == '-desc':
1517 1517 e.append(invert(c.description()))
1518 1518 elif k in 'user author':
1519 1519 e.append(c.user())
1520 1520 elif k in '-user -author':
1521 1521 e.append(invert(c.user()))
1522 1522 elif k == 'date':
1523 1523 e.append(c.date()[0])
1524 1524 elif k == '-date':
1525 1525 e.append(-c.date()[0])
1526 1526 else:
1527 1527 raise error.ParseError(_("unknown sort key %r") % k)
1528 1528 e.append(r)
1529 1529 l.append(e)
1530 1530 l.sort()
1531 1531 return baseset([e[-1] for e in l])
1532 1532
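# For descending string keys, sort() relies on invert(): every character c is
# replaced by chr(255 - ord(c)), so an ordinary ascending sort of the
# inverted keys lists the original strings in descending order. A quick
# illustration with toy branch names (not repository data):
def _invert_demo(s):
    return "".join(chr(255 - ord(c)) for c in s)

_names = ["stable", "default", "feature"]
assert sorted(_names, key=_invert_demo) == sorted(_names, reverse=True)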
1533 1533 def _stringmatcher(pattern):
1534 1534 """
1535 1535 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1536 1536 returns the matcher name, pattern, and matcher function.
1537 1537 missing or unknown prefixes are treated as literal matches.
1538 1538
1539 1539 helper for tests:
1540 1540 >>> def test(pattern, *tests):
1541 1541 ... kind, pattern, matcher = _stringmatcher(pattern)
1542 1542 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1543 1543
1544 1544 exact matching (no prefix):
1545 1545 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1546 1546 ('literal', 'abcdefg', [False, False, True])
1547 1547
1548 1548 regex matching ('re:' prefix)
1549 1549 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1550 1550 ('re', 'a.+b', [False, False, True])
1551 1551
1552 1552 force exact matches ('literal:' prefix)
1553 1553 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1554 1554 ('literal', 're:foobar', [False, True])
1555 1555
1556 1556 unknown prefixes are ignored and treated as literals
1557 1557 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1558 1558 ('literal', 'foo:bar', [False, False, True])
1559 1559 """
1560 1560 if pattern.startswith('re:'):
1561 1561 pattern = pattern[3:]
1562 1562 try:
1563 1563 regex = re.compile(pattern)
1564 1564 except re.error, e:
1565 1565 raise error.ParseError(_('invalid regular expression: %s')
1566 1566 % e)
1567 1567 return 're', pattern, regex.search
1568 1568 elif pattern.startswith('literal:'):
1569 1569 pattern = pattern[8:]
1570 1570 return 'literal', pattern, pattern.__eq__
1571 1571
1572 1572 def _substringmatcher(pattern):
1573 1573 kind, pattern, matcher = _stringmatcher(pattern)
1574 1574 if kind == 'literal':
1575 1575 matcher = lambda s: pattern in s
1576 1576 return kind, pattern, matcher
1577 1577
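# _stringmatcher()/_substringmatcher() in action, as used by bookmark(),
# branch() and extra() above and tag() below; illustrative checks only:
_kind, _pat, _match = _stringmatcher('re:^release-')
assert (_kind, _pat) == ('re', '^release-')
assert _match('release-1.0') and not _match('old-release-1.0')

_kind, _pat, _match = _substringmatcher('bob')
assert _kind == 'literal' and _match('bob@example.com')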
1578 1578 def tag(repo, subset, x):
1579 1579 """``tag([name])``
1580 1580 The specified tag by name, or all tagged revisions if no name is given.
1581 1581 """
1582 1582 # i18n: "tag" is a keyword
1583 1583 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1584 1584 cl = repo.changelog
1585 1585 if args:
1586 1586 pattern = getstring(args[0],
1587 1587 # i18n: "tag" is a keyword
1588 1588 _('the argument to tag must be a string'))
1589 1589 kind, pattern, matcher = _stringmatcher(pattern)
1590 1590 if kind == 'literal':
1591 1591 # avoid resolving all tags
1592 1592 tn = repo._tagscache.tags.get(pattern, None)
1593 1593 if tn is None:
1594 1594 raise util.Abort(_("tag '%s' does not exist") % pattern)
1595 1595 s = set([repo[tn].rev()])
1596 1596 else:
1597 1597 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1598 1598 else:
1599 1599 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1600 1600 return subset & s
1601 1601
1602 1602 def tagged(repo, subset, x):
1603 1603 return tag(repo, subset, x)
1604 1604
1605 1605 def unstable(repo, subset, x):
1606 1606 """``unstable()``
1607 1607 Non-obsolete changesets with obsolete ancestors.
1608 1608 """
1609 1609 # i18n: "unstable" is a keyword
1610 1610 getargs(x, 0, 0, _("unstable takes no arguments"))
1611 1611 unstables = obsmod.getrevs(repo, 'unstable')
1612 1612 return subset & unstables
1613 1613
1614 1614
1615 1615 def user(repo, subset, x):
1616 1616 """``user(string)``
1617 1617 User name contains string. The match is case-insensitive.
1618 1618
1619 1619 If `string` starts with `re:`, the remainder of the string is treated as
1620 1620 a regular expression. To match a user that actually contains `re:`, use
1621 1621 the prefix `literal:`.
1622 1622 """
1623 1623 return author(repo, subset, x)
1624 1624
1625 1625 # for internal use
1626 1626 def _list(repo, subset, x):
1627 1627 s = getstring(x, "internal error")
1628 1628 if not s:
1629 1629 return baseset([])
1630 1630 ls = [repo[r].rev() for r in s.split('\0')]
1631 1631 s = subset.set()
1632 1632 return baseset([r for r in ls if r in s])
1633 1633
1634 1634 # for internal use
1635 1635 def _intlist(repo, subset, x):
1636 1636 s = getstring(x, "internal error")
1637 1637 if not s:
1638 1638 return baseset([])
1639 1639 ls = [int(r) for r in s.split('\0')]
1640 1640 s = subset.set()
1641 1641 return baseset([r for r in ls if r in s])
1642 1642
1643 1643 # for internal use
1644 1644 def _hexlist(repo, subset, x):
1645 1645 s = getstring(x, "internal error")
1646 1646 if not s:
1647 1647 return baseset([])
1648 1648 cl = repo.changelog
1649 1649 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1650 1650 s = subset.set()
1651 1651 return baseset([r for r in ls if r in s])
1652 1652
1653 1653 symbols = {
1654 1654 "adds": adds,
1655 1655 "all": getall,
1656 1656 "ancestor": ancestor,
1657 1657 "ancestors": ancestors,
1658 1658 "_firstancestors": _firstancestors,
1659 1659 "author": author,
1660 1660 "only": only,
1661 1661 "bisect": bisect,
1662 1662 "bisected": bisected,
1663 1663 "bookmark": bookmark,
1664 1664 "branch": branch,
1665 1665 "branchpoint": branchpoint,
1666 1666 "bumped": bumped,
1667 1667 "bundle": bundle,
1668 1668 "children": children,
1669 1669 "closed": closed,
1670 1670 "contains": contains,
1671 1671 "converted": converted,
1672 1672 "date": date,
1673 1673 "desc": desc,
1674 1674 "descendants": descendants,
1675 1675 "_firstdescendants": _firstdescendants,
1676 1676 "destination": destination,
1677 1677 "divergent": divergent,
1678 1678 "draft": draft,
1679 1679 "extinct": extinct,
1680 1680 "extra": extra,
1681 1681 "file": hasfile,
1682 1682 "filelog": filelog,
1683 1683 "first": first,
1684 1684 "follow": follow,
1685 1685 "_followfirst": _followfirst,
1686 1686 "grep": grep,
1687 1687 "head": head,
1688 1688 "heads": heads,
1689 1689 "hidden": hidden,
1690 1690 "id": node_,
1691 1691 "keyword": keyword,
1692 1692 "last": last,
1693 1693 "limit": limit,
1694 1694 "_matchfiles": _matchfiles,
1695 1695 "max": maxrev,
1696 1696 "merge": merge,
1697 1697 "min": minrev,
1698 1698 "_missingancestors": _missingancestors,
1699 1699 "modifies": modifies,
1700 1700 "obsolete": obsolete,
1701 1701 "origin": origin,
1702 1702 "outgoing": outgoing,
1703 1703 "p1": p1,
1704 1704 "p2": p2,
1705 1705 "parents": parents,
1706 1706 "present": present,
1707 1707 "public": public,
1708 1708 "remote": remote,
1709 1709 "removes": removes,
1710 1710 "rev": rev,
1711 1711 "reverse": reverse,
1712 1712 "roots": roots,
1713 1713 "sort": sort,
1714 1714 "secret": secret,
1715 1715 "matching": matching,
1716 1716 "tag": tag,
1717 1717 "tagged": tagged,
1718 1718 "user": user,
1719 1719 "unstable": unstable,
1720 1720 "_list": _list,
1721 1721 "_intlist": _intlist,
1722 1722 "_hexlist": _hexlist,
1723 1723 }
1724 1724
1725 1725 # symbols which can't be used for a DoS attack for any given input
1726 1726 # (e.g. those which accept regexes as plain strings shouldn't be included)
1727 1727 # functions that just return a lot of changesets (like all) don't count here
1728 1728 safesymbols = set([
1729 1729 "adds",
1730 1730 "all",
1731 1731 "ancestor",
1732 1732 "ancestors",
1733 1733 "_firstancestors",
1734 1734 "author",
1735 1735 "bisect",
1736 1736 "bisected",
1737 1737 "bookmark",
1738 1738 "branch",
1739 1739 "branchpoint",
1740 1740 "bumped",
1741 1741 "bundle",
1742 1742 "children",
1743 1743 "closed",
1744 1744 "converted",
1745 1745 "date",
1746 1746 "desc",
1747 1747 "descendants",
1748 1748 "_firstdescendants",
1749 1749 "destination",
1750 1750 "divergent",
1751 1751 "draft",
1752 1752 "extinct",
1753 1753 "extra",
1754 1754 "file",
1755 1755 "filelog",
1756 1756 "first",
1757 1757 "follow",
1758 1758 "_followfirst",
1759 1759 "head",
1760 1760 "heads",
1761 1761 "hidden",
1762 1762 "id",
1763 1763 "keyword",
1764 1764 "last",
1765 1765 "limit",
1766 1766 "_matchfiles",
1767 1767 "max",
1768 1768 "merge",
1769 1769 "min",
1770 1770 "_missingancestors",
1771 1771 "modifies",
1772 1772 "obsolete",
1773 1773 "origin",
1774 1774 "outgoing",
1775 1775 "p1",
1776 1776 "p2",
1777 1777 "parents",
1778 1778 "present",
1779 1779 "public",
1780 1780 "remote",
1781 1781 "removes",
1782 1782 "rev",
1783 1783 "reverse",
1784 1784 "roots",
1785 1785 "sort",
1786 1786 "secret",
1787 1787 "matching",
1788 1788 "tag",
1789 1789 "tagged",
1790 1790 "user",
1791 1791 "unstable",
1792 1792 "_list",
1793 1793 "_intlist",
1794 1794 "_hexlist",
1795 1795 ])
1796 1796
1797 1797 methods = {
1798 1798 "range": rangeset,
1799 1799 "dagrange": dagrange,
1800 1800 "string": stringset,
1801 1801 "symbol": symbolset,
1802 1802 "and": andset,
1803 1803 "or": orset,
1804 1804 "not": notset,
1805 1805 "list": listset,
1806 1806 "func": func,
1807 1807 "ancestor": ancestorspec,
1808 1808 "parent": parentspec,
1809 1809 "parentpost": p1,
1810 1810 }
1811 1811
1812 1812 def optimize(x, small):
1813 1813 if x is None:
1814 1814 return 0, x
1815 1815
1816 1816 smallbonus = 1
1817 1817 if small:
1818 1818 smallbonus = .5
1819 1819
1820 1820 op = x[0]
1821 1821 if op == 'minus':
1822 1822 return optimize(('and', x[1], ('not', x[2])), small)
1823 1823 elif op == 'dagrangepre':
1824 1824 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
1825 1825 elif op == 'dagrangepost':
1826 1826 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
1827 1827 elif op == 'rangepre':
1828 1828 return optimize(('range', ('string', '0'), x[1]), small)
1829 1829 elif op == 'rangepost':
1830 1830 return optimize(('range', x[1], ('string', 'tip')), small)
1831 1831 elif op == 'negate':
1832 1832 return optimize(('string',
1833 1833 '-' + getstring(x[1], _("can't negate that"))), small)
1834 1834 elif op in 'string symbol negate':
1835 1835 return smallbonus, x # single revisions are small
1836 1836 elif op == 'and':
1837 1837 wa, ta = optimize(x[1], True)
1838 1838 wb, tb = optimize(x[2], True)
1839 1839
1840 1840 # (::x and not ::y)/(not ::y and ::x) have a fast path
1841 1841 def ismissingancestors(revs, bases):
1842 1842 return (
1843 1843 revs[0] == 'func'
1844 1844 and getstring(revs[1], _('not a symbol')) == 'ancestors'
1845 1845 and bases[0] == 'not'
1846 1846 and bases[1][0] == 'func'
1847 1847 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
1848 1848
1849 1849 w = min(wa, wb)
1850 1850 if ismissingancestors(ta, tb):
1851 1851 return w, ('func', ('symbol', '_missingancestors'),
1852 1852 ('list', ta[2], tb[1][2]))
1853 1853 if ismissingancestors(tb, ta):
1854 1854 return w, ('func', ('symbol', '_missingancestors'),
1855 1855 ('list', tb[2], ta[1][2]))
1856 1856
1857 1857 if wa > wb:
1858 1858 return w, (op, tb, ta)
1859 1859 return w, (op, ta, tb)
1860 1860 elif op == 'or':
1861 1861 wa, ta = optimize(x[1], False)
1862 1862 wb, tb = optimize(x[2], False)
1863 1863 if wb < wa:
1864 1864 wb, wa = wa, wb
1865 1865 return max(wa, wb), (op, ta, tb)
1866 1866 elif op == 'not':
1867 1867 o = optimize(x[1], not small)
1868 1868 return o[0], (op, o[1])
1869 1869 elif op == 'parentpost':
1870 1870 o = optimize(x[1], small)
1871 1871 return o[0], (op, o[1])
1872 1872 elif op == 'group':
1873 1873 return optimize(x[1], small)
1874 1874 elif op in 'dagrange range list parent ancestorspec':
1875 1875 if op == 'parent':
1876 1876 # x^:y means (x^) : y, not x ^ (:y)
1877 1877 post = ('parentpost', x[1])
1878 1878 if x[2][0] == 'dagrangepre':
1879 1879 return optimize(('dagrange', post, x[2][1]), small)
1880 1880 elif x[2][0] == 'rangepre':
1881 1881 return optimize(('range', post, x[2][1]), small)
1882 1882
1883 1883 wa, ta = optimize(x[1], small)
1884 1884 wb, tb = optimize(x[2], small)
1885 1885 return wa + wb, (op, ta, tb)
1886 1886 elif op == 'func':
1887 1887 f = getstring(x[1], _("not a symbol"))
1888 1888 wa, ta = optimize(x[2], small)
1889 1889 if f in ("author branch closed date desc file grep keyword "
1890 1890 "outgoing user"):
1891 1891 w = 10 # slow
1892 1892 elif f in "modifies adds removes":
1893 1893 w = 30 # slower
1894 1894 elif f == "contains":
1895 1895 w = 100 # very slow
1896 1896 elif f == "ancestor":
1897 1897 w = 1 * smallbonus
1898 1898 elif f in "reverse limit first":
1899 1899 w = 0
1900 1900 elif f in "sort":
1901 1901 w = 10 # assume most sorts look at changelog
1902 1902 else:
1903 1903 w = 1
1904 1904 return w + wa, (op, x[1], ta)
1905 1905 return 1, x
1906 1906
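# Illustrative only: optimize() rewrites syntactic sugar into primitive
# operations and returns a heuristic cost weight. For instance, a 'minus' node
# becomes an 'and'/'not' pair (exact weights depend on the operands):
_tree, _pos = parse('tip - merge()')
_weight, _opt = optimize(_tree, True)
# _opt is roughly ('and', ('symbol', 'tip'),
#                         ('not', ('func', ('symbol', 'merge'), None)))
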
1907 1907 _aliasarg = ('func', ('symbol', '_aliasarg'))
1908 1908 def _getaliasarg(tree):
1909 1909 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
1910 1910 return X, None otherwise.
1911 1911 """
1912 1912 if (len(tree) == 3 and tree[:2] == _aliasarg
1913 1913 and tree[2][0] == 'string'):
1914 1914 return tree[2][1]
1915 1915 return None
1916 1916
1917 1917 def _checkaliasarg(tree, known=None):
1918 1918 """Check that tree contains no _aliasarg construct, or only ones whose
1919 1919 value is in known. Used to avoid alias placeholder injection.
1920 1920 """
1921 1921 if isinstance(tree, tuple):
1922 1922 arg = _getaliasarg(tree)
1923 1923 if arg is not None and (not known or arg not in known):
1924 1924 raise error.ParseError(_("not a function: %s") % '_aliasarg')
1925 1925 for t in tree:
1926 1926 _checkaliasarg(t, known)
1927 1927
1928 1928 class revsetalias(object):
1929 1929 funcre = re.compile('^([^(]+)\(([^)]+)\)$')
1930 1930 args = None
1931 1931
1932 1932 def __init__(self, name, value):
1933 1933 '''Aliases like:
1934 1934
1935 1935 h = heads(default)
1936 1936 b($1) = ancestors($1) - ancestors(default)
1937 1937 '''
1938 1938 m = self.funcre.search(name)
1939 1939 if m:
1940 1940 self.name = m.group(1)
1941 1941 self.tree = ('func', ('symbol', m.group(1)))
1942 1942 self.args = [x.strip() for x in m.group(2).split(',')]
1943 1943 for arg in self.args:
1944 1944 # _aliasarg() is an unknown symbol only used to separate
1945 1945 # alias argument placeholders from regular strings.
1946 1946 value = value.replace(arg, '_aliasarg(%r)' % (arg,))
1947 1947 else:
1948 1948 self.name = name
1949 1949 self.tree = ('symbol', name)
1950 1950
1951 1951 self.replacement, pos = parse(value)
1952 1952 if pos != len(value):
1953 1953 raise error.ParseError(_('invalid token'), pos)
1954 1954 # Check for placeholder injection
1955 1955 _checkaliasarg(self.replacement, self.args)
1956 1956
1957 1957 def _getalias(aliases, tree):
1958 1958 """If tree looks like an unexpanded alias, return it. Return None
1959 1959 otherwise.
1960 1960 """
1961 1961 if isinstance(tree, tuple) and tree:
1962 1962 if tree[0] == 'symbol' and len(tree) == 2:
1963 1963 name = tree[1]
1964 1964 alias = aliases.get(name)
1965 1965 if alias and alias.args is None and alias.tree == tree:
1966 1966 return alias
1967 1967 if tree[0] == 'func' and len(tree) > 1:
1968 1968 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
1969 1969 name = tree[1][1]
1970 1970 alias = aliases.get(name)
1971 1971 if alias and alias.args is not None and alias.tree == tree[:2]:
1972 1972 return alias
1973 1973 return None
1974 1974
1975 1975 def _expandargs(tree, args):
1976 1976 """Replace _aliasarg instances with the substitution value of the
1977 1977 same name in args, recursively.
1978 1978 """
1979 1979 if not tree or not isinstance(tree, tuple):
1980 1980 return tree
1981 1981 arg = _getaliasarg(tree)
1982 1982 if arg is not None:
1983 1983 return args[arg]
1984 1984 return tuple(_expandargs(t, args) for t in tree)
1985 1985
1986 1986 def _expandaliases(aliases, tree, expanding, cache):
1987 1987 """Expand aliases in tree, recursively.
1988 1988
1989 1989 'aliases' is a dictionary mapping user defined aliases to
1990 1990 revsetalias objects.
1991 1991 """
1992 1992 if not isinstance(tree, tuple):
1993 1993 # Do not expand raw strings
1994 1994 return tree
1995 1995 alias = _getalias(aliases, tree)
1996 1996 if alias is not None:
1997 1997 if alias in expanding:
1998 1998 raise error.ParseError(_('infinite expansion of revset alias "%s" '
1999 1999 'detected') % alias.name)
2000 2000 expanding.append(alias)
2001 2001 if alias.name not in cache:
2002 2002 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2003 2003 expanding, cache)
2004 2004 result = cache[alias.name]
2005 2005 expanding.pop()
2006 2006 if alias.args is not None:
2007 2007 l = getlist(tree[2])
2008 2008 if len(l) != len(alias.args):
2009 2009 raise error.ParseError(
2010 2010 _('invalid number of arguments: %s') % len(l))
2011 2011 l = [_expandaliases(aliases, a, [], cache) for a in l]
2012 2012 result = _expandargs(result, dict(zip(alias.args, l)))
2013 2013 else:
2014 2014 result = tuple(_expandaliases(aliases, t, expanding, cache)
2015 2015 for t in tree)
2016 2016 return result
2017 2017
2018 2018 def findaliases(ui, tree):
2019 2019 _checkaliasarg(tree)
2020 2020 aliases = {}
2021 2021 for k, v in ui.configitems('revsetalias'):
2022 2022 alias = revsetalias(k, v)
2023 2023 aliases[alias.name] = alias
2024 2024 return _expandaliases(aliases, tree, [], {})
2025 2025
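# Illustrative only, reusing the alias shown in the revsetalias docstring above:
# with  b($1) = ancestors($1) - ancestors(default)  configured in the
# [revsetalias] section, findaliases(ui, ...) rewrites the parse tree of
# 'b(1.5)' into the tree of 'ancestors(1.5) - ancestors(default)' before the
# expression is optimized and evaluated.
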
2026 2026 def parse(spec):
2027 2027 p = parser.parser(tokenize, elements)
2028 2028 return p.parse(spec)
2029 2029
2030 2030 def match(ui, spec):
2031 2031 if not spec:
2032 2032 raise error.ParseError(_("empty query"))
2033 2033 tree, pos = parse(spec)
2034 2034 if (pos != len(spec)):
2035 2035 raise error.ParseError(_("invalid token"), pos)
2036 2036 if ui:
2037 2037 tree = findaliases(ui, tree)
2038 2038 weight, tree = optimize(tree, True)
2039 2039 def mfunc(repo, subset):
2040 2040 if util.safehasattr(subset, 'set'):
2041 2041 return getset(repo, subset, tree)
2042 2042 return getset(repo, baseset(subset), tree)
2043 2043 return mfunc
2044 2044
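# Typical use of the entry point above (illustrative; 'ui' and 'repo' are
# assumed to be real ui and repository objects supplied by the caller):
#
#     m = match(ui, 'heads(default) and not closed()')
#     revs = m(repo, spanset(repo))   # evaluate against every revision
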
2045 2045 def formatspec(expr, *args):
2046 2046 '''
2047 2047 This is a convenience function for using revsets internally, and
2048 2048 escapes arguments appropriately. Aliases are intentionally ignored
2049 2049 so that intended expression behavior isn't accidentally subverted.
2050 2050
2051 2051 Supported arguments:
2052 2052
2053 2053 %r = revset expression, parenthesized
2054 2054 %d = int(arg), no quoting
2055 2055 %s = string(arg), escaped and single-quoted
2056 2056 %b = arg.branch(), escaped and single-quoted
2057 2057 %n = hex(arg), single-quoted
2058 2058 %% = a literal '%'
2059 2059
2060 2060 Prefixing the type with 'l' specifies a parenthesized list of that type.
2061 2061
2062 2062 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2063 2063 '(10 or 11):: and ((this()) or (that()))'
2064 2064 >>> formatspec('%d:: and not %d::', 10, 20)
2065 2065 '10:: and not 20::'
2066 2066 >>> formatspec('%ld or %ld', [], [1])
2067 2067 "_list('') or 1"
2068 2068 >>> formatspec('keyword(%s)', 'foo\\xe9')
2069 2069 "keyword('foo\\\\xe9')"
2070 2070 >>> b = lambda: 'default'
2071 2071 >>> b.branch = b
2072 2072 >>> formatspec('branch(%b)', b)
2073 2073 "branch('default')"
2074 2074 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2075 2075 "root(_list('a\\x00b\\x00c\\x00d'))"
2076 2076 '''
2077 2077
2078 2078 def quote(s):
2079 2079 return repr(str(s))
2080 2080
2081 2081 def argtype(c, arg):
2082 2082 if c == 'd':
2083 2083 return str(int(arg))
2084 2084 elif c == 's':
2085 2085 return quote(arg)
2086 2086 elif c == 'r':
2087 2087 parse(arg) # make sure syntax errors are confined
2088 2088 return '(%s)' % arg
2089 2089 elif c == 'n':
2090 2090 return quote(node.hex(arg))
2091 2091 elif c == 'b':
2092 2092 return quote(arg.branch())
2093 2093
2094 2094 def listexp(s, t):
2095 2095 l = len(s)
2096 2096 if l == 0:
2097 2097 return "_list('')"
2098 2098 elif l == 1:
2099 2099 return argtype(t, s[0])
2100 2100 elif t == 'd':
2101 2101 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2102 2102 elif t == 's':
2103 2103 return "_list('%s')" % "\0".join(s)
2104 2104 elif t == 'n':
2105 2105 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2106 2106 elif t == 'b':
2107 2107 return "_list('%s')" % "\0".join(a.branch() for a in s)
2108 2108
2109 2109 m = l // 2
2110 2110 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2111 2111
2112 2112 ret = ''
2113 2113 pos = 0
2114 2114 arg = 0
2115 2115 while pos < len(expr):
2116 2116 c = expr[pos]
2117 2117 if c == '%':
2118 2118 pos += 1
2119 2119 d = expr[pos]
2120 2120 if d == '%':
2121 2121 ret += d
2122 2122 elif d in 'dsnbr':
2123 2123 ret += argtype(d, args[arg])
2124 2124 arg += 1
2125 2125 elif d == 'l':
2126 2126 # a list of some type
2127 2127 pos += 1
2128 2128 d = expr[pos]
2129 2129 ret += listexp(list(args[arg]), d)
2130 2130 arg += 1
2131 2131 else:
2132 2132 raise util.Abort('unexpected revspec format character %s' % d)
2133 2133 else:
2134 2134 ret += c
2135 2135 pos += 1
2136 2136
2137 2137 return ret
2138 2138
2139 2139 def prettyformat(tree):
2140 2140 def _prettyformat(tree, level, lines):
2141 2141 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2142 2142 lines.append((level, str(tree)))
2143 2143 else:
2144 2144 lines.append((level, '(%s' % tree[0]))
2145 2145 for s in tree[1:]:
2146 2146 _prettyformat(s, level + 1, lines)
2147 2147 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2148 2148
2149 2149 lines = []
2150 2150 _prettyformat(tree, 0, lines)
2151 2151 output = '\n'.join((' '*l + s) for l, s in lines)
2152 2152 return output
2153 2153
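# Illustrative output (toy tree, not module code): prettyformat indents one
# space per nesting level and keeps 'string'/'symbol' leaves on their own
# lines, e.g.
#
#     >>> print prettyformat(('or', ('symbol', 'a'), ('symbol', 'b')))
#     (or
#      ('symbol', 'a')
#      ('symbol', 'b'))
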
2154 2154 def depth(tree):
2155 2155 if isinstance(tree, tuple):
2156 2156 return max(map(depth, tree)) + 1
2157 2157 else:
2158 2158 return 0
2159 2159
2160 2160 def funcsused(tree):
2161 2161 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2162 2162 return set()
2163 2163 else:
2164 2164 funcs = set()
2165 2165 for s in tree[1:]:
2166 2166 funcs |= funcsused(s)
2167 2167 if tree[0] == 'func':
2168 2168 funcs.add(tree[1][1])
2169 2169 return funcs
2170 2170
2171 2171 class baseset(list):
2172 2172 """Basic data structure that represents a revset and contains the basic
2173 2173 operations that it should be able to perform.
2174 2174
2175 2175 Every method in this class should be implemented by any smartset class.
2176 2176 """
2177 2177 def __init__(self, data):
2178 2178 super(baseset, self).__init__(data)
2179 2179 self._set = None
2180 2180
2181 2181 def ascending(self):
2182 2182 """Sorts the set in ascending order (in place).
2183 2183
2184 2184 This is part of the mandatory API for smartset."""
2185 2185 self.sort()
2186 2186
2187 2187 def descending(self):
2188 2188 """Sorts the set in descending order (in place).
2189 2189
2190 2190 This is part of the mandatory API for smartset."""
2191 2191 self.sort(reverse=True)
2192 2192
2193 2193 def set(self):
2194 2194 """Returns a set or a smartset containing all the elements.
2195 2195
2196 2196 The returned structure should be the fastest option for membership
2197 2197 testing.
2198 2198
2199 2199 This is part of the mandatory API for smartset."""
2200 2200 if not self._set:
2201 2201 self._set = set(self)
2202 2202 return self._set
2203 2203
2204 2204 def __sub__(self, other):
2205 2205 """Returns a new object with the subtraction of the two collections.
2206 2206
2207 2207 This is part of the mandatory API for smartset."""
2208 2208 if isinstance(other, baseset):
2209 2209 s = other.set()
2210 2210 else:
2211 2211 s = set(other)
2212 2212 return baseset(self.set() - s)
2213 2213
2214 2214 def __and__(self, other):
2215 2215 """Returns a new object with the intersection of the two collections.
2216 2216
2217 2217 This is part of the mandatory API for smartset."""
2218 2218 if isinstance(other, baseset):
2219 2219 other = other.set()
2220 2220 return baseset([y for y in self if y in other])
2221 2221
2222 2222 def __add__(self, other):
2223 2223 """Returns a new object with the union of the two collections.
2224 2224
2225 2225 This is part of the mandatory API for smartset."""
2226 2226 s = self.set()
2227 2227 l = [r for r in other if r not in s]
2228 2228 return baseset(list(self) + l)
2229 2229
2230 2230 def isascending(self):
2231 2231 """Returns True if the collection is in ascending order, False if not.
2232 2232
2233 2233 This is part of the mandatory API for smartset."""
2234 2234 return False
2235 2235
2236 2236 def isdescending(self):
2237 2237 """Returns True if the collection is in descending order, False if not.
2238 2238
2239 2239 This is part of the mandatory API for smartset."""
2240 2240 return False
2241 2241
2242 2242 def filter(self, condition):
2243 2243 """Returns this smartset filtered by condition as a new smartset.
2244 2244
2245 2245 `condition` is a callable which takes a revision number and returns a
2246 2246 boolean.
2247 2247
2248 2248 This is part of the mandatory API for smartset."""
2249 2249 return lazyset(self, condition)
2250 2250
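# Quick illustration of the smartset API on baseset (toy data, not module code):
_s = baseset([3, 1, 2])
_s.ascending()
assert list(_s) == [1, 2, 3]
assert list(_s & baseset([2, 3, 4])) == [2, 3]
assert sorted(_s - [3]) == [1, 2]
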
2251 2251 class lazyset(object):
2252 2252 """Duck type for baseset class which iterates lazily over the revisions in
2253 2253 the subset and contains a function which tests for membership in the
2254 2254 revset
2255 2255 """
2256 2256 def __init__(self, subset, condition=lambda x: True):
2257 2257 self._subset = subset
2258 2258 self._condition = condition
2259 2259 self._cache = {}
2260 2260
2261 2261 def ascending(self):
2262 2262 self._subset.sort()
2263 2263
2264 2264 def descending(self):
2265 2265 self._subset.sort(reverse=True)
2266 2266
2267 2267 def __contains__(self, x):
2268 2268 c = self._cache
2269 2269 if x not in c:
2270 2270 c[x] = x in self._subset and self._condition(x)
2271 2271 return c[x]
2272 2272
2273 2273 def __iter__(self):
2274 2274 cond = self._condition
2275 2275 for x in self._subset:
2276 2276 if cond(x):
2277 2277 yield x
2278 2278
2279 2279 def __and__(self, x):
2280 2280 return lazyset(self, lambda r: r in x)
2281 2281
2282 2282 def __sub__(self, x):
2283 2283 return lazyset(self, lambda r: r not in x)
2284 2284
2285 2285 def __add__(self, x):
2286 2286 return lazyset(_addset(self, x))
2287 2287
2288 2288 def __nonzero__(self):
2289 2289 for r in self:
2290 2290 return True
2291 2291 return False
2292 2292
2293 2293 def __len__(self):
2294 2294 # Basic implementation to be changed in future patches.
2295 2295 l = baseset([r for r in self])
2296 2296 return len(l)
2297 2297
2298 2298 def __getitem__(self, x):
2299 2299 # Basic implementation to be changed in future patches.
2300 2300 l = baseset([r for r in self])
2301 2301 return l[x]
2302 2302
2303 2303 def sort(self, reverse=False):
2304 2304 if not util.safehasattr(self._subset, 'sort'):
2305 2305 self._subset = baseset(self._subset)
2306 2306 self._subset.sort(reverse=reverse)
2307 2307
2308 2308 def reverse(self):
2309 2309 self._subset.reverse()
2310 2310
2311 2311 def set(self):
2312 2312 return set([r for r in self])
2313 2313
2314 2314 def isascending(self):
2315 2315 return False
2316 2316
2317 2317 def isdescending(self):
2318 2318 return False
2319 2319
2320 2320 def filter(self, l):
2321 2321 return lazyset(self, l)
2322 2322
2323 2323 class orderedlazyset(lazyset):
2324 2324 """Subclass of lazyset whose subset can be ordered either ascending or
2325 2325 descending
2326 2326 """
2327 2327 def __init__(self, subset, condition, ascending=True):
2328 2328 super(orderedlazyset, self).__init__(subset, condition)
2329 2329 self._ascending = ascending
2330 2330
2331 2331 def filter(self, l):
2332 2332 return orderedlazyset(self, l, ascending=self._ascending)
2333 2333
2334 2334 def ascending(self):
2335 2335 if not self._ascending:
2336 2336 self.reverse()
2337 2337
2338 2338 def descending(self):
2339 2339 if self._ascending:
2340 2340 self.reverse()
2341 2341
2342 2342 def __and__(self, x):
2343 2343 return orderedlazyset(self, lambda r: r in x,
2344 2344 ascending=self._ascending)
2345 2345
2346 2346 def __sub__(self, x):
2347 2347 return orderedlazyset(self, lambda r: r not in x,
2348 2348 ascending=self._ascending)
2349 2349
2350 2350 def sort(self, reverse=False):
2351 2351 if reverse:
2352 2352 if self._ascending:
2353 2353 self._subset.sort(reverse=reverse)
2354 2354 else:
2355 2355 if not self._ascending:
2356 2356 self._subset.sort(reverse=reverse)
2357 2357 self._ascending = not reverse
2358 2358
2359 2359 def isascending(self):
2360 2360 return self._ascending
2361 2361
2362 2362 def isdescending(self):
2363 2363 return not self._ascending
2364 2364
2365 2365 def reverse(self):
2366 2366 self._subset.reverse()
2367 2367 self._ascending = not self._ascending
2368 2368
2369 2369 class _addset(object):
2370 2370 """Represent the addition of two sets
2371 2371
2372 2372 Wrapper structure for lazily adding two structures without losing much
2373 2373 performance on the __contains__ method
2374 2374
2375 2375 If the ascending attribute is set, that means the two structures are
2376 2376 ordered in either an ascending or descending way. Therefore, we can add
2377 2377 them maintaining the order by iterating over both at the same time
2378 2378
2379 2379 This class does not duck-type baseset and it's only supposed to be used
2380 2380 internally
2381 2381 """
2382 2382 def __init__(self, revs1, revs2, ascending=None):
2383 2383 self._r1 = revs1
2384 2384 self._r2 = revs2
2385 2385 self._iter = None
2386 2386 self._ascending = ascending
2387 2387 self._genlist = None
2388 2388
2389 2389 @util.propertycache
2390 2390 def _list(self):
2391 2391 if not self._genlist:
2392 2392 self._genlist = baseset(self._iterator())
2393 2393 return self._genlist
2394 2394
2395 2395 def filter(self, condition):
2396 2396 if self._ascending is not None:
2397 2397 return orderedlazyset(self, condition, ascending=self._ascending)
2398 2398 return lazyset(self, condition)
2399 2399
2400 2400 def ascending(self):
2401 2401 if self._ascending is None:
2402 2402 self.sort()
2403 2403 self._ascending = True
2404 2404 else:
2405 2405 if not self._ascending:
2406 2406 self.reverse()
2407 2407
2408 2408 def descending(self):
2409 2409 if self._ascending is None:
2410 2410 self.sort(reverse=True)
2411 2411 self._ascending = False
2412 2412 else:
2413 2413 if self._ascending:
2414 2414 self.reverse()
2415 2415
2416 2416 def __and__(self, other):
2417 2417 filterfunc = other.__contains__
2418 2418 if self._ascending is not None:
2419 2419 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2420 2420 return lazyset(self, filterfunc)
2421 2421
2422 2422 def __sub__(self, other):
2423 2423 filterfunc = lambda r: r not in other
2424 2424 if self._ascending is not None:
2425 2425 return orderedlazyset(self, filterfunc, ascending=self._ascending)
2426 2426 return lazyset(self, filterfunc)
2427 2427
2428 2428 def __add__(self, other):
2429 2429 """When both collections are ascending or descending, preserve the order
2430 2430 """
2431 2431 kwargs = {}
2432 2432 if self._ascending is not None:
2433 2433 if self.isascending() and other.isascending():
2434 2434 kwargs['ascending'] = True
2435 2435 if self.isdescending() and other.isdescending():
2436 2436 kwargs['ascending'] = False
2437 2437 return _addset(self, other, **kwargs)
2438 2438
2439 2439 def _iterator(self):
2440 2440 """Iterate over both collections without repeating elements
2441 2441
2442 2442 If the ascending attribute is not set, iterate over the first one and
2443 2443 then over the second one, checking for membership in the first one so we
2444 2444 don't yield any duplicates.
2445 2445
2446 2446 If the ascending attribute is set, iterate over both collections at the
2447 2447 same time, yielding only one value at a time in the given order.
2448 2448 """
2449 2449 if not self._iter:
2450 2450 def gen():
2451 2451 if self._ascending is None:
2452 2452 for r in self._r1:
2453 2453 yield r
2454 2454 s = self._r1.set()
2455 2455 for r in self._r2:
2456 2456 if r not in s:
2457 2457 yield r
2458 2458 else:
2459 2459 iter1 = iter(self._r1)
2460 2460 iter2 = iter(self._r2)
2461 2461
2462 2462 val1 = None
2463 2463 val2 = None
2464 2464
2465 2465 choice = max
2466 2466 if self._ascending:
2467 2467 choice = min
2468 2468 try:
2469 2469 # Consume both iterators in an ordered way until one is
2470 2470 # empty
2471 2471 while True:
2472 2472 if val1 is None:
2473 2473 val1 = iter1.next()
2474 2474 if val2 is None:
2475 2475 val2 = iter2.next()
2476 2476 next = choice(val1, val2)
2477 2477 yield next
2478 2478 if val1 == next:
2479 2479 val1 = None
2480 2480 if val2 == next:
2481 2481 val2 = None
2482 2482 except StopIteration:
2483 2483 # Flush any remaining values and consume the other one
2484 2484 it = iter2
2485 2485 if val1 is not None:
2486 2486 yield val1
2487 2487 it = iter1
2488 2488 elif val2 is not None:
2489 2489 # might have been equality and both are empty
2490 2490 yield val2
2491 2491 for val in it:
2492 2492 yield val
2493 2493
2494 2494 self._iter = _generatorset(gen())
2495 2495
2496 2496 return self._iter
2497 2497
2498 2498 def __iter__(self):
2499 2499 if self._genlist:
2500 2500 return iter(self._genlist)
2501 2501 return iter(self._iterator())
2502 2502
2503 2503 def __contains__(self, x):
2504 2504 return x in self._r1 or x in self._r2
2505 2505
2506 2506 def set(self):
2507 2507 return self
2508 2508
2509 2509 def sort(self, reverse=False):
2510 2510 """Sort the added set
2511 2511
2512 2512 For this we use the cached list with all the generated values, and if we
2513 2513 know they are ascending or descending we can sort them in a smart way.
2514 2514 """
2515 2515 if self._ascending is None:
2516 2516 self._list.sort(reverse=reverse)
2517 2517 self._ascending = not reverse
2518 2518 else:
2519 2519 if bool(self._ascending) == bool(reverse):
2520 2520 self.reverse()
2521 2521
2522 def isascending(self):
2523 return self._ascending is not None and self._ascending
2524
2525 def isdescending(self):
2526 return self._ascending is not None and not self._ascending
2527
2522 2528 def reverse(self):
2523 2529 self._list.reverse()
2524 2530 if self._ascending is not None:
2525 2531 self._ascending = not self._ascending
2526 2532
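# Standalone sketch (not module code) of the ordered, duplicate-free merge that
# _addset._iterator() performs when both inputs are known to be ascending:
def _merge_ascending_sketch(a, b):
    import heapq
    last = object()
    for r in heapq.merge(a, b):   # yields the union in ascending order
        if r != last:             # skip duplicates, as _addset does
            yield r
            last = r

assert list(_merge_ascending_sketch([1, 3, 5], [2, 3, 6])) == [1, 2, 3, 5, 6]
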
2527 2533 class _generatorset(object):
2528 2534 """Wrap a generator for lazy iteration
2529 2535
2530 2536 Wrapper structure for generators that provides lazy membership and can
2531 2537 be iterated more than once.
2532 2538 When asked for membership it generates values until either it finds the
2533 2539 requested one or has gone through all the elements in the generator
2534 2540
2535 2541 This class does not duck-type baseset and it's only supposed to be used
2536 2542 internally
2537 2543 """
2538 2544 def __init__(self, gen):
2539 2545 self._gen = gen
2540 2546 self._iter = iter(gen)
2541 2547 self._cache = {}
2542 2548 self._genlist = baseset([])
2543 2549 self._iterated = False
2544 2550 self._finished = False
2545 2551
2546 2552 def __contains__(self, x):
2547 2553 if x in self._cache:
2548 2554 return self._cache[x]
2549 2555
2550 2556 for l in self:
2551 2557 if l == x:
2552 2558 return True
2553 2559
2554 2560 self._finished = True
2555 2561 self._cache[x] = False
2556 2562 return False
2557 2563
2558 2564 def __iter__(self):
2559 2565 if self._iterated:
2560 2566 for l in self._genlist:
2561 2567 yield l
2562 2568 else:
2563 2569 self._iterated = True
2564 2570
2565 2571 for item in self._gen:
2566 2572 self._cache[item] = True
2567 2573 self._genlist.append(item)
2568 2574 yield item
2569 2575
2570 2576 self._finished = True
2571 2577
2572 2578 def set(self):
2573 2579 return self
2574 2580
2575 2581 def sort(self, reverse=False):
2576 2582 # Basic implementation to be changed in future patches
2577 2583 if not self._finished:
2578 2584 for i in self:
2579 2585 continue
2580 2586 self._genlist.sort(reverse=reverse)
2581 2587
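# Illustrative behaviour (toy generator, not module code): a membership test
# only consumes the wrapped generator as far as needed, and repeated tests for
# the same value are answered from the cache.
_gs = _generatorset(iter([0, 1, 2, 3]))
assert 2 in _gs   # generates and caches 0, 1, 2
assert 2 in _gs   # answered from the cache, nothing more is generated
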
2582 2588 class _ascgeneratorset(_generatorset):
2583 2589 """Wrap a generator of ascending elements for lazy iteration
2584 2590
2585 2591 Same structure as _generatorset, but a membership test stops iterating
2586 2592 as soon as the generated values go past the requested one
2587 2593
2588 2594 This class does not duck-type baseset and it's only supposed to be used
2589 2595 internally
2590 2596 """
2591 2597 def __contains__(self, x):
2592 2598 if x in self._cache:
2593 2599 return self._cache[x]
2594 2600
2595 2601 for l in self:
2596 2602 if l == x:
2597 2603 return True
2598 2604 if l > x:
2599 2605 break
2600 2606
2601 2607 self._cache[x] = False
2602 2608 return False
2603 2609
2604 2610 class _descgeneratorset(_generatorset):
2605 2611 """Wrap a generator of descending elements for lazy iteration
2606 2612
2607 2613 Same structure as _generatorset, but a membership test stops iterating
2608 2614 as soon as the generated values go past the requested one
2609 2615
2610 2616 This class does not duck-type baseset and it's only supposed to be used
2611 2617 internally
2612 2618 """
2613 2619 def __contains__(self, x):
2614 2620 if x in self._cache:
2615 2621 return self._cache[x]
2616 2622
2617 2623 for l in self:
2618 2624 if l == x:
2619 2625 return True
2620 2626 if l < x:
2621 2627 break
2622 2628
2623 2629 self._cache[x] = False
2624 2630 return False
2625 2631
2626 2632 class spanset(object):
2627 2633 """Duck type for baseset class which represents a range of revisions and
2628 2634 can work lazily, without keeping the whole range in memory
2629 2635 """
2630 2636 def __init__(self, repo, start=0, end=None):
2631 2637 self._start = start
2632 2638 if end is not None:
2633 2639 self._end = end
2634 2640 else:
2635 2641 self._end = len(repo)
2636 2642 self._hiddenrevs = repo.changelog.filteredrevs
2637 2643
2638 2644 def ascending(self):
2639 2645 if self._start > self._end:
2640 2646 self.reverse()
2641 2647
2642 2648 def descending(self):
2643 2649 if self._start < self._end:
2644 2650 self.reverse()
2645 2651
2646 2652 def _contained(self, rev):
2647 2653 return (rev <= self._start and rev > self._end) or (rev >= self._start
2648 2654 and rev < self._end)
2649 2655
2650 2656 def __iter__(self):
2651 2657 if self._start <= self._end:
2652 2658 iterrange = xrange(self._start, self._end)
2653 2659 else:
2654 2660 iterrange = xrange(self._start, self._end, -1)
2655 2661
2656 2662 if self._hiddenrevs:
2657 2663 s = self._hiddenrevs
2658 2664 for r in iterrange:
2659 2665 if r not in s:
2660 2666 yield r
2661 2667 else:
2662 2668 for r in iterrange:
2663 2669 yield r
2664 2670
2665 2671 def __contains__(self, x):
2666 2672 return self._contained(x) and not (self._hiddenrevs and x in
2667 2673 self._hiddenrevs)
2668 2674
2669 2675 def __nonzero__(self):
2670 2676 for r in self:
2671 2677 return True
2672 2678 return False
2673 2679
2674 2680 def __and__(self, x):
2675 2681 if isinstance(x, baseset):
2676 2682 x = x.set()
2677 2683 if self._start <= self._end:
2678 2684 return orderedlazyset(self, lambda r: r in x)
2679 2685 else:
2680 2686 return orderedlazyset(self, lambda r: r in x, ascending=False)
2681 2687
2682 2688 def __sub__(self, x):
2683 2689 if isinstance(x, baseset):
2684 2690 x = x.set()
2685 2691 if self._start <= self._end:
2686 2692 return orderedlazyset(self, lambda r: r not in x)
2687 2693 else:
2688 2694 return orderedlazyset(self, lambda r: r not in x, ascending=False)
2689 2695
2690 2696 def __add__(self, x):
2691 2697 return lazyset(_addset(self, x))
2692 2698
2693 2699 def __len__(self):
2694 2700 if not self._hiddenrevs:
2695 2701 return abs(self._end - self._start)
2696 2702 else:
2697 2703 count = 0
2698 2704 for rev in self._hiddenrevs:
2699 2705 if self._contained(rev):
2700 2706 count += 1
2701 2707 return abs(self._end - self._start) - count
2702 2708
2703 2709 def __getitem__(self, x):
2704 2710 # Basic implementation to be changed in future patches.
2705 2711 l = baseset([r for r in self])
2706 2712 return l[x]
2707 2713
2708 2714 def sort(self, reverse=False):
2709 2715 if bool(reverse) != (self._start > self._end):
2710 2716 self.reverse()
2711 2717
2712 2718 def reverse(self):
2713 2719 if self._start <= self._end:
2714 2720 self._start, self._end = self._end - 1, self._start - 1
2715 2721 else:
2716 2722 self._start, self._end = self._end + 1, self._start + 1
2717 2723
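# For example (illustrative), a spanset over revisions 0..4 has start=0, end=5;
# reverse() turns that into start=4, end=-1, so iteration uses
# xrange(4, -1, -1) and yields 4, 3, 2, 1, 0 -- the same revisions, descending.
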
2718 2724 def set(self):
2719 2725 return self
2720 2726
2721 2727 def isascending(self):
2722 2728 return self._start < self._end
2723 2729
2724 2730 def isdescending(self):
2725 2731 return self._start > self._end
2726 2732
2727 2733 def filter(self, l):
2728 2734 if self._start <= self._end:
2729 2735 return orderedlazyset(self, l)
2730 2736 else:
2731 2737 return orderedlazyset(self, l, ascending=False)
2732 2738
2733 2739 # tell hggettext to extract docstrings from these functions:
2734 2740 i18nfunctions = symbols.values()