revset: replace "working copy" with "working directory" in function help
Yuya Nishihara -
r24366:e8ea3113 default
@@ -1,3350 +1,3350 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revqueue, revsnode = None, None
29 29 h = []
30 30
31 31 revs.sort(reverse=True)
32 32 revqueue = util.deque(revs)
33 33 if revqueue:
34 34 revsnode = revqueue.popleft()
35 35 heapq.heappush(h, -revsnode)
36 36
37 37 seen = set()
38 38 while h:
39 39 current = -heapq.heappop(h)
40 40 if current not in seen:
41 41 if revsnode and current == revsnode:
42 42 if revqueue:
43 43 revsnode = revqueue.popleft()
44 44 heapq.heappush(h, -revsnode)
45 45 seen.add(current)
46 46 yield current
47 47 for parent in cl.parentrevs(current)[:cut]:
48 48 if parent != node.nullrev:
49 49 heapq.heappush(h, -parent)
50 50
51 51 return generatorset(iterate(), iterasc=False)
52 52
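The heap trick in _revancestors is worth spelling out: heapq is a min-heap, so the code pushes negated revision numbers in order to always pop the highest-numbered ancestor next, yielding ancestors lazily in descending order. A minimal standalone sketch of the same idea on a toy parent table (illustrative only, not part of this changeset):

import heapq

# toy DAG: revision -> parent revisions (no nullrev, unlike the changelog)
parents = {0: (), 1: (0,), 2: (0,), 3: (1,), 4: (2,), 5: (3, 4)}

def toyancestors(revs):
    h = [-r for r in revs]       # negate so the min-heap behaves as a max-heap
    heapq.heapify(h)
    seen = set()
    while h:
        cur = -heapq.heappop(h)
        if cur in seen:
            continue
        seen.add(cur)
        yield cur
        for p in parents[cur]:
            heapq.heappush(h, -p)

print list(toyancestors([5]))    # [5, 4, 3, 2, 1, 0], highest revision first
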
53 53 def _revdescendants(repo, revs, followfirst):
54 54 """Like revlog.descendants() but supports followfirst."""
55 55 if followfirst:
56 56 cut = 1
57 57 else:
58 58 cut = None
59 59
60 60 def iterate():
61 61 cl = repo.changelog
62 62 first = min(revs)
63 63 nullrev = node.nullrev
64 64 if first == nullrev:
65 65 # Are there nodes with a null first parent and a non-null
66 66 # second one? Maybe. Do we care? Probably not.
67 67 for i in cl:
68 68 yield i
69 69 else:
70 70 seen = set(revs)
71 71 for i in cl.revs(first + 1):
72 72 for x in cl.parentrevs(i)[:cut]:
73 73 if x != nullrev and x in seen:
74 74 seen.add(i)
75 75 yield i
76 76 break
77 77
78 78 return generatorset(iterate(), iterasc=True)
79 79
80 80 def _revsbetween(repo, roots, heads):
81 81 """Return all paths between roots and heads, inclusive of both endpoint
82 82 sets."""
83 83 if not roots:
84 84 return baseset()
85 85 parentrevs = repo.changelog.parentrevs
86 86 visit = list(heads)
87 87 reachable = set()
88 88 seen = {}
89 89 minroot = min(roots)
90 90 roots = set(roots)
91 91 # open-code the post-order traversal due to the tiny size of
92 92 # sys.getrecursionlimit()
93 93 while visit:
94 94 rev = visit.pop()
95 95 if rev in roots:
96 96 reachable.add(rev)
97 97 parents = parentrevs(rev)
98 98 seen[rev] = parents
99 99 for parent in parents:
100 100 if parent >= minroot and parent not in seen:
101 101 visit.append(parent)
102 102 if not reachable:
103 103 return baseset()
104 104 for rev in sorted(seen):
105 105 for parent in seen[rev]:
106 106 if parent in reachable:
107 107 reachable.add(rev)
108 108 return baseset(sorted(reachable))
109 109
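Concretely, _revsbetween walks from the heads down to (no further than) the lowest root, then sweeps the visited revisions in ascending order, marking a revision reachable as soon as one of its parents is; because parents always carry smaller numbers than their children, a single pass suffices. A toy re-implementation on a small diamond-shaped DAG (illustrative only, not part of this changeset):

# toy DAG: 0 -- 1 -- {2, 3} -- 4, i.e. a diamond between 1 and 4
toyparents = {0: (), 1: (0,), 2: (1,), 3: (1,), 4: (2, 3)}

def toyrevsbetween(roots, heads):
    roots = set(roots)
    minroot = min(roots)
    seen, visit, reachable = {}, list(heads), set()
    while visit:                          # downward walk, bounded by minroot
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        seen[rev] = toyparents[rev]
        for p in seen[rev]:
            if p >= minroot and p not in seen:
                visit.append(p)
    for rev in sorted(seen):              # ascending sweep: a child of a
        if any(p in reachable for p in seen[rev]):  # reachable parent is reachable
            reachable.add(rev)
    return sorted(reachable)

print toyrevsbetween([1], [4])            # [1, 2, 3, 4]: both sides of the diamond
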
110 110 elements = {
111 111 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
112 112 "##": (20, None, ("_concat", 20)),
113 113 "~": (18, None, ("ancestor", 18)),
114 114 "^": (18, None, ("parent", 18), ("parentpost", 18)),
115 115 "-": (5, ("negate", 19), ("minus", 5)),
116 116 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
117 117 ("dagrangepost", 17)),
118 118 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
119 119 ("dagrangepost", 17)),
120 120 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
121 121 "not": (10, ("not", 10)),
122 122 "!": (10, ("not", 10)),
123 123 "and": (5, None, ("and", 5)),
124 124 "&": (5, None, ("and", 5)),
125 125 "%": (5, None, ("only", 5), ("onlypost", 5)),
126 126 "or": (4, None, ("or", 4)),
127 127 "|": (4, None, ("or", 4)),
128 128 "+": (4, None, ("or", 4)),
129 129 ",": (2, None, ("list", 2)),
130 130 ")": (0, None, None),
131 131 "symbol": (0, ("symbol",), None),
132 132 "string": (0, ("string",), None),
133 133 "end": (0, None, None),
134 134 }
135 135
136 136 keywords = set(['and', 'or', 'not'])
137 137
138 138 # default set of valid characters for the initial letter of symbols
139 139 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
140 140 if c.isalnum() or c in '._@' or ord(c) > 127)
141 141
142 142 # default set of valid characters for non-initial letters of symbols
143 143 _symletters = set(c for c in [chr(i) for i in xrange(256)]
144 144 if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 145
146 146 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
147 147 '''
148 148 Parse a revset statement into a stream of tokens
149 149
150 150 ``syminitletters`` is the set of valid characters for the initial
151 151 letter of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for initial
154 154 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
155 155
156 156 ``symletters`` is the set of valid characters for non-initial
157 157 letters of symbols.
158 158
159 159 By default, character ``c`` is recognized as valid for non-initial
160 160 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
161 161
162 162 Check that @ is a valid unquoted token character (issue3686):
163 163 >>> list(tokenize("@::"))
164 164 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
165 165
166 166 '''
167 167 if syminitletters is None:
168 168 syminitletters = _syminitletters
169 169 if symletters is None:
170 170 symletters = _symletters
171 171
172 172 pos, l = 0, len(program)
173 173 while pos < l:
174 174 c = program[pos]
175 175 if c.isspace(): # skip inter-token whitespace
176 176 pass
177 177 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
178 178 yield ('::', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
181 181 yield ('..', None, pos)
182 182 pos += 1 # skip ahead
183 183 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
184 184 yield ('##', None, pos)
185 185 pos += 1 # skip ahead
186 186 elif c in "():,-|&+!~^%": # handle simple operators
187 187 yield (c, None, pos)
188 188 elif (c in '"\'' or c == 'r' and
189 189 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
190 190 if c == 'r':
191 191 pos += 1
192 192 c = program[pos]
193 193 decode = lambda x: x
194 194 else:
195 195 decode = lambda x: x.decode('string-escape')
196 196 pos += 1
197 197 s = pos
198 198 while pos < l: # find closing quote
199 199 d = program[pos]
200 200 if d == '\\': # skip over escaped characters
201 201 pos += 2
202 202 continue
203 203 if d == c:
204 204 yield ('string', decode(program[s:pos]), s)
205 205 break
206 206 pos += 1
207 207 else:
208 208 raise error.ParseError(_("unterminated string"), s)
209 209 # gather up a symbol/keyword
210 210 elif c in syminitletters:
211 211 s = pos
212 212 pos += 1
213 213 while pos < l: # find end of symbol
214 214 d = program[pos]
215 215 if d not in symletters:
216 216 break
217 217 if d == '.' and program[pos - 1] == '.': # special case for ..
218 218 pos -= 1
219 219 break
220 220 pos += 1
221 221 sym = program[s:pos]
222 222 if sym in keywords: # operator keywords
223 223 yield (sym, None, s)
224 224 elif '-' in sym:
225 225 # some jerk gave us foo-bar-baz, try to check if it's a symbol
226 226 if lookup and lookup(sym):
227 227 # looks like a real symbol
228 228 yield ('symbol', sym, s)
229 229 else:
230 230 # looks like an expression
231 231 parts = sym.split('-')
232 232 for p in parts[:-1]:
233 233 if p: # possible consecutive -
234 234 yield ('symbol', p, s)
235 235 s += len(p)
236 236 yield ('-', None, pos)
237 237 s += 1
238 238 if parts[-1]: # possible trailing -
239 239 yield ('symbol', parts[-1], s)
240 240 else:
241 241 yield ('symbol', sym, s)
242 242 pos -= 1
243 243 else:
244 244 raise error.ParseError(_("syntax error"), pos)
245 245 pos += 1
246 246 yield ('end', None, pos)
247 247
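For a slightly larger input than the doctest in the docstring, the token stream looks as follows; the third element of each tuple is the byte offset of the token in the input. A worked example (illustrative only, assuming a Mercurial checkout on sys.path; not part of this changeset):

from mercurial.revset import tokenize

print list(tokenize("1:5 and user(bob)"))
# [('symbol', '1', 0), (':', None, 1), ('symbol', '5', 2),
#  ('and', None, 4), ('symbol', 'user', 8), ('(', None, 12),
#  ('symbol', 'bob', 13), (')', None, 16), ('end', None, 17)]
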
248 248 def parseerrordetail(inst):
249 249 """Compose error message from specified ParseError object
250 250 """
251 251 if len(inst.args) > 1:
252 252 return _('at %s: %s') % (inst.args[1], inst.args[0])
253 253 else:
254 254 return inst.args[0]
255 255
256 256 # helpers
257 257
258 258 def getstring(x, err):
259 259 if x and (x[0] == 'string' or x[0] == 'symbol'):
260 260 return x[1]
261 261 raise error.ParseError(err)
262 262
263 263 def getlist(x):
264 264 if not x:
265 265 return []
266 266 if x[0] == 'list':
267 267 return getlist(x[1]) + [x[2]]
268 268 return [x]
269 269
270 270 def getargs(x, min, max, err):
271 271 l = getlist(x)
272 272 if len(l) < min or (max >= 0 and len(l) > max):
273 273 raise error.ParseError(err)
274 274 return l
275 275
276 276 def isvalidsymbol(tree):
277 277 """Examine whether specified ``tree`` is valid ``symbol`` or not
278 278 """
279 279 return tree[0] == 'symbol' and len(tree) > 1
280 280
281 281 def getsymbol(tree):
282 282 """Get symbol name from valid ``symbol`` in ``tree``
283 283
284 284 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
285 285 """
286 286 return tree[1]
287 287
288 288 def isvalidfunc(tree):
289 289 """Examine whether specified ``tree`` is valid ``func`` or not
290 290 """
291 291 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
292 292
293 293 def getfuncname(tree):
294 294 """Get function name from valid ``func`` in ``tree``
295 295
296 296 This assumes that ``tree`` is already examined by ``isvalidfunc``.
297 297 """
298 298 return getsymbol(tree[1])
299 299
300 300 def getfuncargs(tree):
301 301 """Get list of function arguments from valid ``func`` in ``tree``
302 302
303 303 This assumes that ``tree`` is already examined by ``isvalidfunc``.
304 304 """
305 305 if len(tree) > 2:
306 306 return getlist(tree[2])
307 307 else:
308 308 return []
309 309
310 310 def getset(repo, subset, x):
311 311 if not x:
312 312 raise error.ParseError(_("missing argument"))
313 313 s = methods[x[0]](repo, subset, *x[1:])
314 314 if util.safehasattr(s, 'isascending'):
315 315 return s
316 316 return baseset(s)
317 317
318 318 def _getrevsource(repo, r):
319 319 extra = repo[r].extra()
320 320 for label in ('source', 'transplant_source', 'rebase_source'):
321 321 if label in extra:
322 322 try:
323 323 return repo[extra[label]].rev()
324 324 except error.RepoLookupError:
325 325 pass
326 326 return None
327 327
328 328 # operator methods
329 329
330 330 def stringset(repo, subset, x):
331 331 x = repo[x].rev()
332 332 if x in subset:
333 333 return baseset([x])
334 334 return baseset()
335 335
336 336 def symbolset(repo, subset, x):
337 337 if x in symbols:
338 338 raise error.ParseError(_("can't use %s here") % x)
339 339 return stringset(repo, subset, x)
340 340
341 341 def rangeset(repo, subset, x, y):
342 342 m = getset(repo, fullreposet(repo), x)
343 343 n = getset(repo, fullreposet(repo), y)
344 344
345 345 if not m or not n:
346 346 return baseset()
347 347 m, n = m.first(), n.last()
348 348
349 349 if m < n:
350 350 r = spanset(repo, m, n + 1)
351 351 else:
352 352 r = spanset(repo, m, n - 1)
353 353 return r & subset
354 354
355 355 def dagrange(repo, subset, x, y):
356 356 r = fullreposet(repo)
357 357 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
358 358 return xs & subset
359 359
360 360 def andset(repo, subset, x, y):
361 361 return getset(repo, getset(repo, subset, x), y)
362 362
363 363 def orset(repo, subset, x, y):
364 364 xl = getset(repo, subset, x)
365 365 yl = getset(repo, subset - xl, y)
366 366 return xl + yl
367 367
368 368 def notset(repo, subset, x):
369 369 return subset - getset(repo, subset, x)
370 370
371 371 def listset(repo, subset, a, b):
372 372 raise error.ParseError(_("can't use a list in this context"))
373 373
374 374 def func(repo, subset, a, b):
375 375 if a[0] == 'symbol' and a[1] in symbols:
376 376 return symbols[a[1]](repo, subset, b)
377 377 raise error.UnknownIdentifier(a[1], symbols.keys())
378 378
379 379 # functions
380 380
381 381 def adds(repo, subset, x):
382 382 """``adds(pattern)``
383 383 Changesets that add a file matching pattern.
384 384
385 385 The pattern without explicit kind like ``glob:`` is expected to be
386 386 relative to the current directory and match against a file or a
387 387 directory.
388 388 """
389 389 # i18n: "adds" is a keyword
390 390 pat = getstring(x, _("adds requires a pattern"))
391 391 return checkstatus(repo, subset, pat, 1)
392 392
393 393 def ancestor(repo, subset, x):
394 394 """``ancestor(*changeset)``
395 395 A greatest common ancestor of the changesets.
396 396
397 397 Accepts 0 or more changesets.
398 398 Will return empty list when passed no args.
399 399 Greatest common ancestor of a single changeset is that changeset.
400 400 """
401 401 # i18n: "ancestor" is a keyword
402 402 l = getlist(x)
403 403 rl = fullreposet(repo)
404 404 anc = None
405 405
406 406 # (getset(repo, rl, i) for i in l) generates a list of lists
407 407 for revs in (getset(repo, rl, i) for i in l):
408 408 for r in revs:
409 409 if anc is None:
410 410 anc = repo[r]
411 411 else:
412 412 anc = anc.ancestor(repo[r])
413 413
414 414 if anc is not None and anc.rev() in subset:
415 415 return baseset([anc.rev()])
416 416 return baseset()
417 417
418 418 def _ancestors(repo, subset, x, followfirst=False):
419 419 heads = getset(repo, fullreposet(repo), x)
420 420 if not heads:
421 421 return baseset()
422 422 s = _revancestors(repo, heads, followfirst)
423 423 return subset & s
424 424
425 425 def ancestors(repo, subset, x):
426 426 """``ancestors(set)``
427 427 Changesets that are ancestors of a changeset in set.
428 428 """
429 429 return _ancestors(repo, subset, x)
430 430
431 431 def _firstancestors(repo, subset, x):
432 432 # ``_firstancestors(set)``
433 433 # Like ``ancestors(set)`` but follows only the first parents.
434 434 return _ancestors(repo, subset, x, followfirst=True)
435 435
436 436 def ancestorspec(repo, subset, x, n):
437 437 """``set~n``
438 438 Changesets that are the Nth ancestor (first parents only) of a changeset
439 439 in set.
440 440 """
441 441 try:
442 442 n = int(n[1])
443 443 except (TypeError, ValueError):
444 444 raise error.ParseError(_("~ expects a number"))
445 445 ps = set()
446 446 cl = repo.changelog
447 447 for r in getset(repo, fullreposet(repo), x):
448 448 for i in range(n):
449 449 r = cl.parentrevs(r)[0]
450 450 ps.add(r)
451 451 return subset & ps
452 452
453 453 def author(repo, subset, x):
454 454 """``author(string)``
455 455 Alias for ``user(string)``.
456 456 """
457 457 # i18n: "author" is a keyword
458 458 n = encoding.lower(getstring(x, _("author requires a string")))
459 459 kind, pattern, matcher = _substringmatcher(n)
460 460 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
461 461
462 462 def bisect(repo, subset, x):
463 463 """``bisect(string)``
464 464 Changesets marked in the specified bisect status:
465 465
466 466 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
467 467 - ``goods``, ``bads`` : csets topologically good/bad
468 468 - ``range`` : csets taking part in the bisection
469 469 - ``pruned`` : csets that are goods, bads or skipped
470 470 - ``untested`` : csets whose fate is yet unknown
471 471 - ``ignored`` : csets ignored due to DAG topology
472 472 - ``current`` : the cset currently being bisected
473 473 """
474 474 # i18n: "bisect" is a keyword
475 475 status = getstring(x, _("bisect requires a string")).lower()
476 476 state = set(hbisect.get(repo, status))
477 477 return subset & state
478 478
479 479 # Backward-compatibility
480 480 # - no help entry so that we do not advertise it any more
481 481 def bisected(repo, subset, x):
482 482 return bisect(repo, subset, x)
483 483
484 484 def bookmark(repo, subset, x):
485 485 """``bookmark([name])``
486 486 The named bookmark or all bookmarks.
487 487
488 488 If `name` starts with `re:`, the remainder of the name is treated as
489 489 a regular expression. To match a bookmark that actually starts with `re:`,
490 490 use the prefix `literal:`.
491 491 """
492 492 # i18n: "bookmark" is a keyword
493 493 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
494 494 if args:
495 495 bm = getstring(args[0],
496 496 # i18n: "bookmark" is a keyword
497 497 _('the argument to bookmark must be a string'))
498 498 kind, pattern, matcher = _stringmatcher(bm)
499 499 bms = set()
500 500 if kind == 'literal':
501 501 bmrev = repo._bookmarks.get(pattern, None)
502 502 if not bmrev:
503 503 raise error.RepoLookupError(_("bookmark '%s' does not exist")
504 504 % bm)
505 505 bms.add(repo[bmrev].rev())
506 506 else:
507 507 matchrevs = set()
508 508 for name, bmrev in repo._bookmarks.iteritems():
509 509 if matcher(name):
510 510 matchrevs.add(bmrev)
511 511 if not matchrevs:
512 512 raise error.RepoLookupError(_("no bookmarks exist"
513 513 " that match '%s'") % pattern)
514 514 for bmrev in matchrevs:
515 515 bms.add(repo[bmrev].rev())
516 516 else:
517 517 bms = set([repo[r].rev()
518 518 for r in repo._bookmarks.values()])
519 519 bms -= set([node.nullrev])
520 520 return subset & bms
521 521
522 522 def branch(repo, subset, x):
523 523 """``branch(string or set)``
524 524 All changesets belonging to the given branch or the branches of the given
525 525 changesets.
526 526
527 527 If `string` starts with `re:`, the remainder of the name is treated as
528 528 a regular expression. To match a branch that actually starts with `re:`,
529 529 use the prefix `literal:`.
530 530 """
531 531 import branchmap
532 532 urepo = repo.unfiltered()
533 533 ucl = urepo.changelog
534 534 getbi = branchmap.revbranchcache(urepo, readonly=True).branchinfo
535 535
536 536 try:
537 537 b = getstring(x, '')
538 538 except error.ParseError:
539 539 # not a string, but another revspec, e.g. tip()
540 540 pass
541 541 else:
542 542 kind, pattern, matcher = _stringmatcher(b)
543 543 if kind == 'literal':
544 544 # note: falls through to the revspec case if no branch with
545 545 # this name exists
546 546 if pattern in repo.branchmap():
547 547 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
548 548 else:
549 549 return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
550 550
551 551 s = getset(repo, fullreposet(repo), x)
552 552 b = set()
553 553 for r in s:
554 554 b.add(getbi(ucl, r)[0])
555 555 c = s.__contains__
556 556 return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
557 557
558 558 def bumped(repo, subset, x):
559 559 """``bumped()``
560 560 Mutable changesets marked as successors of public changesets.
561 561
562 562 Only non-public and non-obsolete changesets can be `bumped`.
563 563 """
564 564 # i18n: "bumped" is a keyword
565 565 getargs(x, 0, 0, _("bumped takes no arguments"))
566 566 bumped = obsmod.getrevs(repo, 'bumped')
567 567 return subset & bumped
568 568
569 569 def bundle(repo, subset, x):
570 570 """``bundle()``
571 571 Changesets in the bundle.
572 572
573 573 Bundle must be specified by the -R option."""
574 574
575 575 try:
576 576 bundlerevs = repo.changelog.bundlerevs
577 577 except AttributeError:
578 578 raise util.Abort(_("no bundle provided - specify with -R"))
579 579 return subset & bundlerevs
580 580
581 581 def checkstatus(repo, subset, pat, field):
582 582 hasset = matchmod.patkind(pat) == 'set'
583 583
584 584 mcache = [None]
585 585 def matches(x):
586 586 c = repo[x]
587 587 if not mcache[0] or hasset:
588 588 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
589 589 m = mcache[0]
590 590 fname = None
591 591 if not m.anypats() and len(m.files()) == 1:
592 592 fname = m.files()[0]
593 593 if fname is not None:
594 594 if fname not in c.files():
595 595 return False
596 596 else:
597 597 for f in c.files():
598 598 if m(f):
599 599 break
600 600 else:
601 601 return False
602 602 files = repo.status(c.p1().node(), c.node())[field]
603 603 if fname is not None:
604 604 if fname in files:
605 605 return True
606 606 else:
607 607 for f in files:
608 608 if m(f):
609 609 return True
610 610
611 611 return subset.filter(matches)
612 612
613 613 def _children(repo, narrow, parentset):
614 614 cs = set()
615 615 if not parentset:
616 616 return baseset(cs)
617 617 pr = repo.changelog.parentrevs
618 618 minrev = min(parentset)
619 619 for r in narrow:
620 620 if r <= minrev:
621 621 continue
622 622 for p in pr(r):
623 623 if p in parentset:
624 624 cs.add(r)
625 625 return baseset(cs)
626 626
627 627 def children(repo, subset, x):
628 628 """``children(set)``
629 629 Child changesets of changesets in set.
630 630 """
631 631 s = getset(repo, fullreposet(repo), x)
632 632 cs = _children(repo, subset, s)
633 633 return subset & cs
634 634
635 635 def closed(repo, subset, x):
636 636 """``closed()``
637 637 Changeset is closed.
638 638 """
639 639 # i18n: "closed" is a keyword
640 640 getargs(x, 0, 0, _("closed takes no arguments"))
641 641 return subset.filter(lambda r: repo[r].closesbranch())
642 642
643 643 def contains(repo, subset, x):
644 644 """``contains(pattern)``
645 645 The revision's manifest contains a file matching pattern (but might not
646 646 modify it). See :hg:`help patterns` for information about file patterns.
647 647
648 648 The pattern without explicit kind like ``glob:`` is expected to be
649 649 relative to the current directory and match against a file exactly
650 650 for efficiency.
651 651 """
652 652 # i18n: "contains" is a keyword
653 653 pat = getstring(x, _("contains requires a pattern"))
654 654
655 655 def matches(x):
656 656 if not matchmod.patkind(pat):
657 657 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
658 658 if pats in repo[x]:
659 659 return True
660 660 else:
661 661 c = repo[x]
662 662 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
663 663 for f in c.manifest():
664 664 if m(f):
665 665 return True
666 666 return False
667 667
668 668 return subset.filter(matches)
669 669
670 670 def converted(repo, subset, x):
671 671 """``converted([id])``
672 672 Changesets converted from the given identifier in the old repository if
673 673 present, or all converted changesets if no identifier is specified.
674 674 """
675 675
676 676 # There is exactly no chance of resolving the revision, so do a simple
677 677 # string compare and hope for the best
678 678
679 679 rev = None
680 680 # i18n: "converted" is a keyword
681 681 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
682 682 if l:
683 683 # i18n: "converted" is a keyword
684 684 rev = getstring(l[0], _('converted requires a revision'))
685 685
686 686 def _matchvalue(r):
687 687 source = repo[r].extra().get('convert_revision', None)
688 688 return source is not None and (rev is None or source.startswith(rev))
689 689
690 690 return subset.filter(lambda r: _matchvalue(r))
691 691
692 692 def date(repo, subset, x):
693 693 """``date(interval)``
694 694 Changesets within the interval, see :hg:`help dates`.
695 695 """
696 696 # i18n: "date" is a keyword
697 697 ds = getstring(x, _("date requires a string"))
698 698 dm = util.matchdate(ds)
699 699 return subset.filter(lambda x: dm(repo[x].date()[0]))
700 700
701 701 def desc(repo, subset, x):
702 702 """``desc(string)``
703 703 Search commit message for string. The match is case-insensitive.
704 704 """
705 705 # i18n: "desc" is a keyword
706 706 ds = encoding.lower(getstring(x, _("desc requires a string")))
707 707
708 708 def matches(x):
709 709 c = repo[x]
710 710 return ds in encoding.lower(c.description())
711 711
712 712 return subset.filter(matches)
713 713
714 714 def _descendants(repo, subset, x, followfirst=False):
715 715 roots = getset(repo, fullreposet(repo), x)
716 716 if not roots:
717 717 return baseset()
718 718 s = _revdescendants(repo, roots, followfirst)
719 719
720 720 # Both sets need to be ascending in order to lazily return the union
721 721 # in the correct order.
722 722 base = subset & roots
723 723 desc = subset & s
724 724 result = base + desc
725 725 if subset.isascending():
726 726 result.sort()
727 727 elif subset.isdescending():
728 728 result.sort(reverse=True)
729 729 else:
730 730 result = subset & result
731 731 return result
732 732
733 733 def descendants(repo, subset, x):
734 734 """``descendants(set)``
735 735 Changesets which are descendants of changesets in set.
736 736 """
737 737 return _descendants(repo, subset, x)
738 738
739 739 def _firstdescendants(repo, subset, x):
740 740 # ``_firstdescendants(set)``
741 741 # Like ``descendants(set)`` but follows only the first parents.
742 742 return _descendants(repo, subset, x, followfirst=True)
743 743
744 744 def destination(repo, subset, x):
745 745 """``destination([set])``
746 746 Changesets that were created by a graft, transplant or rebase operation,
747 747 with the given revisions specified as the source. Omitting the optional set
748 748 is the same as passing all().
749 749 """
750 750 if x is not None:
751 751 sources = getset(repo, fullreposet(repo), x)
752 752 else:
753 753 sources = fullreposet(repo)
754 754
755 755 dests = set()
756 756
757 757 # subset contains all of the possible destinations that can be returned, so
758 758 # iterate over them and see if their source(s) were provided in the arg set.
759 759 # Even if the immediate src of r is not in the arg set, src's source (or
760 760 # further back) may be. Scanning back further than the immediate src allows
761 761 # transitive transplants and rebases to yield the same results as transitive
762 762 # grafts.
763 763 for r in subset:
764 764 src = _getrevsource(repo, r)
765 765 lineage = None
766 766
767 767 while src is not None:
768 768 if lineage is None:
769 769 lineage = list()
770 770
771 771 lineage.append(r)
772 772
773 773 # The visited lineage is a match if the current source is in the arg
774 774 # set. Since every candidate dest is visited by way of iterating
775 775 # subset, any dests further back in the lineage will be tested by a
776 776 # different iteration over subset. Likewise, if the src was already
777 777 # selected, the current lineage can be selected without going back
778 778 # further.
779 779 if src in sources or src in dests:
780 780 dests.update(lineage)
781 781 break
782 782
783 783 r = src
784 784 src = _getrevsource(repo, r)
785 785
786 786 return subset.filter(dests.__contains__)
787 787
788 788 def divergent(repo, subset, x):
789 789 """``divergent()``
790 790 Final successors of changesets with an alternative set of final successors.
791 791 """
792 792 # i18n: "divergent" is a keyword
793 793 getargs(x, 0, 0, _("divergent takes no arguments"))
794 794 divergent = obsmod.getrevs(repo, 'divergent')
795 795 return subset & divergent
796 796
797 797 def draft(repo, subset, x):
798 798 """``draft()``
799 799 Changeset in draft phase."""
800 800 # i18n: "draft" is a keyword
801 801 getargs(x, 0, 0, _("draft takes no arguments"))
802 802 phase = repo._phasecache.phase
803 803 target = phases.draft
804 804 condition = lambda r: phase(repo, r) == target
805 805 return subset.filter(condition, cache=False)
806 806
807 807 def extinct(repo, subset, x):
808 808 """``extinct()``
809 809 Obsolete changesets with obsolete descendants only.
810 810 """
811 811 # i18n: "extinct" is a keyword
812 812 getargs(x, 0, 0, _("extinct takes no arguments"))
813 813 extincts = obsmod.getrevs(repo, 'extinct')
814 814 return subset & extincts
815 815
816 816 def extra(repo, subset, x):
817 817 """``extra(label, [value])``
818 818 Changesets with the given label in the extra metadata, with the given
819 819 optional value.
820 820
821 821 If `value` starts with `re:`, the remainder of the value is treated as
822 822 a regular expression. To match a value that actually starts with `re:`,
823 823 use the prefix `literal:`.
824 824 """
825 825
826 826 # i18n: "extra" is a keyword
827 827 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
828 828 # i18n: "extra" is a keyword
829 829 label = getstring(l[0], _('first argument to extra must be a string'))
830 830 value = None
831 831
832 832 if len(l) > 1:
833 833 # i18n: "extra" is a keyword
834 834 value = getstring(l[1], _('second argument to extra must be a string'))
835 835 kind, value, matcher = _stringmatcher(value)
836 836
837 837 def _matchvalue(r):
838 838 extra = repo[r].extra()
839 839 return label in extra and (value is None or matcher(extra[label]))
840 840
841 841 return subset.filter(lambda r: _matchvalue(r))
842 842
843 843 def filelog(repo, subset, x):
844 844 """``filelog(pattern)``
845 845 Changesets connected to the specified filelog.
846 846
847 847 For performance reasons, visits only revisions mentioned in the file-level
848 848 filelog, rather than filtering through all changesets (much faster, but
849 849 doesn't include deletes or duplicate changes). For a slower, more accurate
850 850 result, use ``file()``.
851 851
852 852 The pattern without explicit kind like ``glob:`` is expected to be
853 853 relative to the current directory and match against a file exactly
854 854 for efficiency.
855 855
856 856 If some linkrev points to revisions filtered by the current repoview, we'll
857 857 work around it to return a non-filtered value.
858 858 """
859 859
860 860 # i18n: "filelog" is a keyword
861 861 pat = getstring(x, _("filelog requires a pattern"))
862 862 s = set()
863 863 cl = repo.changelog
864 864
865 865 if not matchmod.patkind(pat):
866 866 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
867 867 files = [f]
868 868 else:
869 869 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
870 870 files = (f for f in repo[None] if m(f))
871 871
872 872 for f in files:
873 873 backrevref = {} # final value for: filerev -> changerev
874 874 lowestchild = {} # lowest known filerev child of a filerev
875 875 delayed = [] # filerev with filtered linkrev, for post-processing
876 876 lowesthead = None # cache for manifest content of all head revisions
877 877 fl = repo.file(f)
878 878 for fr in list(fl):
879 879 rev = fl.linkrev(fr)
880 880 if rev not in cl:
881 881 # changerev pointed in linkrev is filtered
882 882 # record it for post processing.
883 883 delayed.append((fr, rev))
884 884 continue
885 885 for p in fl.parentrevs(fr):
886 886 if 0 <= p and p not in lowestchild:
887 887 lowestchild[p] = fr
888 888 backrevref[fr] = rev
889 889 s.add(rev)
890 890
891 891 # Post-processing of all filerevs we skipped because they were
892 892 # filtered. If such filerevs have known and unfiltered children, this
893 893 # means they have an unfiltered appearance out there. We'll use linkrev
894 894 # adjustment to find one of these appearances. The lowest known child
895 895 # will be used as a starting point because it is the best upper-bound we
896 896 # have.
897 897 #
898 898 # This approach will fail when an unfiltered but linkrev-shadowed
899 899 # appearance exists in a head changeset without unfiltered filerev
900 900 # children anywhere.
901 901 while delayed:
902 902 # must be a descending iteration. To slowly fill lowest child
903 903 # information that is of potential use by the next item.
904 904 fr, rev = delayed.pop()
905 905 lkr = rev
906 906
907 907 child = lowestchild.get(fr)
908 908
909 909 if child is None:
910 910 # search for existence of this file revision in a head revision.
911 911 # There are three possibilities:
912 912 # - the revision exists in a head and we can find an
913 913 # introduction from there,
914 914 # - the revision does not exist in a head because it has been
915 915 # changed since its introduction: we would have found a child
916 916 # and be in the other 'else' clause,
917 917 # - all versions of the revision are hidden.
918 918 if lowesthead is None:
919 919 lowesthead = {}
920 920 for h in repo.heads():
921 921 fnode = repo[h].manifest().get(f)
922 922 if fnode is not None:
923 923 lowesthead[fl.rev(fnode)] = h
924 924 headrev = lowesthead.get(fr)
925 925 if headrev is None:
926 926 # content is nowhere unfiltered
927 927 continue
928 928 rev = repo[headrev][f].introrev()
929 929 else:
930 930 # the lowest known child is a good upper bound
931 931 childcrev = backrevref[child]
932 932 # XXX this does not guarantee returning the lowest
933 933 # introduction of this revision, but this gives a
934 934 # result which is a good start and will fit in most
935 935 # cases. We probably need to fix the multiple
936 936 # introductions case properly (report each
937 937 # introduction, even for identical file revisions)
938 938 # once and for all at some point anyway.
939 939 for p in repo[childcrev][f].parents():
940 940 if p.filerev() == fr:
941 941 rev = p.rev()
942 942 break
943 943 if rev == lkr: # no shadowed entry found
944 944 # XXX This should never happen unless some manifest points
945 945 # to biggish file revisions (like a revision that uses a
946 946 # parent that never appears in the manifest ancestors)
947 947 continue
948 948
949 949 # Fill the data for the next iteration.
950 950 for p in fl.parentrevs(fr):
951 951 if 0 <= p and p not in lowestchild:
952 952 lowestchild[p] = fr
953 953 backrevref[fr] = rev
954 954 s.add(rev)
955 955
956 956 return subset & s
957 957
958 958 def first(repo, subset, x):
959 959 """``first(set, [n])``
960 960 An alias for limit().
961 961 """
962 962 return limit(repo, subset, x)
963 963
964 964 def _follow(repo, subset, x, name, followfirst=False):
965 965 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
966 966 c = repo['.']
967 967 if l:
968 968 x = getstring(l[0], _("%s expected a filename") % name)
969 969 if x in c:
970 970 cx = c[x]
971 971 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
972 972 # include the revision responsible for the most recent version
973 973 s.add(cx.introrev())
974 974 else:
975 975 return baseset()
976 976 else:
977 977 s = _revancestors(repo, baseset([c.rev()]), followfirst)
978 978
979 979 return subset & s
980 980
981 981 def follow(repo, subset, x):
982 982 """``follow([file])``
983 An alias for ``::.`` (ancestors of the working copy's first parent).
983 An alias for ``::.`` (ancestors of the working directory's first parent).
984 984 If a filename is specified, the history of the given file is followed,
985 985 including copies.
986 986 """
987 987 return _follow(repo, subset, x, 'follow')
988 988
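The reworded docstring above is the whole effect of this hunk; follow() still evaluates to the ancestors of the working directory's first parent, or to a file's history when given a filename. A hedged usage sketch, assuming Mercurial 3.x on Python 2, a repository in the current directory, and 'README' as a purely illustrative filename (not part of this changeset):

from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '.')
# follow() with no argument: same revisions as the '::.' shorthand above
print list(repo.revs('follow()'))
# follow(file): history of a single file, following copies
print list(repo.revs("follow(%s)", 'README'))
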
989 989 def _followfirst(repo, subset, x):
990 990 # ``followfirst([file])``
991 991 # Like ``follow([file])`` but follows only the first parent of
992 992 # every revision or file revision.
993 993 return _follow(repo, subset, x, '_followfirst', followfirst=True)
994 994
995 995 def getall(repo, subset, x):
996 996 """``all()``
997 997 All changesets, the same as ``0:tip``.
998 998 """
999 999 # i18n: "all" is a keyword
1000 1000 getargs(x, 0, 0, _("all takes no arguments"))
1001 1001 return subset & spanset(repo) # drop "null" if any
1002 1002
1003 1003 def grep(repo, subset, x):
1004 1004 """``grep(regex)``
1005 1005 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1006 1006 to ensure special escape characters are handled correctly. Unlike
1007 1007 ``keyword(string)``, the match is case-sensitive.
1008 1008 """
1009 1009 try:
1010 1010 # i18n: "grep" is a keyword
1011 1011 gr = re.compile(getstring(x, _("grep requires a string")))
1012 1012 except re.error, e:
1013 1013 raise error.ParseError(_('invalid match pattern: %s') % e)
1014 1014
1015 1015 def matches(x):
1016 1016 c = repo[x]
1017 1017 for e in c.files() + [c.user(), c.description()]:
1018 1018 if gr.search(e):
1019 1019 return True
1020 1020 return False
1021 1021
1022 1022 return subset.filter(matches)
1023 1023
1024 1024 def _matchfiles(repo, subset, x):
1025 1025 # _matchfiles takes a revset list of prefixed arguments:
1026 1026 #
1027 1027 # [p:foo, i:bar, x:baz]
1028 1028 #
1029 1029 # builds a match object from them and filters subset. Allowed
1030 1030 # prefixes are 'p:' for regular patterns, 'i:' for include
1031 1031 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1032 1032 # a revision identifier, or the empty string to reference the
1033 1033 # working directory, from which the match object is
1034 1034 # initialized. Use 'd:' to set the default matching mode, default
1035 1035 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1036 1036
1037 1037 # i18n: "_matchfiles" is a keyword
1038 1038 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1039 1039 pats, inc, exc = [], [], []
1040 1040 rev, default = None, None
1041 1041 for arg in l:
1042 1042 # i18n: "_matchfiles" is a keyword
1043 1043 s = getstring(arg, _("_matchfiles requires string arguments"))
1044 1044 prefix, value = s[:2], s[2:]
1045 1045 if prefix == 'p:':
1046 1046 pats.append(value)
1047 1047 elif prefix == 'i:':
1048 1048 inc.append(value)
1049 1049 elif prefix == 'x:':
1050 1050 exc.append(value)
1051 1051 elif prefix == 'r:':
1052 1052 if rev is not None:
1053 1053 # i18n: "_matchfiles" is a keyword
1054 1054 raise error.ParseError(_('_matchfiles expected at most one '
1055 1055 'revision'))
1056 1056 if value != '': # empty means working directory; leave rev as None
1057 1057 rev = value
1058 1058 elif prefix == 'd:':
1059 1059 if default is not None:
1060 1060 # i18n: "_matchfiles" is a keyword
1061 1061 raise error.ParseError(_('_matchfiles expected at most one '
1062 1062 'default mode'))
1063 1063 default = value
1064 1064 else:
1065 1065 # i18n: "_matchfiles" is a keyword
1066 1066 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1067 1067 if not default:
1068 1068 default = 'glob'
1069 1069
1070 1070 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1071 1071 exclude=exc, ctx=repo[rev], default=default)
1072 1072
1073 1073 def matches(x):
1074 1074 for f in repo[x].files():
1075 1075 if m(f):
1076 1076 return True
1077 1077 return False
1078 1078
1079 1079 return subset.filter(matches)
1080 1080
1081 1081 def hasfile(repo, subset, x):
1082 1082 """``file(pattern)``
1083 1083 Changesets affecting files matched by pattern.
1084 1084
1085 1085 For a faster but less accurate result, consider using ``filelog()``
1086 1086 instead.
1087 1087
1088 1088 This predicate uses ``glob:`` as the default kind of pattern.
1089 1089 """
1090 1090 # i18n: "file" is a keyword
1091 1091 pat = getstring(x, _("file requires a pattern"))
1092 1092 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1093 1093
1094 1094 def head(repo, subset, x):
1095 1095 """``head()``
1096 1096 Changeset is a named branch head.
1097 1097 """
1098 1098 # i18n: "head" is a keyword
1099 1099 getargs(x, 0, 0, _("head takes no arguments"))
1100 1100 hs = set()
1101 1101 for b, ls in repo.branchmap().iteritems():
1102 1102 hs.update(repo[h].rev() for h in ls)
1103 1103 return baseset(hs).filter(subset.__contains__)
1104 1104
1105 1105 def heads(repo, subset, x):
1106 1106 """``heads(set)``
1107 1107 Members of set with no children in set.
1108 1108 """
1109 1109 s = getset(repo, subset, x)
1110 1110 ps = parents(repo, subset, x)
1111 1111 return s - ps
1112 1112
1113 1113 def hidden(repo, subset, x):
1114 1114 """``hidden()``
1115 1115 Hidden changesets.
1116 1116 """
1117 1117 # i18n: "hidden" is a keyword
1118 1118 getargs(x, 0, 0, _("hidden takes no arguments"))
1119 1119 hiddenrevs = repoview.filterrevs(repo, 'visible')
1120 1120 return subset & hiddenrevs
1121 1121
1122 1122 def keyword(repo, subset, x):
1123 1123 """``keyword(string)``
1124 1124 Search commit message, user name, and names of changed files for
1125 1125 string. The match is case-insensitive.
1126 1126 """
1127 1127 # i18n: "keyword" is a keyword
1128 1128 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1129 1129
1130 1130 def matches(r):
1131 1131 c = repo[r]
1132 1132 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1133 1133 c.description()])
1134 1134
1135 1135 return subset.filter(matches)
1136 1136
1137 1137 def limit(repo, subset, x):
1138 1138 """``limit(set, [n])``
1139 1139 First n members of set, defaulting to 1.
1140 1140 """
1141 1141 # i18n: "limit" is a keyword
1142 1142 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1143 1143 try:
1144 1144 lim = 1
1145 1145 if len(l) == 2:
1146 1146 # i18n: "limit" is a keyword
1147 1147 lim = int(getstring(l[1], _("limit requires a number")))
1148 1148 except (TypeError, ValueError):
1149 1149 # i18n: "limit" is a keyword
1150 1150 raise error.ParseError(_("limit expects a number"))
1151 1151 ss = subset
1152 1152 os = getset(repo, fullreposet(repo), l[0])
1153 1153 result = []
1154 1154 it = iter(os)
1155 1155 for x in xrange(lim):
1156 1156 try:
1157 1157 y = it.next()
1158 1158 if y in ss:
1159 1159 result.append(y)
1160 1160 except (StopIteration):
1161 1161 break
1162 1162 return baseset(result)
1163 1163
1164 1164 def last(repo, subset, x):
1165 1165 """``last(set, [n])``
1166 1166 Last n members of set, defaulting to 1.
1167 1167 """
1168 1168 # i18n: "last" is a keyword
1169 1169 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1170 1170 try:
1171 1171 lim = 1
1172 1172 if len(l) == 2:
1173 1173 # i18n: "last" is a keyword
1174 1174 lim = int(getstring(l[1], _("last requires a number")))
1175 1175 except (TypeError, ValueError):
1176 1176 # i18n: "last" is a keyword
1177 1177 raise error.ParseError(_("last expects a number"))
1178 1178 ss = subset
1179 1179 os = getset(repo, fullreposet(repo), l[0])
1180 1180 os.reverse()
1181 1181 result = []
1182 1182 it = iter(os)
1183 1183 for x in xrange(lim):
1184 1184 try:
1185 1185 y = it.next()
1186 1186 if y in ss:
1187 1187 result.append(y)
1188 1188 except (StopIteration):
1189 1189 break
1190 1190 return baseset(result)
1191 1191
1192 1192 def maxrev(repo, subset, x):
1193 1193 """``max(set)``
1194 1194 Changeset with highest revision number in set.
1195 1195 """
1196 1196 os = getset(repo, fullreposet(repo), x)
1197 1197 if os:
1198 1198 m = os.max()
1199 1199 if m in subset:
1200 1200 return baseset([m])
1201 1201 return baseset()
1202 1202
1203 1203 def merge(repo, subset, x):
1204 1204 """``merge()``
1205 1205 Changeset is a merge changeset.
1206 1206 """
1207 1207 # i18n: "merge" is a keyword
1208 1208 getargs(x, 0, 0, _("merge takes no arguments"))
1209 1209 cl = repo.changelog
1210 1210 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1211 1211
1212 1212 def branchpoint(repo, subset, x):
1213 1213 """``branchpoint()``
1214 1214 Changesets with more than one child.
1215 1215 """
1216 1216 # i18n: "branchpoint" is a keyword
1217 1217 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1218 1218 cl = repo.changelog
1219 1219 if not subset:
1220 1220 return baseset()
1221 1221 baserev = min(subset)
1222 1222 parentscount = [0]*(len(repo) - baserev)
1223 1223 for r in cl.revs(start=baserev + 1):
1224 1224 for p in cl.parentrevs(r):
1225 1225 if p >= baserev:
1226 1226 parentscount[p - baserev] += 1
1227 1227 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1228 1228
1229 1229 def minrev(repo, subset, x):
1230 1230 """``min(set)``
1231 1231 Changeset with lowest revision number in set.
1232 1232 """
1233 1233 os = getset(repo, fullreposet(repo), x)
1234 1234 if os:
1235 1235 m = os.min()
1236 1236 if m in subset:
1237 1237 return baseset([m])
1238 1238 return baseset()
1239 1239
1240 1240 def modifies(repo, subset, x):
1241 1241 """``modifies(pattern)``
1242 1242 Changesets modifying files matched by pattern.
1243 1243
1244 1244 The pattern without explicit kind like ``glob:`` is expected to be
1245 1245 relative to the current directory and match against a file or a
1246 1246 directory.
1247 1247 """
1248 1248 # i18n: "modifies" is a keyword
1249 1249 pat = getstring(x, _("modifies requires a pattern"))
1250 1250 return checkstatus(repo, subset, pat, 0)
1251 1251
1252 1252 def named(repo, subset, x):
1253 1253 """``named(namespace)``
1254 1254 The changesets in a given namespace.
1255 1255
1256 1256 If `namespace` starts with `re:`, the remainder of the string is treated as
1257 1257 a regular expression. To match a namespace that actually starts with `re:`,
1258 1258 use the prefix `literal:`.
1259 1259 """
1260 1260 # i18n: "named" is a keyword
1261 1261 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1262 1262
1263 1263 ns = getstring(args[0],
1264 1264 # i18n: "named" is a keyword
1265 1265 _('the argument to named must be a string'))
1266 1266 kind, pattern, matcher = _stringmatcher(ns)
1267 1267 namespaces = set()
1268 1268 if kind == 'literal':
1269 1269 if pattern not in repo.names:
1270 1270 raise error.RepoLookupError(_("namespace '%s' does not exist")
1271 1271 % ns)
1272 1272 namespaces.add(repo.names[pattern])
1273 1273 else:
1274 1274 for name, ns in repo.names.iteritems():
1275 1275 if matcher(name):
1276 1276 namespaces.add(ns)
1277 1277 if not namespaces:
1278 1278 raise error.RepoLookupError(_("no namespace exists"
1279 1279 " that match '%s'") % pattern)
1280 1280
1281 1281 names = set()
1282 1282 for ns in namespaces:
1283 1283 for name in ns.listnames(repo):
1284 1284 if name not in ns.deprecated:
1285 1285 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1286 1286
1287 1287 names -= set([node.nullrev])
1288 1288 return subset & names
1289 1289
1290 1290 def node_(repo, subset, x):
1291 1291 """``id(string)``
1292 1292 Revision non-ambiguously specified by the given hex string prefix.
1293 1293 """
1294 1294 # i18n: "id" is a keyword
1295 1295 l = getargs(x, 1, 1, _("id requires one argument"))
1296 1296 # i18n: "id" is a keyword
1297 1297 n = getstring(l[0], _("id requires a string"))
1298 1298 if len(n) == 40:
1299 1299 rn = repo[n].rev()
1300 1300 else:
1301 1301 rn = None
1302 1302 pm = repo.changelog._partialmatch(n)
1303 1303 if pm is not None:
1304 1304 rn = repo.changelog.rev(pm)
1305 1305
1306 1306 if rn is None:
1307 1307 return baseset()
1308 1308 result = baseset([rn])
1309 1309 return result & subset
1310 1310
1311 1311 def obsolete(repo, subset, x):
1312 1312 """``obsolete()``
1313 1313 Mutable changeset with a newer version."""
1314 1314 # i18n: "obsolete" is a keyword
1315 1315 getargs(x, 0, 0, _("obsolete takes no arguments"))
1316 1316 obsoletes = obsmod.getrevs(repo, 'obsolete')
1317 1317 return subset & obsoletes
1318 1318
1319 1319 def only(repo, subset, x):
1320 1320 """``only(set, [set])``
1321 1321 Changesets that are ancestors of the first set that are not ancestors
1322 1322 of any other head in the repo. If a second set is specified, the result
1323 1323 is ancestors of the first set that are not ancestors of the second set
1324 1324 (i.e. ::<set1> - ::<set2>).
1325 1325 """
1326 1326 cl = repo.changelog
1327 1327 # i18n: "only" is a keyword
1328 1328 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1329 1329 include = getset(repo, fullreposet(repo), args[0])
1330 1330 if len(args) == 1:
1331 1331 if not include:
1332 1332 return baseset()
1333 1333
1334 1334 descendants = set(_revdescendants(repo, include, False))
1335 1335 exclude = [rev for rev in cl.headrevs()
1336 1336 if not rev in descendants and not rev in include]
1337 1337 else:
1338 1338 exclude = getset(repo, fullreposet(repo), args[1])
1339 1339
1340 1340 results = set(cl.findmissingrevs(common=exclude, heads=include))
1341 1341 return subset & results
1342 1342
1343 1343 def origin(repo, subset, x):
1344 1344 """``origin([set])``
1345 1345 Changesets that were specified as a source for the grafts, transplants or
1346 1346 rebases that created the given revisions. Omitting the optional set is the
1347 1347 same as passing all(). If a changeset created by these operations is itself
1348 1348 specified as a source for one of these operations, only the source changeset
1349 1349 for the first operation is selected.
1350 1350 """
1351 1351 if x is not None:
1352 1352 dests = getset(repo, fullreposet(repo), x)
1353 1353 else:
1354 1354 dests = fullreposet(repo)
1355 1355
1356 1356 def _firstsrc(rev):
1357 1357 src = _getrevsource(repo, rev)
1358 1358 if src is None:
1359 1359 return None
1360 1360
1361 1361 while True:
1362 1362 prev = _getrevsource(repo, src)
1363 1363
1364 1364 if prev is None:
1365 1365 return src
1366 1366 src = prev
1367 1367
1368 1368 o = set([_firstsrc(r) for r in dests])
1369 1369 o -= set([None])
1370 1370 return subset & o
1371 1371
1372 1372 def outgoing(repo, subset, x):
1373 1373 """``outgoing([path])``
1374 1374 Changesets not found in the specified destination repository, or the
1375 1375 default push location.
1376 1376 """
1377 1377 import hg # avoid start-up nasties
1378 1378 # i18n: "outgoing" is a keyword
1379 1379 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1380 1380 # i18n: "outgoing" is a keyword
1381 1381 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1382 1382 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1383 1383 dest, branches = hg.parseurl(dest)
1384 1384 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1385 1385 if revs:
1386 1386 revs = [repo.lookup(rev) for rev in revs]
1387 1387 other = hg.peer(repo, {}, dest)
1388 1388 repo.ui.pushbuffer()
1389 1389 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1390 1390 repo.ui.popbuffer()
1391 1391 cl = repo.changelog
1392 1392 o = set([cl.rev(r) for r in outgoing.missing])
1393 1393 return subset & o
1394 1394
1395 1395 def p1(repo, subset, x):
1396 1396 """``p1([set])``
1397 1397 First parent of changesets in set, or the working directory.
1398 1398 """
1399 1399 if x is None:
1400 1400 p = repo[x].p1().rev()
1401 1401 if p >= 0:
1402 1402 return subset & baseset([p])
1403 1403 return baseset()
1404 1404
1405 1405 ps = set()
1406 1406 cl = repo.changelog
1407 1407 for r in getset(repo, fullreposet(repo), x):
1408 1408 ps.add(cl.parentrevs(r)[0])
1409 1409 ps -= set([node.nullrev])
1410 1410 return subset & ps
1411 1411
1412 1412 def p2(repo, subset, x):
1413 1413 """``p2([set])``
1414 1414 Second parent of changesets in set, or the working directory.
1415 1415 """
1416 1416 if x is None:
1417 1417 ps = repo[x].parents()
1418 1418 try:
1419 1419 p = ps[1].rev()
1420 1420 if p >= 0:
1421 1421 return subset & baseset([p])
1422 1422 return baseset()
1423 1423 except IndexError:
1424 1424 return baseset()
1425 1425
1426 1426 ps = set()
1427 1427 cl = repo.changelog
1428 1428 for r in getset(repo, fullreposet(repo), x):
1429 1429 ps.add(cl.parentrevs(r)[1])
1430 1430 ps -= set([node.nullrev])
1431 1431 return subset & ps
1432 1432
1433 1433 def parents(repo, subset, x):
1434 1434 """``parents([set])``
1435 1435 The set of all parents for all changesets in set, or the working directory.
1436 1436 """
1437 1437 if x is None:
1438 1438 ps = set(p.rev() for p in repo[x].parents())
1439 1439 else:
1440 1440 ps = set()
1441 1441 cl = repo.changelog
1442 1442 for r in getset(repo, fullreposet(repo), x):
1443 1443 ps.update(cl.parentrevs(r))
1444 1444 ps -= set([node.nullrev])
1445 1445 return subset & ps
1446 1446
1447 1447 def parentspec(repo, subset, x, n):
1448 1448 """``set^0``
1449 1449 The set.
1450 1450 ``set^1`` (or ``set^``), ``set^2``
1451 1451 First or second parent, respectively, of all changesets in set.
1452 1452 """
1453 1453 try:
1454 1454 n = int(n[1])
1455 1455 if n not in (0, 1, 2):
1456 1456 raise ValueError
1457 1457 except (TypeError, ValueError):
1458 1458 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1459 1459 ps = set()
1460 1460 cl = repo.changelog
1461 1461 for r in getset(repo, fullreposet(repo), x):
1462 1462 if n == 0:
1463 1463 ps.add(r)
1464 1464 elif n == 1:
1465 1465 ps.add(cl.parentrevs(r)[0])
1466 1466 elif n == 2:
1467 1467 parents = cl.parentrevs(r)
1468 1468 if len(parents) > 1:
1469 1469 ps.add(parents[1])
1470 1470 return subset & ps
1471 1471
1472 1472 def present(repo, subset, x):
1473 1473 """``present(set)``
1474 1474 An empty set, if any revision in set isn't found; otherwise,
1475 1475 all revisions in set.
1476 1476
1477 1477 If any of specified revisions is not present in the local repository,
1478 1478 the query is normally aborted. But this predicate allows the query
1479 1479 to continue even in such cases.
1480 1480 """
1481 1481 try:
1482 1482 return getset(repo, subset, x)
1483 1483 except error.RepoLookupError:
1484 1484 return baseset()
1485 1485
1486 1486 def public(repo, subset, x):
1487 1487 """``public()``
1488 1488 Changeset in public phase."""
1489 1489 # i18n: "public" is a keyword
1490 1490 getargs(x, 0, 0, _("public takes no arguments"))
1491 1491 phase = repo._phasecache.phase
1492 1492 target = phases.public
1493 1493 condition = lambda r: phase(repo, r) == target
1494 1494 return subset.filter(condition, cache=False)
1495 1495
1496 1496 def remote(repo, subset, x):
1497 1497 """``remote([id [,path]])``
1498 1498 Local revision that corresponds to the given identifier in a
1499 1499 remote repository, if present. Here, the '.' identifier is a
1500 1500 synonym for the current local branch.
1501 1501 """
1502 1502
1503 1503 import hg # avoid start-up nasties
1504 1504 # i18n: "remote" is a keyword
1505 1505 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1506 1506
1507 1507 q = '.'
1508 1508 if len(l) > 0:
1509 1509 # i18n: "remote" is a keyword
1510 1510 q = getstring(l[0], _("remote requires a string id"))
1511 1511 if q == '.':
1512 1512 q = repo['.'].branch()
1513 1513
1514 1514 dest = ''
1515 1515 if len(l) > 1:
1516 1516 # i18n: "remote" is a keyword
1517 1517 dest = getstring(l[1], _("remote requires a repository path"))
1518 1518 dest = repo.ui.expandpath(dest or 'default')
1519 1519 dest, branches = hg.parseurl(dest)
1520 1520 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1521 1521 if revs:
1522 1522 revs = [repo.lookup(rev) for rev in revs]
1523 1523 other = hg.peer(repo, {}, dest)
1524 1524 n = other.lookup(q)
1525 1525 if n in repo:
1526 1526 r = repo[n].rev()
1527 1527 if r in subset:
1528 1528 return baseset([r])
1529 1529 return baseset()
1530 1530
1531 1531 def removes(repo, subset, x):
1532 1532 """``removes(pattern)``
1533 1533 Changesets which remove files matching pattern.
1534 1534
1535 1535 The pattern without explicit kind like ``glob:`` is expected to be
1536 1536 relative to the current directory and match against a file or a
1537 1537 directory.
1538 1538 """
1539 1539 # i18n: "removes" is a keyword
1540 1540 pat = getstring(x, _("removes requires a pattern"))
1541 1541 return checkstatus(repo, subset, pat, 2)
1542 1542
1543 1543 def rev(repo, subset, x):
1544 1544 """``rev(number)``
1545 1545 Revision with the given numeric identifier.
1546 1546 """
1547 1547 # i18n: "rev" is a keyword
1548 1548 l = getargs(x, 1, 1, _("rev requires one argument"))
1549 1549 try:
1550 1550 # i18n: "rev" is a keyword
1551 1551 l = int(getstring(l[0], _("rev requires a number")))
1552 1552 except (TypeError, ValueError):
1553 1553 # i18n: "rev" is a keyword
1554 1554 raise error.ParseError(_("rev expects a number"))
1555 1555 if l not in repo.changelog and l != node.nullrev:
1556 1556 return baseset()
1557 1557 return subset & baseset([l])
1558 1558
1559 1559 def matching(repo, subset, x):
1560 1560 """``matching(revision [, field])``
1561 1561 Changesets in which a given set of fields match the set of fields in the
1562 1562 selected revision or set.
1563 1563
1564 1564 To match more than one field pass the list of fields to match separated
1565 1565 by spaces (e.g. ``author description``).
1566 1566
1567 1567 Valid fields are most regular revision fields and some special fields.
1568 1568
1569 1569 Regular revision fields are ``description``, ``author``, ``branch``,
1570 1570 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1571 1571 and ``diff``.
1572 1572 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1573 1573 contents of the revision. Two revisions matching their ``diff`` will
1574 1574 also match their ``files``.
1575 1575
1576 1576 Special fields are ``summary`` and ``metadata``:
1577 1577 ``summary`` matches the first line of the description.
1578 1578 ``metadata`` is equivalent to matching ``description user date``
1579 1579 (i.e. it matches the main metadata fields).
1580 1580
1581 1581 ``metadata`` is the default field which is used when no fields are
1582 1582 specified. You can match more than one field at a time.
1583 1583 """
1584 1584 # i18n: "matching" is a keyword
1585 1585 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1586 1586
1587 1587 revs = getset(repo, fullreposet(repo), l[0])
1588 1588
1589 1589 fieldlist = ['metadata']
1590 1590 if len(l) > 1:
1591 1591 fieldlist = getstring(l[1],
1592 1592 # i18n: "matching" is a keyword
1593 1593 _("matching requires a string "
1594 1594 "as its second argument")).split()
1595 1595
1596 1596 # Make sure that there are no repeated fields,
1597 1597 # expand the 'special' 'metadata' field type
1598 1598 # and check the 'files' whenever we check the 'diff'
1599 1599 fields = []
1600 1600 for field in fieldlist:
1601 1601 if field == 'metadata':
1602 1602 fields += ['user', 'description', 'date']
1603 1603 elif field == 'diff':
1604 1604 # a revision matching the diff must also match the files
1605 1605 # since matching the diff is very costly, make sure to
1606 1606 # also match the files first
1607 1607 fields += ['files', 'diff']
1608 1608 else:
1609 1609 if field == 'author':
1610 1610 field = 'user'
1611 1611 fields.append(field)
1612 1612 fields = set(fields)
1613 1613 if 'summary' in fields and 'description' in fields:
1614 1614 # If a revision matches its description it also matches its summary
1615 1615 fields.discard('summary')
1616 1616
1617 1617 # We may want to match more than one field
1618 1618 # Not all fields take the same amount of time to be matched
1619 1619 # Sort the selected fields in order of increasing matching cost
1620 1620 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1621 1621 'files', 'description', 'substate', 'diff']
1622 1622 def fieldkeyfunc(f):
1623 1623 try:
1624 1624 return fieldorder.index(f)
1625 1625 except ValueError:
1626 1626 # assume an unknown field is very costly
1627 1627 return len(fieldorder)
1628 1628 fields = list(fields)
1629 1629 fields.sort(key=fieldkeyfunc)
1630 1630
1631 1631 # Each field will be matched with its own "getfield" function
1632 1632 # which will be added to the getfieldfuncs array of functions
1633 1633 getfieldfuncs = []
1634 1634 _funcs = {
1635 1635 'user': lambda r: repo[r].user(),
1636 1636 'branch': lambda r: repo[r].branch(),
1637 1637 'date': lambda r: repo[r].date(),
1638 1638 'description': lambda r: repo[r].description(),
1639 1639 'files': lambda r: repo[r].files(),
1640 1640 'parents': lambda r: repo[r].parents(),
1641 1641 'phase': lambda r: repo[r].phase(),
1642 1642 'substate': lambda r: repo[r].substate,
1643 1643 'summary': lambda r: repo[r].description().splitlines()[0],
1644 1644     'diff': lambda r: list(repo[r].diff(git=True)),
1645 1645 }
1646 1646 for info in fields:
1647 1647 getfield = _funcs.get(info, None)
1648 1648 if getfield is None:
1649 1649 raise error.ParseError(
1650 1650 # i18n: "matching" is a keyword
1651 1651 _("unexpected field name passed to matching: %s") % info)
1652 1652 getfieldfuncs.append(getfield)
1653 1653 # convert the getfield array of functions into a "getinfo" function
1654 1654 # which returns an array of field values (or a single value if there
1655 1655 # is only one field to match)
1656 1656 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1657 1657
1658 1658 def matches(x):
1659 1659 for rev in revs:
1660 1660 target = getinfo(rev)
1661 1661 match = True
1662 1662 for n, f in enumerate(getfieldfuncs):
1663 1663 if target[n] != f(x):
1664 1664 match = False
1665 1665 if match:
1666 1666 return True
1667 1667 return False
1668 1668
1669 1669 return subset.filter(matches)
1670 1670
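# Illustrative usage (not part of this module): the "matching" predicate is
# exercised through the revset language, e.g. from the command line:
#
#   hg log -r 'matching(tip, "author date")'
#
# which selects changesets whose author and date equal those of tip. The
# requested fields are sorted by the cost table above before comparison.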
1671 1671 def reverse(repo, subset, x):
1672 1672 """``reverse(set)``
1673 1673 Reverse order of set.
1674 1674 """
1675 1675 l = getset(repo, subset, x)
1676 1676 l.reverse()
1677 1677 return l
1678 1678
1679 1679 def roots(repo, subset, x):
1680 1680 """``roots(set)``
1681 1681 Changesets in set with no parent changeset in set.
1682 1682 """
1683 1683 s = getset(repo, fullreposet(repo), x)
1684 1684 subset = baseset([r for r in s if r in subset])
1685 1685 cs = _children(repo, subset, s)
1686 1686 return subset - cs
1687 1687
1688 1688 def secret(repo, subset, x):
1689 1689 """``secret()``
1690 1690 Changeset in secret phase."""
1691 1691 # i18n: "secret" is a keyword
1692 1692 getargs(x, 0, 0, _("secret takes no arguments"))
1693 1693 phase = repo._phasecache.phase
1694 1694 target = phases.secret
1695 1695 condition = lambda r: phase(repo, r) == target
1696 1696 return subset.filter(condition, cache=False)
1697 1697
1698 1698 def sort(repo, subset, x):
1699 1699 """``sort(set[, [-]key...])``
1700 1700 Sort set by keys. The default sort order is ascending, specify a key
1701 1701 as ``-key`` to sort in descending order.
1702 1702
1703 1703 The keys can be:
1704 1704
1705 1705 - ``rev`` for the revision number,
1706 1706 - ``branch`` for the branch name,
1707 1707 - ``desc`` for the commit message (description),
1708 1708 - ``user`` for user name (``author`` can be used as an alias),
1709 1709 - ``date`` for the commit date
1710 1710 """
1711 1711 # i18n: "sort" is a keyword
1712 1712 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1713 1713 keys = "rev"
1714 1714 if len(l) == 2:
1715 1715 # i18n: "sort" is a keyword
1716 1716 keys = getstring(l[1], _("sort spec must be a string"))
1717 1717
1718 1718 s = l[0]
1719 1719 keys = keys.split()
1720 1720 l = []
1721 1721 def invert(s):
1722 1722 return "".join(chr(255 - ord(c)) for c in s)
1723 1723 revs = getset(repo, subset, s)
1724 1724 if keys == ["rev"]:
1725 1725 revs.sort()
1726 1726 return revs
1727 1727 elif keys == ["-rev"]:
1728 1728 revs.sort(reverse=True)
1729 1729 return revs
1730 1730 for r in revs:
1731 1731 c = repo[r]
1732 1732 e = []
1733 1733 for k in keys:
1734 1734 if k == 'rev':
1735 1735 e.append(r)
1736 1736 elif k == '-rev':
1737 1737 e.append(-r)
1738 1738 elif k == 'branch':
1739 1739 e.append(c.branch())
1740 1740 elif k == '-branch':
1741 1741 e.append(invert(c.branch()))
1742 1742 elif k == 'desc':
1743 1743 e.append(c.description())
1744 1744 elif k == '-desc':
1745 1745 e.append(invert(c.description()))
1746 1746 elif k in 'user author':
1747 1747 e.append(c.user())
1748 1748 elif k in '-user -author':
1749 1749 e.append(invert(c.user()))
1750 1750 elif k == 'date':
1751 1751 e.append(c.date()[0])
1752 1752 elif k == '-date':
1753 1753 e.append(-c.date()[0])
1754 1754 else:
1755 1755 raise error.ParseError(_("unknown sort key %r") % k)
1756 1756 e.append(r)
1757 1757 l.append(e)
1758 1758 l.sort()
1759 1759 return baseset([e[-1] for e in l])
1760 1760
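# Illustrative usage (not part of this module): sort keys may be combined and
# negated, e.g.
#
#   hg log -r 'sort(branch(default), "-date user")'
#
# which orders the default-branch changesets by descending date, breaking ties
# by ascending user name (descending string keys are inverted bytewise by the
# invert() helper inside sort() above).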
1761 1761 def _stringmatcher(pattern):
1762 1762 """
1763 1763 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1764 1764 returns the matcher name, pattern, and matcher function.
1765 1765 missing or unknown prefixes are treated as literal matches.
1766 1766
1767 1767 helper for tests:
1768 1768 >>> def test(pattern, *tests):
1769 1769 ... kind, pattern, matcher = _stringmatcher(pattern)
1770 1770 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1771 1771
1772 1772 exact matching (no prefix):
1773 1773 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1774 1774 ('literal', 'abcdefg', [False, False, True])
1775 1775
1776 1776 regex matching ('re:' prefix)
1777 1777 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1778 1778 ('re', 'a.+b', [False, False, True])
1779 1779
1780 1780 force exact matches ('literal:' prefix)
1781 1781 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1782 1782 ('literal', 're:foobar', [False, True])
1783 1783
1784 1784 unknown prefixes are ignored and treated as literals
1785 1785 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1786 1786 ('literal', 'foo:bar', [False, False, True])
1787 1787 """
1788 1788 if pattern.startswith('re:'):
1789 1789 pattern = pattern[3:]
1790 1790 try:
1791 1791 regex = re.compile(pattern)
1792 1792 except re.error, e:
1793 1793 raise error.ParseError(_('invalid regular expression: %s')
1794 1794 % e)
1795 1795 return 're', pattern, regex.search
1796 1796 elif pattern.startswith('literal:'):
1797 1797 pattern = pattern[8:]
1798 1798 return 'literal', pattern, pattern.__eq__
1799 1799
1800 1800 def _substringmatcher(pattern):
1801 1801 kind, pattern, matcher = _stringmatcher(pattern)
1802 1802 if kind == 'literal':
1803 1803 matcher = lambda s: pattern in s
1804 1804 return kind, pattern, matcher
1805 1805
1806 1806 def tag(repo, subset, x):
1807 1807 """``tag([name])``
1808 1808 The specified tag by name, or all tagged revisions if no name is given.
1809 1809
1810 1810 If `name` starts with `re:`, the remainder of the name is treated as
1811 1811 a regular expression. To match a tag that actually starts with `re:`,
1812 1812 use the prefix `literal:`.
1813 1813 """
1814 1814 # i18n: "tag" is a keyword
1815 1815 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1816 1816 cl = repo.changelog
1817 1817 if args:
1818 1818 pattern = getstring(args[0],
1819 1819 # i18n: "tag" is a keyword
1820 1820 _('the argument to tag must be a string'))
1821 1821 kind, pattern, matcher = _stringmatcher(pattern)
1822 1822 if kind == 'literal':
1823 1823 # avoid resolving all tags
1824 1824 tn = repo._tagscache.tags.get(pattern, None)
1825 1825 if tn is None:
1826 1826 raise error.RepoLookupError(_("tag '%s' does not exist")
1827 1827 % pattern)
1828 1828 s = set([repo[tn].rev()])
1829 1829 else:
1830 1830 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1831 1831 else:
1832 1832 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1833 1833 return subset & s
1834 1834
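# Illustrative usage (not part of this module): tag() accepts the same
# 're:'/'literal:' prefixes handled by _stringmatcher, e.g.
#
#   hg log -r 'tag("re:^v1")'
#
# selects all revisions carrying a tag whose name starts with "v1".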
1835 1835 def tagged(repo, subset, x):
1836 1836 return tag(repo, subset, x)
1837 1837
1838 1838 def unstable(repo, subset, x):
1839 1839 """``unstable()``
1840 1840 Non-obsolete changesets with obsolete ancestors.
1841 1841 """
1842 1842 # i18n: "unstable" is a keyword
1843 1843 getargs(x, 0, 0, _("unstable takes no arguments"))
1844 1844 unstables = obsmod.getrevs(repo, 'unstable')
1845 1845 return subset & unstables
1846 1846
1847 1847
1848 1848 def user(repo, subset, x):
1849 1849 """``user(string)``
1850 1850 User name contains string. The match is case-insensitive.
1851 1851
1852 1852 If `string` starts with `re:`, the remainder of the string is treated as
1853 1853 a regular expression. To match a user that actually contains `re:`, use
1854 1854 the prefix `literal:`.
1855 1855 """
1856 1856 return author(repo, subset, x)
1857 1857
1858 1858 # for internal use
1859 1859 def _list(repo, subset, x):
1860 1860 s = getstring(x, "internal error")
1861 1861 if not s:
1862 1862 return baseset()
1863 1863 ls = [repo[r].rev() for r in s.split('\0')]
1864 1864 s = subset
1865 1865 return baseset([r for r in ls if r in s])
1866 1866
1867 1867 # for internal use
1868 1868 def _intlist(repo, subset, x):
1869 1869 s = getstring(x, "internal error")
1870 1870 if not s:
1871 1871 return baseset()
1872 1872 ls = [int(r) for r in s.split('\0')]
1873 1873 s = subset
1874 1874 return baseset([r for r in ls if r in s])
1875 1875
1876 1876 # for internal use
1877 1877 def _hexlist(repo, subset, x):
1878 1878 s = getstring(x, "internal error")
1879 1879 if not s:
1880 1880 return baseset()
1881 1881 cl = repo.changelog
1882 1882 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1883 1883 s = subset
1884 1884 return baseset([r for r in ls if r in s])
1885 1885
1886 1886 symbols = {
1887 1887 "adds": adds,
1888 1888 "all": getall,
1889 1889 "ancestor": ancestor,
1890 1890 "ancestors": ancestors,
1891 1891 "_firstancestors": _firstancestors,
1892 1892 "author": author,
1893 1893 "bisect": bisect,
1894 1894 "bisected": bisected,
1895 1895 "bookmark": bookmark,
1896 1896 "branch": branch,
1897 1897 "branchpoint": branchpoint,
1898 1898 "bumped": bumped,
1899 1899 "bundle": bundle,
1900 1900 "children": children,
1901 1901 "closed": closed,
1902 1902 "contains": contains,
1903 1903 "converted": converted,
1904 1904 "date": date,
1905 1905 "desc": desc,
1906 1906 "descendants": descendants,
1907 1907 "_firstdescendants": _firstdescendants,
1908 1908 "destination": destination,
1909 1909 "divergent": divergent,
1910 1910 "draft": draft,
1911 1911 "extinct": extinct,
1912 1912 "extra": extra,
1913 1913 "file": hasfile,
1914 1914 "filelog": filelog,
1915 1915 "first": first,
1916 1916 "follow": follow,
1917 1917 "_followfirst": _followfirst,
1918 1918 "grep": grep,
1919 1919 "head": head,
1920 1920 "heads": heads,
1921 1921 "hidden": hidden,
1922 1922 "id": node_,
1923 1923 "keyword": keyword,
1924 1924 "last": last,
1925 1925 "limit": limit,
1926 1926 "_matchfiles": _matchfiles,
1927 1927 "max": maxrev,
1928 1928 "merge": merge,
1929 1929 "min": minrev,
1930 1930 "modifies": modifies,
1931 1931 "named": named,
1932 1932 "obsolete": obsolete,
1933 1933 "only": only,
1934 1934 "origin": origin,
1935 1935 "outgoing": outgoing,
1936 1936 "p1": p1,
1937 1937 "p2": p2,
1938 1938 "parents": parents,
1939 1939 "present": present,
1940 1940 "public": public,
1941 1941 "remote": remote,
1942 1942 "removes": removes,
1943 1943 "rev": rev,
1944 1944 "reverse": reverse,
1945 1945 "roots": roots,
1946 1946 "sort": sort,
1947 1947 "secret": secret,
1948 1948 "matching": matching,
1949 1949 "tag": tag,
1950 1950 "tagged": tagged,
1951 1951 "user": user,
1952 1952 "unstable": unstable,
1953 1953 "_list": _list,
1954 1954 "_intlist": _intlist,
1955 1955 "_hexlist": _hexlist,
1956 1956 }
1957 1957
1958 1958 # symbols which can't be used for a DoS attack for any given input
1959 1959 # (e.g. those which accept regexes as plain strings shouldn't be included)
1960 1960 # functions that just return a lot of changesets (like all) don't count here
1961 1961 safesymbols = set([
1962 1962 "adds",
1963 1963 "all",
1964 1964 "ancestor",
1965 1965 "ancestors",
1966 1966 "_firstancestors",
1967 1967 "author",
1968 1968 "bisect",
1969 1969 "bisected",
1970 1970 "bookmark",
1971 1971 "branch",
1972 1972 "branchpoint",
1973 1973 "bumped",
1974 1974 "bundle",
1975 1975 "children",
1976 1976 "closed",
1977 1977 "converted",
1978 1978 "date",
1979 1979 "desc",
1980 1980 "descendants",
1981 1981 "_firstdescendants",
1982 1982 "destination",
1983 1983 "divergent",
1984 1984 "draft",
1985 1985 "extinct",
1986 1986 "extra",
1987 1987 "file",
1988 1988 "filelog",
1989 1989 "first",
1990 1990 "follow",
1991 1991 "_followfirst",
1992 1992 "head",
1993 1993 "heads",
1994 1994 "hidden",
1995 1995 "id",
1996 1996 "keyword",
1997 1997 "last",
1998 1998 "limit",
1999 1999 "_matchfiles",
2000 2000 "max",
2001 2001 "merge",
2002 2002 "min",
2003 2003 "modifies",
2004 2004 "obsolete",
2005 2005 "only",
2006 2006 "origin",
2007 2007 "outgoing",
2008 2008 "p1",
2009 2009 "p2",
2010 2010 "parents",
2011 2011 "present",
2012 2012 "public",
2013 2013 "remote",
2014 2014 "removes",
2015 2015 "rev",
2016 2016 "reverse",
2017 2017 "roots",
2018 2018 "sort",
2019 2019 "secret",
2020 2020 "matching",
2021 2021 "tag",
2022 2022 "tagged",
2023 2023 "user",
2024 2024 "unstable",
2025 2025 "_list",
2026 2026 "_intlist",
2027 2027 "_hexlist",
2028 2028 ])
2029 2029
2030 2030 methods = {
2031 2031 "range": rangeset,
2032 2032 "dagrange": dagrange,
2033 2033 "string": stringset,
2034 2034 "symbol": symbolset,
2035 2035 "and": andset,
2036 2036 "or": orset,
2037 2037 "not": notset,
2038 2038 "list": listset,
2039 2039 "func": func,
2040 2040 "ancestor": ancestorspec,
2041 2041 "parent": parentspec,
2042 2042 "parentpost": p1,
2043 2043 "only": only,
2044 2044 "onlypost": only,
2045 2045 }
2046 2046
2047 2047 def optimize(x, small):
2048 2048 if x is None:
2049 2049 return 0, x
2050 2050
2051 2051 smallbonus = 1
2052 2052 if small:
2053 2053 smallbonus = .5
2054 2054
2055 2055 op = x[0]
2056 2056 if op == 'minus':
2057 2057 return optimize(('and', x[1], ('not', x[2])), small)
2058 2058 elif op == 'only':
2059 2059 return optimize(('func', ('symbol', 'only'),
2060 2060 ('list', x[1], x[2])), small)
2061 2061 elif op == 'dagrangepre':
2062 2062 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2063 2063 elif op == 'dagrangepost':
2064 2064 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2065 2065 elif op == 'rangepre':
2066 2066 return optimize(('range', ('string', '0'), x[1]), small)
2067 2067 elif op == 'rangepost':
2068 2068 return optimize(('range', x[1], ('string', 'tip')), small)
2069 2069 elif op == 'negate':
2070 2070 return optimize(('string',
2071 2071 '-' + getstring(x[1], _("can't negate that"))), small)
2072 2072 elif op in 'string symbol negate':
2073 2073 return smallbonus, x # single revisions are small
2074 2074 elif op == 'and':
2075 2075 wa, ta = optimize(x[1], True)
2076 2076 wb, tb = optimize(x[2], True)
2077 2077
2078 2078 # (::x and not ::y)/(not ::y and ::x) have a fast path
2079 2079 def isonly(revs, bases):
2080 2080 return (
2081 2081 revs[0] == 'func'
2082 2082 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2083 2083 and bases[0] == 'not'
2084 2084 and bases[1][0] == 'func'
2085 2085 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2086 2086
2087 2087 w = min(wa, wb)
2088 2088 if isonly(ta, tb):
2089 2089 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2090 2090 if isonly(tb, ta):
2091 2091 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2092 2092
2093 2093 if wa > wb:
2094 2094 return w, (op, tb, ta)
2095 2095 return w, (op, ta, tb)
2096 2096 elif op == 'or':
2097 2097 wa, ta = optimize(x[1], False)
2098 2098 wb, tb = optimize(x[2], False)
2099 2099 if wb < wa:
2100 2100 wb, wa = wa, wb
2101 2101 return max(wa, wb), (op, ta, tb)
2102 2102 elif op == 'not':
2103 2103 o = optimize(x[1], not small)
2104 2104 return o[0], (op, o[1])
2105 2105 elif op == 'parentpost':
2106 2106 o = optimize(x[1], small)
2107 2107 return o[0], (op, o[1])
2108 2108 elif op == 'group':
2109 2109 return optimize(x[1], small)
2110 2110 elif op in 'dagrange range list parent ancestorspec':
2111 2111 if op == 'parent':
2112 2112 # x^:y means (x^) : y, not x ^ (:y)
2113 2113 post = ('parentpost', x[1])
2114 2114 if x[2][0] == 'dagrangepre':
2115 2115 return optimize(('dagrange', post, x[2][1]), small)
2116 2116 elif x[2][0] == 'rangepre':
2117 2117 return optimize(('range', post, x[2][1]), small)
2118 2118
2119 2119 wa, ta = optimize(x[1], small)
2120 2120 wb, tb = optimize(x[2], small)
2121 2121 return wa + wb, (op, ta, tb)
2122 2122 elif op == 'func':
2123 2123 f = getstring(x[1], _("not a symbol"))
2124 2124 wa, ta = optimize(x[2], small)
2125 2125 if f in ("author branch closed date desc file grep keyword "
2126 2126 "outgoing user"):
2127 2127 w = 10 # slow
2128 2128 elif f in "modifies adds removes":
2129 2129 w = 30 # slower
2130 2130 elif f == "contains":
2131 2131 w = 100 # very slow
2132 2132 elif f == "ancestor":
2133 2133 w = 1 * smallbonus
2134 2134 elif f in "reverse limit first _intlist":
2135 2135 w = 0
2136 2136 elif f in "sort":
2137 2137 w = 10 # assume most sorts look at changelog
2138 2138 else:
2139 2139 w = 1
2140 2140 return w + wa, (op, x[1], ta)
2141 2141 return 1, x
2142 2142
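# Illustrative example (not part of this module): optimize() rewrites the
# "ancestors and not ancestors" pattern onto the only() fast path, so a query
# such as
#
#   ::feature and not ::default
#
# is evaluated as only(feature, default) instead of materializing and
# subtracting two full ancestor sets.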
2143 2143 _aliasarg = ('func', ('symbol', '_aliasarg'))
2144 2144 def _getaliasarg(tree):
2145 2145 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2146 2146 return X, None otherwise.
2147 2147 """
2148 2148 if (len(tree) == 3 and tree[:2] == _aliasarg
2149 2149 and tree[2][0] == 'string'):
2150 2150 return tree[2][1]
2151 2151 return None
2152 2152
2153 2153 def _checkaliasarg(tree, known=None):
2154 2154     """Check that tree contains no _aliasarg construct, or only ones whose
2155 2155     value is in known. Used to avoid injection of alias placeholders.
2156 2156 """
2157 2157 if isinstance(tree, tuple):
2158 2158 arg = _getaliasarg(tree)
2159 2159 if arg is not None and (not known or arg not in known):
2160 2160 raise error.UnknownIdentifier('_aliasarg', [])
2161 2161 for t in tree:
2162 2162 _checkaliasarg(t, known)
2163 2163
2164 2164 # the set of valid characters for the initial letter of symbols in
2165 2165 # alias declarations and definitions
2166 2166 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2167 2167 if c.isalnum() or c in '._@$' or ord(c) > 127)
2168 2168
2169 2169 def _tokenizealias(program, lookup=None):
2170 2170 """Parse alias declaration/definition into a stream of tokens
2171 2171
2172 2172 This allows symbol names to use also ``$`` as an initial letter
2173 2173 (for backward compatibility), and callers of this function should
2174 2174 examine whether ``$`` is used also for unexpected symbols or not.
2175 2175 """
2176 2176 return tokenize(program, lookup=lookup,
2177 2177 syminitletters=_aliassyminitletters)
2178 2178
2179 2179 def _parsealiasdecl(decl):
2180 2180 """Parse alias declaration ``decl``
2181 2181
2182 2182 This returns ``(name, tree, args, errorstr)`` tuple:
2183 2183
2184 2184 - ``name``: of declared alias (may be ``decl`` itself at error)
2185 2185 - ``tree``: parse result (or ``None`` at error)
2186 2186 - ``args``: list of alias argument names (or None for symbol declaration)
2187 2187 - ``errorstr``: detail about detected error (or None)
2188 2188
2189 2189 >>> _parsealiasdecl('foo')
2190 2190 ('foo', ('symbol', 'foo'), None, None)
2191 2191 >>> _parsealiasdecl('$foo')
2192 2192 ('$foo', None, None, "'$' not for alias arguments")
2193 2193 >>> _parsealiasdecl('foo::bar')
2194 2194 ('foo::bar', None, None, 'invalid format')
2195 2195 >>> _parsealiasdecl('foo bar')
2196 2196 ('foo bar', None, None, 'at 4: invalid token')
2197 2197 >>> _parsealiasdecl('foo()')
2198 2198 ('foo', ('func', ('symbol', 'foo')), [], None)
2199 2199 >>> _parsealiasdecl('$foo()')
2200 2200 ('$foo()', None, None, "'$' not for alias arguments")
2201 2201 >>> _parsealiasdecl('foo($1, $2)')
2202 2202 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2203 2203 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2204 2204 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2205 2205 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2206 2206 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2207 2207 >>> _parsealiasdecl('foo(bar($1, $2))')
2208 2208 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2209 2209 >>> _parsealiasdecl('foo("string")')
2210 2210 ('foo("string")', None, None, 'invalid argument list')
2211 2211 >>> _parsealiasdecl('foo($1, $2')
2212 2212 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2213 2213 >>> _parsealiasdecl('foo("string')
2214 2214 ('foo("string', None, None, 'at 5: unterminated string')
2215 2215 >>> _parsealiasdecl('foo($1, $2, $1)')
2216 2216 ('foo', None, None, 'argument names collide with each other')
2217 2217 """
2218 2218 p = parser.parser(_tokenizealias, elements)
2219 2219 try:
2220 2220 tree, pos = p.parse(decl)
2221 2221 if (pos != len(decl)):
2222 2222 raise error.ParseError(_('invalid token'), pos)
2223 2223
2224 2224 if isvalidsymbol(tree):
2225 2225 # "name = ...." style
2226 2226 name = getsymbol(tree)
2227 2227 if name.startswith('$'):
2228 2228 return (decl, None, None, _("'$' not for alias arguments"))
2229 2229 return (name, ('symbol', name), None, None)
2230 2230
2231 2231 if isvalidfunc(tree):
2232 2232 # "name(arg, ....) = ...." style
2233 2233 name = getfuncname(tree)
2234 2234 if name.startswith('$'):
2235 2235 return (decl, None, None, _("'$' not for alias arguments"))
2236 2236 args = []
2237 2237 for arg in getfuncargs(tree):
2238 2238 if not isvalidsymbol(arg):
2239 2239 return (decl, None, None, _("invalid argument list"))
2240 2240 args.append(getsymbol(arg))
2241 2241 if len(args) != len(set(args)):
2242 2242 return (name, None, None,
2243 2243 _("argument names collide with each other"))
2244 2244 return (name, ('func', ('symbol', name)), args, None)
2245 2245
2246 2246 return (decl, None, None, _("invalid format"))
2247 2247 except error.ParseError, inst:
2248 2248 return (decl, None, None, parseerrordetail(inst))
2249 2249
2250 2250 def _parsealiasdefn(defn, args):
2251 2251 """Parse alias definition ``defn``
2252 2252
2253 2253 This function also replaces alias argument references in the
2254 2254 specified definition by ``_aliasarg(ARGNAME)``.
2255 2255
2256 2256 ``args`` is a list of alias argument names, or None if the alias
2257 2257 is declared as a symbol.
2258 2258
2259 2259 This returns "tree" as parsing result.
2260 2260
2261 2261 >>> args = ['$1', '$2', 'foo']
2262 2262 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2263 2263 (or
2264 2264 (func
2265 2265 ('symbol', '_aliasarg')
2266 2266 ('string', '$1'))
2267 2267 (func
2268 2268 ('symbol', '_aliasarg')
2269 2269 ('string', 'foo')))
2270 2270 >>> try:
2271 2271 ... _parsealiasdefn('$1 or $bar', args)
2272 2272 ... except error.ParseError, inst:
2273 2273 ... print parseerrordetail(inst)
2274 2274 at 6: '$' not for alias arguments
2275 2275 >>> args = ['$1', '$10', 'foo']
2276 2276 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2277 2277 (or
2278 2278 (func
2279 2279 ('symbol', '_aliasarg')
2280 2280 ('string', '$10'))
2281 2281 ('symbol', 'foobar'))
2282 2282 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2283 2283 (or
2284 2284 ('string', '$1')
2285 2285 ('string', 'foo'))
2286 2286 """
2287 2287 def tokenizedefn(program, lookup=None):
2288 2288 if args:
2289 2289 argset = set(args)
2290 2290 else:
2291 2291 argset = set()
2292 2292
2293 2293 for t, value, pos in _tokenizealias(program, lookup=lookup):
2294 2294 if t == 'symbol':
2295 2295 if value in argset:
2296 2296 # emulate tokenization of "_aliasarg('ARGNAME')":
2297 2297                     # "_aliasarg()" is an unknown symbol used only to separate
2298 2298                     # alias argument placeholders from regular strings.
2299 2299 yield ('symbol', '_aliasarg', pos)
2300 2300 yield ('(', None, pos)
2301 2301 yield ('string', value, pos)
2302 2302 yield (')', None, pos)
2303 2303 continue
2304 2304 elif value.startswith('$'):
2305 2305 raise error.ParseError(_("'$' not for alias arguments"),
2306 2306 pos)
2307 2307 yield (t, value, pos)
2308 2308
2309 2309 p = parser.parser(tokenizedefn, elements)
2310 2310 tree, pos = p.parse(defn)
2311 2311 if pos != len(defn):
2312 2312 raise error.ParseError(_('invalid token'), pos)
2313 2313 return tree
2314 2314
2315 2315 class revsetalias(object):
2316 2316 # whether own `error` information is already shown or not.
2317 2317 # this avoids showing same warning multiple times at each `findaliases`.
2318 2318 warned = False
2319 2319
2320 2320 def __init__(self, name, value):
2321 2321 '''Aliases like:
2322 2322
2323 2323 h = heads(default)
2324 2324 b($1) = ancestors($1) - ancestors(default)
2325 2325 '''
2326 2326 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2327 2327 if self.error:
2328 2328 self.error = _('failed to parse the declaration of revset alias'
2329 2329 ' "%s": %s') % (self.name, self.error)
2330 2330 return
2331 2331
2332 2332 try:
2333 2333 self.replacement = _parsealiasdefn(value, self.args)
2334 2334 # Check for placeholder injection
2335 2335 _checkaliasarg(self.replacement, self.args)
2336 2336 except error.ParseError, inst:
2337 2337 self.error = _('failed to parse the definition of revset alias'
2338 2338 ' "%s": %s') % (self.name, parseerrordetail(inst))
2339 2339
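# Illustrative configuration (not part of this module): aliases of both forms
# shown in the docstring above are read from the [revsetalias] section of a
# configuration file, e.g.
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# findaliases() below builds a revsetalias object for each such entry.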
2340 2340 def _getalias(aliases, tree):
2341 2341 """If tree looks like an unexpanded alias, return it. Return None
2342 2342 otherwise.
2343 2343 """
2344 2344 if isinstance(tree, tuple) and tree:
2345 2345 if tree[0] == 'symbol' and len(tree) == 2:
2346 2346 name = tree[1]
2347 2347 alias = aliases.get(name)
2348 2348 if alias and alias.args is None and alias.tree == tree:
2349 2349 return alias
2350 2350 if tree[0] == 'func' and len(tree) > 1:
2351 2351 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2352 2352 name = tree[1][1]
2353 2353 alias = aliases.get(name)
2354 2354 if alias and alias.args is not None and alias.tree == tree[:2]:
2355 2355 return alias
2356 2356 return None
2357 2357
2358 2358 def _expandargs(tree, args):
2359 2359 """Replace _aliasarg instances with the substitution value of the
2360 2360 same name in args, recursively.
2361 2361 """
2362 2362 if not tree or not isinstance(tree, tuple):
2363 2363 return tree
2364 2364 arg = _getaliasarg(tree)
2365 2365 if arg is not None:
2366 2366 return args[arg]
2367 2367 return tuple(_expandargs(t, args) for t in tree)
2368 2368
2369 2369 def _expandaliases(aliases, tree, expanding, cache):
2370 2370 """Expand aliases in tree, recursively.
2371 2371
2372 2372 'aliases' is a dictionary mapping user defined aliases to
2373 2373 revsetalias objects.
2374 2374 """
2375 2375 if not isinstance(tree, tuple):
2376 2376 # Do not expand raw strings
2377 2377 return tree
2378 2378 alias = _getalias(aliases, tree)
2379 2379 if alias is not None:
2380 2380 if alias.error:
2381 2381 raise util.Abort(alias.error)
2382 2382 if alias in expanding:
2383 2383 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2384 2384 'detected') % alias.name)
2385 2385 expanding.append(alias)
2386 2386 if alias.name not in cache:
2387 2387 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2388 2388 expanding, cache)
2389 2389 result = cache[alias.name]
2390 2390 expanding.pop()
2391 2391 if alias.args is not None:
2392 2392 l = getlist(tree[2])
2393 2393 if len(l) != len(alias.args):
2394 2394 raise error.ParseError(
2395 2395 _('invalid number of arguments: %s') % len(l))
2396 2396 l = [_expandaliases(aliases, a, [], cache) for a in l]
2397 2397 result = _expandargs(result, dict(zip(alias.args, l)))
2398 2398 else:
2399 2399 result = tuple(_expandaliases(aliases, t, expanding, cache)
2400 2400 for t in tree)
2401 2401 return result
2402 2402
2403 2403 def findaliases(ui, tree, showwarning=None):
2404 2404 _checkaliasarg(tree)
2405 2405 aliases = {}
2406 2406 for k, v in ui.configitems('revsetalias'):
2407 2407 alias = revsetalias(k, v)
2408 2408 aliases[alias.name] = alias
2409 2409 tree = _expandaliases(aliases, tree, [], {})
2410 2410 if showwarning:
2411 2411         # warn about problematic (but not referenced) aliases
2412 2412 for name, alias in sorted(aliases.iteritems()):
2413 2413 if alias.error and not alias.warned:
2414 2414 showwarning(_('warning: %s\n') % (alias.error))
2415 2415 alias.warned = True
2416 2416 return tree
2417 2417
2418 2418 def foldconcat(tree):
2419 2419 """Fold elements to be concatenated by `##`
2420 2420 """
2421 2421 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2422 2422 return tree
2423 2423 if tree[0] == '_concat':
2424 2424 pending = [tree]
2425 2425 l = []
2426 2426 while pending:
2427 2427 e = pending.pop()
2428 2428 if e[0] == '_concat':
2429 2429 pending.extend(reversed(e[1:]))
2430 2430 elif e[0] in ('string', 'symbol'):
2431 2431 l.append(e[1])
2432 2432 else:
2433 2433 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2434 2434 raise error.ParseError(msg)
2435 2435 return ('string', ''.join(l))
2436 2436 else:
2437 2437 return tuple(foldconcat(t) for t in tree)
2438 2438
2439 2439 def parse(spec, lookup=None):
2440 2440 p = parser.parser(tokenize, elements)
2441 2441 return p.parse(spec, lookup=lookup)
2442 2442
2443 2443 def match(ui, spec, repo=None):
2444 2444 if not spec:
2445 2445 raise error.ParseError(_("empty query"))
2446 2446 lookup = None
2447 2447 if repo:
2448 2448 lookup = repo.__contains__
2449 2449 tree, pos = parse(spec, lookup)
2450 2450 if (pos != len(spec)):
2451 2451 raise error.ParseError(_("invalid token"), pos)
2452 2452 if ui:
2453 2453 tree = findaliases(ui, tree, showwarning=ui.warn)
2454 2454 tree = foldconcat(tree)
2455 2455 weight, tree = optimize(tree, True)
2456 2456 def mfunc(repo, subset=None):
2457 2457 if subset is None:
2458 2458 subset = fullreposet(repo)
2459 2459 if util.safehasattr(subset, 'isascending'):
2460 2460 result = getset(repo, subset, tree)
2461 2461 else:
2462 2462 result = getset(repo, baseset(subset), tree)
2463 2463 return result
2464 2464 return mfunc
2465 2465
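# Illustrative usage (not part of this module): match() turns a spec into a
# callable that is applied to a repository, e.g. given a ui object and repo:
#
#   m = match(ui, 'heads(default) - merge()')
#   revs = m(repo)            # a smartset of matching revision numbers
#
# When no subset is passed to the callable, the expression is evaluated
# against the full repository.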
2466 2466 def formatspec(expr, *args):
2467 2467 '''
2468 2468 This is a convenience function for using revsets internally, and
2469 2469 escapes arguments appropriately. Aliases are intentionally ignored
2470 2470 so that intended expression behavior isn't accidentally subverted.
2471 2471
2472 2472 Supported arguments:
2473 2473
2474 2474 %r = revset expression, parenthesized
2475 2475 %d = int(arg), no quoting
2476 2476 %s = string(arg), escaped and single-quoted
2477 2477 %b = arg.branch(), escaped and single-quoted
2478 2478 %n = hex(arg), single-quoted
2479 2479 %% = a literal '%'
2480 2480
2481 2481 Prefixing the type with 'l' specifies a parenthesized list of that type.
2482 2482
2483 2483 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2484 2484 '(10 or 11):: and ((this()) or (that()))'
2485 2485 >>> formatspec('%d:: and not %d::', 10, 20)
2486 2486 '10:: and not 20::'
2487 2487 >>> formatspec('%ld or %ld', [], [1])
2488 2488 "_list('') or 1"
2489 2489 >>> formatspec('keyword(%s)', 'foo\\xe9')
2490 2490 "keyword('foo\\\\xe9')"
2491 2491 >>> b = lambda: 'default'
2492 2492 >>> b.branch = b
2493 2493 >>> formatspec('branch(%b)', b)
2494 2494 "branch('default')"
2495 2495 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2496 2496 "root(_list('a\\x00b\\x00c\\x00d'))"
2497 2497 '''
2498 2498
2499 2499 def quote(s):
2500 2500 return repr(str(s))
2501 2501
2502 2502 def argtype(c, arg):
2503 2503 if c == 'd':
2504 2504 return str(int(arg))
2505 2505 elif c == 's':
2506 2506 return quote(arg)
2507 2507 elif c == 'r':
2508 2508 parse(arg) # make sure syntax errors are confined
2509 2509 return '(%s)' % arg
2510 2510 elif c == 'n':
2511 2511 return quote(node.hex(arg))
2512 2512 elif c == 'b':
2513 2513 return quote(arg.branch())
2514 2514
2515 2515 def listexp(s, t):
2516 2516 l = len(s)
2517 2517 if l == 0:
2518 2518 return "_list('')"
2519 2519 elif l == 1:
2520 2520 return argtype(t, s[0])
2521 2521 elif t == 'd':
2522 2522 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2523 2523 elif t == 's':
2524 2524 return "_list('%s')" % "\0".join(s)
2525 2525 elif t == 'n':
2526 2526 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2527 2527 elif t == 'b':
2528 2528 return "_list('%s')" % "\0".join(a.branch() for a in s)
2529 2529
2530 2530 m = l // 2
2531 2531 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2532 2532
2533 2533 ret = ''
2534 2534 pos = 0
2535 2535 arg = 0
2536 2536 while pos < len(expr):
2537 2537 c = expr[pos]
2538 2538 if c == '%':
2539 2539 pos += 1
2540 2540 d = expr[pos]
2541 2541 if d == '%':
2542 2542 ret += d
2543 2543 elif d in 'dsnbr':
2544 2544 ret += argtype(d, args[arg])
2545 2545 arg += 1
2546 2546 elif d == 'l':
2547 2547 # a list of some type
2548 2548 pos += 1
2549 2549 d = expr[pos]
2550 2550 ret += listexp(list(args[arg]), d)
2551 2551 arg += 1
2552 2552 else:
2553 2553 raise util.Abort('unexpected revspec format character %s' % d)
2554 2554 else:
2555 2555 ret += c
2556 2556 pos += 1
2557 2557
2558 2558 return ret
2559 2559
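# Illustrative usage (not part of this module): formatspec() is meant for
# building revset expressions programmatically with proper escaping, e.g.
#
#   expr = formatspec('keyword(%s) and %ld::', "issue 123", [10, 11, 12])
#
# Callers commonly reach it indirectly through helpers such as repo.revs()
# (assuming the localrepository.revs() wrapper, which delegates here).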
2560 2560 def prettyformat(tree):
2561 2561 def _prettyformat(tree, level, lines):
2562 2562 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2563 2563 lines.append((level, str(tree)))
2564 2564 else:
2565 2565 lines.append((level, '(%s' % tree[0]))
2566 2566 for s in tree[1:]:
2567 2567 _prettyformat(s, level + 1, lines)
2568 2568 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2569 2569
2570 2570 lines = []
2571 2571 _prettyformat(tree, 0, lines)
2572 2572 output = '\n'.join((' '*l + s) for l, s in lines)
2573 2573 return output
2574 2574
2575 2575 def depth(tree):
2576 2576 if isinstance(tree, tuple):
2577 2577 return max(map(depth, tree)) + 1
2578 2578 else:
2579 2579 return 0
2580 2580
2581 2581 def funcsused(tree):
2582 2582 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2583 2583 return set()
2584 2584 else:
2585 2585 funcs = set()
2586 2586 for s in tree[1:]:
2587 2587 funcs |= funcsused(s)
2588 2588 if tree[0] == 'func':
2589 2589 funcs.add(tree[1][1])
2590 2590 return funcs
2591 2591
2592 2592 class abstractsmartset(object):
2593 2593
2594 2594 def __nonzero__(self):
2595 2595 """True if the smartset is not empty"""
2596 2596 raise NotImplementedError()
2597 2597
2598 2598 def __contains__(self, rev):
2599 2599 """provide fast membership testing"""
2600 2600 raise NotImplementedError()
2601 2601
2602 2602 def __iter__(self):
2603 2603 """iterate the set in the order it is supposed to be iterated"""
2604 2604 raise NotImplementedError()
2605 2605
2606 2606 # Attributes containing a function to perform a fast iteration in a given
2607 2607 # direction. A smartset can have none, one, or both defined.
2608 2608 #
2609 2609 # Default value is None instead of a function returning None to avoid
2610 2610 # initializing an iterator just for testing if a fast method exists.
2611 2611 fastasc = None
2612 2612 fastdesc = None
2613 2613
2614 2614 def isascending(self):
2615 2615 """True if the set will iterate in ascending order"""
2616 2616 raise NotImplementedError()
2617 2617
2618 2618 def isdescending(self):
2619 2619 """True if the set will iterate in descending order"""
2620 2620 raise NotImplementedError()
2621 2621
2622 2622 def min(self):
2623 2623 """return the minimum element in the set"""
2624 2624 if self.fastasc is not None:
2625 2625 for r in self.fastasc():
2626 2626 return r
2627 2627 raise ValueError('arg is an empty sequence')
2628 2628 return min(self)
2629 2629
2630 2630 def max(self):
2631 2631 """return the maximum element in the set"""
2632 2632 if self.fastdesc is not None:
2633 2633 for r in self.fastdesc():
2634 2634 return r
2635 2635 raise ValueError('arg is an empty sequence')
2636 2636 return max(self)
2637 2637
2638 2638 def first(self):
2639 2639 """return the first element in the set (user iteration perspective)
2640 2640
2641 2641 Return None if the set is empty"""
2642 2642 raise NotImplementedError()
2643 2643
2644 2644 def last(self):
2645 2645 """return the last element in the set (user iteration perspective)
2646 2646
2647 2647 Return None if the set is empty"""
2648 2648 raise NotImplementedError()
2649 2649
2650 2650 def __len__(self):
2651 2651         """return the length of the smartset
2652 2652 
2653 2653         This can be expensive on a smartset that could otherwise be lazy."""
2654 2654 raise NotImplementedError()
2655 2655
2656 2656 def reverse(self):
2657 2657 """reverse the expected iteration order"""
2658 2658 raise NotImplementedError()
2659 2659
2660 2660 def sort(self, reverse=True):
2661 2661 """get the set to iterate in an ascending or descending order"""
2662 2662 raise NotImplementedError()
2663 2663
2664 2664 def __and__(self, other):
2665 2665 """Returns a new object with the intersection of the two collections.
2666 2666
2667 2667 This is part of the mandatory API for smartset."""
2668 2668 return self.filter(other.__contains__, cache=False)
2669 2669
2670 2670 def __add__(self, other):
2671 2671 """Returns a new object with the union of the two collections.
2672 2672
2673 2673 This is part of the mandatory API for smartset."""
2674 2674 return addset(self, other)
2675 2675
2676 2676 def __sub__(self, other):
2677 2677         """Returns a new object with the subtraction of the two collections.
2678 2678
2679 2679 This is part of the mandatory API for smartset."""
2680 2680 c = other.__contains__
2681 2681 return self.filter(lambda r: not c(r), cache=False)
2682 2682
2683 2683 def filter(self, condition, cache=True):
2684 2684 """Returns this smartset filtered by condition as a new smartset.
2685 2685
2686 2686 `condition` is a callable which takes a revision number and returns a
2687 2687 boolean.
2688 2688
2689 2689 This is part of the mandatory API for smartset."""
2690 2690         # builtin functions cannot be cached, but they do not need to be
2691 2691 if cache and util.safehasattr(condition, 'func_code'):
2692 2692 condition = util.cachefunc(condition)
2693 2693 return filteredset(self, condition)
2694 2694
2695 2695 class baseset(abstractsmartset):
2696 2696 """Basic data structure that represents a revset and contains the basic
2697 2697 operation that it should be able to perform.
2698 2698
2699 2699 Every method in this class should be implemented by any smartset class.
2700 2700 """
2701 2701 def __init__(self, data=()):
2702 2702 if not isinstance(data, list):
2703 2703 data = list(data)
2704 2704 self._list = data
2705 2705 self._ascending = None
2706 2706
2707 2707 @util.propertycache
2708 2708 def _set(self):
2709 2709 return set(self._list)
2710 2710
2711 2711 @util.propertycache
2712 2712 def _asclist(self):
2713 2713 asclist = self._list[:]
2714 2714 asclist.sort()
2715 2715 return asclist
2716 2716
2717 2717 def __iter__(self):
2718 2718 if self._ascending is None:
2719 2719 return iter(self._list)
2720 2720 elif self._ascending:
2721 2721 return iter(self._asclist)
2722 2722 else:
2723 2723 return reversed(self._asclist)
2724 2724
2725 2725 def fastasc(self):
2726 2726 return iter(self._asclist)
2727 2727
2728 2728 def fastdesc(self):
2729 2729 return reversed(self._asclist)
2730 2730
2731 2731 @util.propertycache
2732 2732 def __contains__(self):
2733 2733 return self._set.__contains__
2734 2734
2735 2735 def __nonzero__(self):
2736 2736 return bool(self._list)
2737 2737
2738 2738 def sort(self, reverse=False):
2739 2739 self._ascending = not bool(reverse)
2740 2740
2741 2741 def reverse(self):
2742 2742 if self._ascending is None:
2743 2743 self._list.reverse()
2744 2744 else:
2745 2745 self._ascending = not self._ascending
2746 2746
2747 2747 def __len__(self):
2748 2748 return len(self._list)
2749 2749
2750 2750 def isascending(self):
2751 2751 """Returns True if the collection is ascending order, False if not.
2752 2752
2753 2753 This is part of the mandatory API for smartset."""
2754 2754 if len(self) <= 1:
2755 2755 return True
2756 2756 return self._ascending is not None and self._ascending
2757 2757
2758 2758 def isdescending(self):
2759 2759 """Returns True if the collection is descending order, False if not.
2760 2760
2761 2761 This is part of the mandatory API for smartset."""
2762 2762 if len(self) <= 1:
2763 2763 return True
2764 2764 return self._ascending is not None and not self._ascending
2765 2765
2766 2766 def first(self):
2767 2767 if self:
2768 2768 if self._ascending is None:
2769 2769 return self._list[0]
2770 2770 elif self._ascending:
2771 2771 return self._asclist[0]
2772 2772 else:
2773 2773 return self._asclist[-1]
2774 2774 return None
2775 2775
2776 2776 def last(self):
2777 2777 if self:
2778 2778 if self._ascending is None:
2779 2779 return self._list[-1]
2780 2780 elif self._ascending:
2781 2781 return self._asclist[-1]
2782 2782 else:
2783 2783 return self._asclist[0]
2784 2784 return None
2785 2785
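# Illustrative example (not part of this module): baseset wraps a plain list
# and combines with other smartsets through the operators defined on
# abstractsmartset, e.g.
#
#   s = baseset([3, 1, 2])
#   s.sort()                      # now iterates 1, 2, 3
#   both = s & baseset([2, 5])    # filteredset containing only 2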
2786 2786 class filteredset(abstractsmartset):
2787 2787 """Duck type for baseset class which iterates lazily over the revisions in
2788 2788 the subset and contains a function which tests for membership in the
2789 2789 revset
2790 2790 """
2791 2791 def __init__(self, subset, condition=lambda x: True):
2792 2792 """
2793 2793         condition: a function that decides whether a revision in the subset
2794 2794 belongs to the revset or not.
2795 2795 """
2796 2796 self._subset = subset
2797 2797 self._condition = condition
2798 2798 self._cache = {}
2799 2799
2800 2800 def __contains__(self, x):
2801 2801 c = self._cache
2802 2802 if x not in c:
2803 2803 v = c[x] = x in self._subset and self._condition(x)
2804 2804 return v
2805 2805 return c[x]
2806 2806
2807 2807 def __iter__(self):
2808 2808 return self._iterfilter(self._subset)
2809 2809
2810 2810 def _iterfilter(self, it):
2811 2811 cond = self._condition
2812 2812 for x in it:
2813 2813 if cond(x):
2814 2814 yield x
2815 2815
2816 2816 @property
2817 2817 def fastasc(self):
2818 2818 it = self._subset.fastasc
2819 2819 if it is None:
2820 2820 return None
2821 2821 return lambda: self._iterfilter(it())
2822 2822
2823 2823 @property
2824 2824 def fastdesc(self):
2825 2825 it = self._subset.fastdesc
2826 2826 if it is None:
2827 2827 return None
2828 2828 return lambda: self._iterfilter(it())
2829 2829
2830 2830 def __nonzero__(self):
2831 2831 for r in self:
2832 2832 return True
2833 2833 return False
2834 2834
2835 2835 def __len__(self):
2836 2836 # Basic implementation to be changed in future patches.
2837 2837 l = baseset([r for r in self])
2838 2838 return len(l)
2839 2839
2840 2840 def sort(self, reverse=False):
2841 2841 self._subset.sort(reverse=reverse)
2842 2842
2843 2843 def reverse(self):
2844 2844 self._subset.reverse()
2845 2845
2846 2846 def isascending(self):
2847 2847 return self._subset.isascending()
2848 2848
2849 2849 def isdescending(self):
2850 2850 return self._subset.isdescending()
2851 2851
2852 2852 def first(self):
2853 2853 for x in self:
2854 2854 return x
2855 2855 return None
2856 2856
2857 2857 def last(self):
2858 2858 it = None
2859 2859 if self._subset.isascending:
2860 2860         if self._subset.isascending():
2861 2861             it = self.fastdesc
2862 2862         elif self._subset.isdescending():
2863 2863             it = self.fastasc
2864 2864 # slowly consume everything. This needs improvement
2865 2865 it = lambda: reversed(list(self))
2866 2866 for x in it():
2867 2867 return x
2868 2868 return None
2869 2869
2870 2870 class addset(abstractsmartset):
2871 2871 """Represent the addition of two sets
2872 2872
2873 2873 Wrapper structure for lazily adding two structures without losing much
2874 2874 performance on the __contains__ method
2875 2875
2876 2876 If the ascending attribute is set, that means the two structures are
2877 2877 ordered in either an ascending or descending way. Therefore, we can add
2878 2878 them maintaining the order by iterating over both at the same time
2879 2879 """
2880 2880 def __init__(self, revs1, revs2, ascending=None):
2881 2881 self._r1 = revs1
2882 2882 self._r2 = revs2
2883 2883 self._iter = None
2884 2884 self._ascending = ascending
2885 2885 self._genlist = None
2886 2886 self._asclist = None
2887 2887
2888 2888 def __len__(self):
2889 2889 return len(self._list)
2890 2890
2891 2891 def __nonzero__(self):
2892 2892 return bool(self._r1) or bool(self._r2)
2893 2893
2894 2894 @util.propertycache
2895 2895 def _list(self):
2896 2896 if not self._genlist:
2897 2897 self._genlist = baseset(self._iterator())
2898 2898 return self._genlist
2899 2899
2900 2900 def _iterator(self):
2901 2901 """Iterate over both collections without repeating elements
2902 2902
2903 2903 If the ascending attribute is not set, iterate over the first one and
2904 2904 then over the second one checking for membership on the first one so we
2905 2905         don't yield any duplicates.
2906 2906
2907 2907 If the ascending attribute is set, iterate over both collections at the
2908 2908 same time, yielding only one value at a time in the given order.
2909 2909 """
2910 2910 if self._ascending is None:
2911 2911 def gen():
2912 2912 for r in self._r1:
2913 2913 yield r
2914 2914 inr1 = self._r1.__contains__
2915 2915 for r in self._r2:
2916 2916 if not inr1(r):
2917 2917 yield r
2918 2918 gen = gen()
2919 2919 else:
2920 2920 iter1 = iter(self._r1)
2921 2921 iter2 = iter(self._r2)
2922 2922 gen = self._iterordered(self._ascending, iter1, iter2)
2923 2923 return gen
2924 2924
2925 2925 def __iter__(self):
2926 2926 if self._ascending is None:
2927 2927 if self._genlist:
2928 2928 return iter(self._genlist)
2929 2929 return iter(self._iterator())
2930 2930 self._trysetasclist()
2931 2931 if self._ascending:
2932 2932 it = self.fastasc
2933 2933 else:
2934 2934 it = self.fastdesc
2935 2935 if it is None:
2936 2936 # consume the gen and try again
2937 2937 self._list
2938 2938 return iter(self)
2939 2939 return it()
2940 2940
2941 2941 def _trysetasclist(self):
2942 2942 """populate the _asclist attribute if possible and necessary"""
2943 2943 if self._genlist is not None and self._asclist is None:
2944 2944 self._asclist = sorted(self._genlist)
2945 2945
2946 2946 @property
2947 2947 def fastasc(self):
2948 2948 self._trysetasclist()
2949 2949 if self._asclist is not None:
2950 2950 return self._asclist.__iter__
2951 2951 iter1 = self._r1.fastasc
2952 2952 iter2 = self._r2.fastasc
2953 2953 if None in (iter1, iter2):
2954 2954 return None
2955 2955 return lambda: self._iterordered(True, iter1(), iter2())
2956 2956
2957 2957 @property
2958 2958 def fastdesc(self):
2959 2959 self._trysetasclist()
2960 2960 if self._asclist is not None:
2961 2961 return self._asclist.__reversed__
2962 2962 iter1 = self._r1.fastdesc
2963 2963 iter2 = self._r2.fastdesc
2964 2964 if None in (iter1, iter2):
2965 2965 return None
2966 2966 return lambda: self._iterordered(False, iter1(), iter2())
2967 2967
2968 2968 def _iterordered(self, ascending, iter1, iter2):
2969 2969 """produce an ordered iteration from two iterators with the same order
2970 2970
2971 2971         The ascending parameter is used to indicate the iteration direction.
2972 2972 """
2973 2973 choice = max
2974 2974 if ascending:
2975 2975 choice = min
2976 2976
2977 2977 val1 = None
2978 2978 val2 = None
2979 2979
2983 2983 try:
2984 2984 # Consume both iterators in an ordered way until one is
2985 2985 # empty
2986 2986 while True:
2987 2987 if val1 is None:
2988 2988 val1 = iter1.next()
2989 2989 if val2 is None:
2990 2990 val2 = iter2.next()
2991 2991 next = choice(val1, val2)
2992 2992 yield next
2993 2993 if val1 == next:
2994 2994 val1 = None
2995 2995 if val2 == next:
2996 2996 val2 = None
2997 2997 except StopIteration:
2998 2998 # Flush any remaining values and consume the other one
2999 2999 it = iter2
3000 3000 if val1 is not None:
3001 3001 yield val1
3002 3002 it = iter1
3003 3003 elif val2 is not None:
3004 3004 # might have been equality and both are empty
3005 3005 yield val2
3006 3006 for val in it:
3007 3007 yield val
3008 3008
3009 3009 def __contains__(self, x):
3010 3010 return x in self._r1 or x in self._r2
3011 3011
3012 3012 def sort(self, reverse=False):
3013 3013 """Sort the added set
3014 3014
3015 3015 For this we use the cached list with all the generated values and if we
3016 3016 know they are ascending or descending we can sort them in a smart way.
3017 3017 """
3018 3018 self._ascending = not reverse
3019 3019
3020 3020 def isascending(self):
3021 3021 return self._ascending is not None and self._ascending
3022 3022
3023 3023 def isdescending(self):
3024 3024 return self._ascending is not None and not self._ascending
3025 3025
3026 3026 def reverse(self):
3027 3027 if self._ascending is None:
3028 3028 self._list.reverse()
3029 3029 else:
3030 3030 self._ascending = not self._ascending
3031 3031
3032 3032 def first(self):
3033 3033 for x in self:
3034 3034 return x
3035 3035 return None
3036 3036
3037 3037 def last(self):
3038 3038 self.reverse()
3039 3039 val = self.first()
3040 3040 self.reverse()
3041 3041 return val
3042 3042
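# Illustrative example (not part of this module): addset lazily unions two
# smartsets, merging them in order when an ordering is known, e.g.
#
#   a = addset(baseset([1, 3, 5]), baseset([2, 3, 4]), ascending=True)
#   list(a)      # 1, 2, 3, 4, 5 -- duplicates are yielded only once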
3043 3043 class generatorset(abstractsmartset):
3044 3044 """Wrap a generator for lazy iteration
3045 3045
3046 3046 Wrapper structure for generators that provides lazy membership and can
3047 3047 be iterated more than once.
3048 3048 When asked for membership it generates values until either it finds the
3049 3049 requested one or has gone through all the elements in the generator
3050 3050 """
3051 3051 def __init__(self, gen, iterasc=None):
3052 3052 """
3053 3053 gen: a generator producing the values for the generatorset.
3054 3054 """
3055 3055 self._gen = gen
3056 3056 self._asclist = None
3057 3057 self._cache = {}
3058 3058 self._genlist = []
3059 3059 self._finished = False
3060 3060 self._ascending = True
3061 3061 if iterasc is not None:
3062 3062 if iterasc:
3063 3063 self.fastasc = self._iterator
3064 3064 self.__contains__ = self._asccontains
3065 3065 else:
3066 3066 self.fastdesc = self._iterator
3067 3067 self.__contains__ = self._desccontains
3068 3068
3069 3069 def __nonzero__(self):
3070 3070 for r in self:
3071 3071 return True
3072 3072 return False
3073 3073
3074 3074 def __contains__(self, x):
3075 3075 if x in self._cache:
3076 3076 return self._cache[x]
3077 3077
3078 3078 # Use new values only, as existing values would be cached.
3079 3079 for l in self._consumegen():
3080 3080 if l == x:
3081 3081 return True
3082 3082
3083 3083 self._cache[x] = False
3084 3084 return False
3085 3085
3086 3086 def _asccontains(self, x):
3087 3087 """version of contains optimised for ascending generator"""
3088 3088 if x in self._cache:
3089 3089 return self._cache[x]
3090 3090
3091 3091 # Use new values only, as existing values would be cached.
3092 3092 for l in self._consumegen():
3093 3093 if l == x:
3094 3094 return True
3095 3095 if l > x:
3096 3096 break
3097 3097
3098 3098 self._cache[x] = False
3099 3099 return False
3100 3100
3101 3101 def _desccontains(self, x):
3102 3102 """version of contains optimised for descending generator"""
3103 3103 if x in self._cache:
3104 3104 return self._cache[x]
3105 3105
3106 3106 # Use new values only, as existing values would be cached.
3107 3107 for l in self._consumegen():
3108 3108 if l == x:
3109 3109 return True
3110 3110 if l < x:
3111 3111 break
3112 3112
3113 3113 self._cache[x] = False
3114 3114 return False
3115 3115
3116 3116 def __iter__(self):
3117 3117 if self._ascending:
3118 3118 it = self.fastasc
3119 3119 else:
3120 3120 it = self.fastdesc
3121 3121 if it is not None:
3122 3122 return it()
3123 3123 # we need to consume the iterator
3124 3124 for x in self._consumegen():
3125 3125 pass
3126 3126 # recall the same code
3127 3127 return iter(self)
3128 3128
3129 3129 def _iterator(self):
3130 3130 if self._finished:
3131 3131 return iter(self._genlist)
3132 3132
3133 3133 # We have to use this complex iteration strategy to allow multiple
3134 3134         # iterations at the same time. We need to be able to pick up revisions
3135 3135         # pulled from _consumegen and added to genlist by another iteration.
3136 3136 #
3137 3137         # Getting rid of it would provide about a 15% speed up on this
3138 3138 # iteration.
3139 3139 genlist = self._genlist
3140 3140 nextrev = self._consumegen().next
3141 3141 _len = len # cache global lookup
3142 3142 def gen():
3143 3143 i = 0
3144 3144 while True:
3145 3145 if i < _len(genlist):
3146 3146 yield genlist[i]
3147 3147 else:
3148 3148 yield nextrev()
3149 3149 i += 1
3150 3150 return gen()
3151 3151
3152 3152 def _consumegen(self):
3153 3153 cache = self._cache
3154 3154 genlist = self._genlist.append
3155 3155 for item in self._gen:
3156 3156 cache[item] = True
3157 3157 genlist(item)
3158 3158 yield item
3159 3159 if not self._finished:
3160 3160 self._finished = True
3161 3161 asc = self._genlist[:]
3162 3162 asc.sort()
3163 3163 self._asclist = asc
3164 3164 self.fastasc = asc.__iter__
3165 3165 self.fastdesc = asc.__reversed__
3166 3166
3167 3167 def __len__(self):
3168 3168 for x in self._consumegen():
3169 3169 pass
3170 3170 return len(self._genlist)
3171 3171
3172 3172 def sort(self, reverse=False):
3173 3173 self._ascending = not reverse
3174 3174
3175 3175 def reverse(self):
3176 3176 self._ascending = not self._ascending
3177 3177
3178 3178 def isascending(self):
3179 3179 return self._ascending
3180 3180
3181 3181 def isdescending(self):
3182 3182 return not self._ascending
3183 3183
3184 3184 def first(self):
3185 3185 if self._ascending:
3186 3186 it = self.fastasc
3187 3187 else:
3188 3188 it = self.fastdesc
3189 3189 if it is None:
3190 3190 # we need to consume all and try again
3191 3191 for x in self._consumegen():
3192 3192 pass
3193 3193 return self.first()
3194 3194 if self:
3195 3195 return it().next()
3196 3196 return None
3197 3197
3198 3198 def last(self):
3199 3199 if self._ascending:
3200 3200 it = self.fastdesc
3201 3201 else:
3202 3202 it = self.fastasc
3203 3203 if it is None:
3204 3204 # we need to consume all and try again
3205 3205 for x in self._consumegen():
3206 3206 pass
3207 3207 return self.first()
3208 3208 if self:
3209 3209 return it().next()
3210 3210 return None
3211 3211
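# Illustrative example (not part of this module): generatorset makes a plain
# generator reusable and gives it lazy membership testing, e.g.
#
#   gs = generatorset(iter([0, 2, 4, 6]), iterasc=True)
#   2 in gs      # True, consumes the generator only as far as needed
#   list(gs)     # 0, 2, 4, 6 -- consumed values are cached for re-iteration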
3212 3212 class spanset(abstractsmartset):
3213 3213 """Duck type for baseset class which represents a range of revisions and
3214 3214 can work lazily and without having all the range in memory
3215 3215
3216 3216     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3217 3217     notable points:
3218 3218     - when x > y it will be automatically descending,
3219 3219     - revisions filtered with this repoview will be skipped.
3220 3220
3221 3221 """
3222 3222 def __init__(self, repo, start=0, end=None):
3223 3223 """
3224 3224         start: first revision included in the set
3225 3225               (defaults to 0)
3226 3226         end: first revision excluded (last + 1)
3227 3227               (defaults to len(repo))
3228 3228
3229 3229 Spanset will be descending if `end` < `start`.
3230 3230 """
3231 3231 if end is None:
3232 3232 end = len(repo)
3233 3233 self._ascending = start <= end
3234 3234 if not self._ascending:
3235 3235             start, end = end + 1, start + 1
3236 3236 self._start = start
3237 3237 self._end = end
3238 3238 self._hiddenrevs = repo.changelog.filteredrevs
3239 3239
3240 3240 def sort(self, reverse=False):
3241 3241 self._ascending = not reverse
3242 3242
3243 3243 def reverse(self):
3244 3244 self._ascending = not self._ascending
3245 3245
3246 3246 def _iterfilter(self, iterrange):
3247 3247 s = self._hiddenrevs
3248 3248 for r in iterrange:
3249 3249 if r not in s:
3250 3250 yield r
3251 3251
3252 3252 def __iter__(self):
3253 3253 if self._ascending:
3254 3254 return self.fastasc()
3255 3255 else:
3256 3256 return self.fastdesc()
3257 3257
3258 3258 def fastasc(self):
3259 3259 iterrange = xrange(self._start, self._end)
3260 3260 if self._hiddenrevs:
3261 3261 return self._iterfilter(iterrange)
3262 3262 return iter(iterrange)
3263 3263
3264 3264 def fastdesc(self):
3265 3265 iterrange = xrange(self._end - 1, self._start - 1, -1)
3266 3266 if self._hiddenrevs:
3267 3267 return self._iterfilter(iterrange)
3268 3268 return iter(iterrange)
3269 3269
3270 3270 def __contains__(self, rev):
3271 3271 hidden = self._hiddenrevs
3272 3272 return ((self._start <= rev < self._end)
3273 3273 and not (hidden and rev in hidden))
3274 3274
3275 3275 def __nonzero__(self):
3276 3276 for r in self:
3277 3277 return True
3278 3278 return False
3279 3279
3280 3280 def __len__(self):
3281 3281 if not self._hiddenrevs:
3282 3282 return abs(self._end - self._start)
3283 3283 else:
3284 3284 count = 0
3285 3285 start = self._start
3286 3286 end = self._end
3287 3287 for rev in self._hiddenrevs:
3288 3288 if (end < rev <= start) or (start <= rev < end):
3289 3289 count += 1
3290 3290 return abs(self._end - self._start) - count
3291 3291
3292 3292 def isascending(self):
3293 3293 return self._ascending
3294 3294
3295 3295 def isdescending(self):
3296 3296 return not self._ascending
3297 3297
3298 3298 def first(self):
3299 3299 if self._ascending:
3300 3300 it = self.fastasc
3301 3301 else:
3302 3302 it = self.fastdesc
3303 3303 for x in it():
3304 3304 return x
3305 3305 return None
3306 3306
3307 3307 def last(self):
3308 3308 if self._ascending:
3309 3309 it = self.fastdesc
3310 3310 else:
3311 3311 it = self.fastasc
3312 3312 for x in it():
3313 3313 return x
3314 3314 return None
3315 3315
3316 3316 class fullreposet(spanset):
3317 3317 """a set containing all revisions in the repo
3318 3318
3319 3319 This class exists to host special optimization and magic to handle virtual
3320 3320 revisions such as "null".
3321 3321 """
3322 3322
3323 3323 def __init__(self, repo):
3324 3324 super(fullreposet, self).__init__(repo)
3325 3325
3326 3326 def __contains__(self, rev):
3327 3327 # assumes the given rev is valid
3328 3328 hidden = self._hiddenrevs
3329 3329 return not (hidden and rev in hidden)
3330 3330
3331 3331 def __and__(self, other):
3332 3332 """As self contains the whole repo, all of the other set should also be
3333 3333 in self. Therefore `self & other = other`.
3334 3334
3335 3335 This boldly assumes the other contains valid revs only.
3336 3336 """
3337 3337 # other is not a smartset, make it so
3338 3338 if not util.safehasattr(other, 'isascending'):
3339 3339 # filter out hidden revisions
3340 3340 # (this boldly assumes all smartsets are pure)
3341 3341 #
3342 3342 # `other` was used with "&", let's assume this is a set-like
3343 3343 # object.
3344 3344 other = baseset(other - self._hiddenrevs)
3345 3345
3346 3346 other.sort(reverse=self.isdescending())
3347 3347 return other
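# Illustrative sketch (editor's addition, not part of the upstream file),
# assuming revisions 2, 5 and 7 exist and are visible in repo:
#
#     rs = fullreposet(repo) & baseset([7, 2, 5])
#     list(rs)    # -> [2, 5, 7]; the other set is returned, re-sorted to match
#                 # the direction of the full set (ascending by default)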
3348 3348
3349 3349 # tell hggettext to extract docstrings from these functions:
3350 3350 i18nfunctions = symbols.values()