revset: don't import discovery at module level...
Gregory Szorc
r24722:02a5618e default
@@ -1,3445 +1,3447
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 import parser, util, error, discovery, hbisect, phases
9 import parser, util, error, hbisect, phases
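# note (editor's annotation): 'discovery' is deliberately no longer imported
# here; outgoing() below imports it lazily ("Avoid cycles") so that loading
# this module does not trigger an import cycle through discovery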
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revqueue, revsnode = None, None
29 29 h = []
30 30
31 31 revs.sort(reverse=True)
32 32 revqueue = util.deque(revs)
33 33 if revqueue:
34 34 revsnode = revqueue.popleft()
35 35 heapq.heappush(h, -revsnode)
36 36
37 37 seen = set()
38 38 while h:
39 39 current = -heapq.heappop(h)
40 40 if current not in seen:
41 41 if revsnode and current == revsnode:
42 42 if revqueue:
43 43 revsnode = revqueue.popleft()
44 44 heapq.heappush(h, -revsnode)
45 45 seen.add(current)
46 46 yield current
47 47 for parent in cl.parentrevs(current)[:cut]:
48 48 if parent != node.nullrev:
49 49 heapq.heappush(h, -parent)
50 50
51 51 return generatorset(iterate(), iterasc=False)
52 52
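# Editor's sketch (illustrative, not part of this changeset): the heap-based
# ancestor walk above, reduced to plain data.  'parents' is a hypothetical
# {rev: (p1, p2)} map mirroring cl.parentrevs(), with -1 for the null
# revision; the lazy rev queue of _revancestors() is folded into the initial
# heap for brevity.
def _demo_ancestorwalk(parents, revs, cut=None):
    """Yield ancestors of 'revs' (inclusive) in descending revision order.

    >>> parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
    >>> list(_demo_ancestorwalk(parents, [3]))
    [3, 2, 1, 0]
    """
    h = [-r for r in revs]        # negate revs so heapq behaves as a max-heap
    heapq.heapify(h)
    seen = set()
    while h:
        current = -heapq.heappop(h)
        if current not in seen:
            seen.add(current)
            yield current
            for parent in parents[current][:cut]:
                if parent != -1:
                    heapq.heappush(h, -parent)
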
53 53 def _revdescendants(repo, revs, followfirst):
54 54 """Like revlog.descendants() but supports followfirst."""
55 55 if followfirst:
56 56 cut = 1
57 57 else:
58 58 cut = None
59 59
60 60 def iterate():
61 61 cl = repo.changelog
62 62 first = min(revs)
63 63 nullrev = node.nullrev
64 64 if first == nullrev:
65 65 # Are there nodes with a null first parent and a non-null
66 66 # second one? Maybe. Do we care? Probably not.
67 67 for i in cl:
68 68 yield i
69 69 else:
70 70 seen = set(revs)
71 71 for i in cl.revs(first + 1):
72 72 for x in cl.parentrevs(i)[:cut]:
73 73 if x != nullrev and x in seen:
74 74 seen.add(i)
75 75 yield i
76 76 break
77 77
78 78 return generatorset(iterate(), iterasc=True)
79 79
80 80 def _revsbetween(repo, roots, heads):
81 81 """Return all paths between roots and heads, inclusive of both endpoint
82 82 sets."""
83 83 if not roots:
84 84 return baseset()
85 85 parentrevs = repo.changelog.parentrevs
86 86 visit = list(heads)
87 87 reachable = set()
88 88 seen = {}
89 89 minroot = min(roots)
90 90 roots = set(roots)
91 91 # open-code the post-order traversal due to the tiny size of
92 92 # sys.getrecursionlimit()
93 93 while visit:
94 94 rev = visit.pop()
95 95 if rev in roots:
96 96 reachable.add(rev)
97 97 parents = parentrevs(rev)
98 98 seen[rev] = parents
99 99 for parent in parents:
100 100 if parent >= minroot and parent not in seen:
101 101 visit.append(parent)
102 102 if not reachable:
103 103 return baseset()
104 104 for rev in sorted(seen):
105 105 for parent in seen[rev]:
106 106 if parent in reachable:
107 107 reachable.add(rev)
108 108 return baseset(sorted(reachable))
109 109
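# Editor's sketch (illustrative, not part of this changeset): what
# _revsbetween() computes, restated via ancestor closures on a plain
# {rev: (p1, p2)} map -- a revision is "between" when it is an ancestor (or
# member) of a head and a descendant (or member) of a root.  Quadratic and
# hypothetical; for illustration only.
def _demo_between(parents, roots, heads):
    """
    >>> parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1)}
    >>> _demo_between(parents, [0], [2])
    [0, 1, 2]
    """
    def ancestors(revs):
        seen, visit = set(), list(revs)
        while visit:
            r = visit.pop()
            if r != -1 and r not in seen:
                seen.add(r)
                visit.extend(parents[r])
        return seen
    reachedfromheads = ancestors(heads)
    return sorted(r for r in reachedfromheads
                  if ancestors([r]) & set(roots))
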
110 110 elements = {
111 111 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
112 112 "##": (20, None, ("_concat", 20)),
113 113 "~": (18, None, ("ancestor", 18)),
114 114 "^": (18, None, ("parent", 18), ("parentpost", 18)),
115 115 "-": (5, ("negate", 19), ("minus", 5)),
116 116 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
117 117 ("dagrangepost", 17)),
118 118 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
119 119 ("dagrangepost", 17)),
120 120 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
121 121 "not": (10, ("not", 10)),
122 122 "!": (10, ("not", 10)),
123 123 "and": (5, None, ("and", 5)),
124 124 "&": (5, None, ("and", 5)),
125 125 "%": (5, None, ("only", 5), ("onlypost", 5)),
126 126 "or": (4, None, ("or", 4)),
127 127 "|": (4, None, ("or", 4)),
128 128 "+": (4, None, ("or", 4)),
129 129 ",": (2, None, ("list", 2)),
130 130 ")": (0, None, None),
131 131 "symbol": (0, ("symbol",), None),
132 132 "string": (0, ("string",), None),
133 133 "end": (0, None, None),
134 134 }
135 135
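# Editor's note (illustrative, not part of this changeset): each entry above
# appears to be (binding strength, prefix action[, infix action[, suffix
# action]]) as consumed by the generic parser module.  A hypothetical helper
# for eyeballing operator precedence:
def _demo_precedence():
    """Return (strength, token) pairs, strongest-binding first.

    >>> _demo_precedence()[0]
    (21, '(')
    """
    return sorted(((v[0], k) for k, v in elements.items()), reverse=True)
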
136 136 keywords = set(['and', 'or', 'not'])
137 137
138 138 # default set of valid characters for the initial letter of symbols
139 139 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
140 140 if c.isalnum() or c in '._@' or ord(c) > 127)
141 141
142 142 # default set of valid characters for non-initial letters of symbols
143 143 _symletters = set(c for c in [chr(i) for i in xrange(256)]
144 144 if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 145
146 146 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
147 147 '''
148 148 Parse a revset statement into a stream of tokens
149 149
150 150 ``syminitletters`` is the set of valid characters for the initial
151 151 letter of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for initial
154 154 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
155 155
156 156 ``symletters`` is the set of valid characters for non-initial
157 157 letters of symbols.
158 158
159 159 By default, character ``c`` is recognized as valid for non-initial
160 160 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
161 161
162 162 Check that @ is a valid unquoted token character (issue3686):
163 163 >>> list(tokenize("@::"))
164 164 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
165 165
166 166 '''
167 167 if syminitletters is None:
168 168 syminitletters = _syminitletters
169 169 if symletters is None:
170 170 symletters = _symletters
171 171
172 172 pos, l = 0, len(program)
173 173 while pos < l:
174 174 c = program[pos]
175 175 if c.isspace(): # skip inter-token whitespace
176 176 pass
177 177 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
178 178 yield ('::', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
181 181 yield ('..', None, pos)
182 182 pos += 1 # skip ahead
183 183 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
184 184 yield ('##', None, pos)
185 185 pos += 1 # skip ahead
186 186 elif c in "():,-|&+!~^%": # handle simple operators
187 187 yield (c, None, pos)
188 188 elif (c in '"\'' or c == 'r' and
189 189 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
190 190 if c == 'r':
191 191 pos += 1
192 192 c = program[pos]
193 193 decode = lambda x: x
194 194 else:
195 195 decode = lambda x: x.decode('string-escape')
196 196 pos += 1
197 197 s = pos
198 198 while pos < l: # find closing quote
199 199 d = program[pos]
200 200 if d == '\\': # skip over escaped characters
201 201 pos += 2
202 202 continue
203 203 if d == c:
204 204 yield ('string', decode(program[s:pos]), s)
205 205 break
206 206 pos += 1
207 207 else:
208 208 raise error.ParseError(_("unterminated string"), s)
209 209 # gather up a symbol/keyword
210 210 elif c in syminitletters:
211 211 s = pos
212 212 pos += 1
213 213 while pos < l: # find end of symbol
214 214 d = program[pos]
215 215 if d not in symletters:
216 216 break
217 217 if d == '.' and program[pos - 1] == '.': # special case for ..
218 218 pos -= 1
219 219 break
220 220 pos += 1
221 221 sym = program[s:pos]
222 222 if sym in keywords: # operator keywords
223 223 yield (sym, None, s)
224 224 elif '-' in sym:
225 225 # some jerk gave us foo-bar-baz, try to check if it's a symbol
226 226 if lookup and lookup(sym):
227 227 # looks like a real symbol
228 228 yield ('symbol', sym, s)
229 229 else:
230 230 # looks like an expression
231 231 parts = sym.split('-')
232 232 for p in parts[:-1]:
233 233 if p: # possible consecutive -
234 234 yield ('symbol', p, s)
235 235 s += len(p)
236 236 yield ('-', None, pos)
237 237 s += 1
238 238 if parts[-1]: # possible trailing -
239 239 yield ('symbol', parts[-1], s)
240 240 else:
241 241 yield ('symbol', sym, s)
242 242 pos -= 1
243 243 else:
244 244 raise error.ParseError(_("syntax error in revset '%s'") %
245 245 program, pos)
246 246 pos += 1
247 247 yield ('end', None, pos)
248 248
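# Editor's sketch (illustrative, not part of this changeset): a tiny,
# hypothetical wrapper showing what the tokenizer above emits for a whole
# expression.
def _demo_tokentypes(program):
    """Return just the token types produced by tokenize().

    >>> _demo_tokentypes("heads(default)")
    ['symbol', '(', 'symbol', ')', 'end']
    >>> _demo_tokentypes("@::")
    ['symbol', '::', 'end']
    """
    return [t[0] for t in tokenize(program)]
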
249 249 def parseerrordetail(inst):
250 250 """Compose error message from specified ParseError object
251 251 """
252 252 if len(inst.args) > 1:
253 253 return _('at %s: %s') % (inst.args[1], inst.args[0])
254 254 else:
255 255 return inst.args[0]
256 256
257 257 # helpers
258 258
259 259 def getstring(x, err):
260 260 if x and (x[0] == 'string' or x[0] == 'symbol'):
261 261 return x[1]
262 262 raise error.ParseError(err)
263 263
264 264 def getlist(x):
265 265 if not x:
266 266 return []
267 267 if x[0] == 'list':
268 268 return getlist(x[1]) + [x[2]]
269 269 return [x]
270 270
271 271 def getargs(x, min, max, err):
272 272 l = getlist(x)
273 273 if len(l) < min or (max >= 0 and len(l) > max):
274 274 raise error.ParseError(err)
275 275 return l
276 276
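# Editor's sketch (illustrative, not part of this changeset): how the helpers
# above flatten the nested ('list', ...) nodes the parser builds for
# "a, b, c".  The tree literal and helper name are hypothetical.
def _demo_flattenargs(tree):
    """Flatten a parsed argument list into its leaf nodes.

    >>> _demo_flattenargs(('list', ('list', ('symbol', 'a'), ('symbol', 'b')),
    ...                    ('symbol', 'c')))
    [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]
    """
    return getargs(tree, 0, -1, "expected an argument list")
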
277 277 def isvalidsymbol(tree):
278 278 """Examine whether specified ``tree`` is valid ``symbol`` or not
279 279 """
280 280 return tree[0] == 'symbol' and len(tree) > 1
281 281
282 282 def getsymbol(tree):
283 283 """Get symbol name from valid ``symbol`` in ``tree``
284 284
285 285 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
286 286 """
287 287 return tree[1]
288 288
289 289 def isvalidfunc(tree):
290 290 """Examine whether specified ``tree`` is valid ``func`` or not
291 291 """
292 292 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
293 293
294 294 def getfuncname(tree):
295 295 """Get function name from valid ``func`` in ``tree``
296 296
297 297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 298 """
299 299 return getsymbol(tree[1])
300 300
301 301 def getfuncargs(tree):
302 302 """Get list of function arguments from valid ``func`` in ``tree``
303 303
304 304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 305 """
306 306 if len(tree) > 2:
307 307 return getlist(tree[2])
308 308 else:
309 309 return []
310 310
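# Editor's sketch (illustrative, not part of this changeset): the ``func``
# helpers above applied to a hand-built parse tree for ``limit(tip, 2)``.
def _demo_funchelpers():
    """
    >>> tree = ('func', ('symbol', 'limit'),
    ...         ('list', ('symbol', 'tip'), ('symbol', '2')))
    >>> isvalidfunc(tree)
    True
    >>> getfuncname(tree)
    'limit'
    >>> getfuncargs(tree)
    [('symbol', 'tip'), ('symbol', '2')]
    """
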
311 311 def getset(repo, subset, x):
312 312 if not x:
313 313 raise error.ParseError(_("missing argument"))
314 314 s = methods[x[0]](repo, subset, *x[1:])
315 315 if util.safehasattr(s, 'isascending'):
316 316 return s
317 317 return baseset(s)
318 318
319 319 def _getrevsource(repo, r):
320 320 extra = repo[r].extra()
321 321 for label in ('source', 'transplant_source', 'rebase_source'):
322 322 if label in extra:
323 323 try:
324 324 return repo[extra[label]].rev()
325 325 except error.RepoLookupError:
326 326 pass
327 327 return None
328 328
329 329 # operator methods
330 330
331 331 def stringset(repo, subset, x):
332 332 x = repo[x].rev()
333 333 if x in subset:
334 334 return baseset([x])
335 335 return baseset()
336 336
337 337 def symbolset(repo, subset, x):
338 338 if x in symbols:
339 339 raise error.ParseError(_("can't use %s here") % x)
340 340 return stringset(repo, subset, x)
341 341
342 342 def rangeset(repo, subset, x, y):
343 343 m = getset(repo, fullreposet(repo), x)
344 344 n = getset(repo, fullreposet(repo), y)
345 345
346 346 if not m or not n:
347 347 return baseset()
348 348 m, n = m.first(), n.last()
349 349
350 350 if m < n:
351 351 r = spanset(repo, m, n + 1)
352 352 else:
353 353 r = spanset(repo, m, n - 1)
354 354 return r & subset
355 355
356 356 def dagrange(repo, subset, x, y):
357 357 r = fullreposet(repo)
358 358 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
359 359 return xs & subset
360 360
361 361 def andset(repo, subset, x, y):
362 362 return getset(repo, getset(repo, subset, x), y)
363 363
364 364 def orset(repo, subset, x, y):
365 365 xl = getset(repo, subset, x)
366 366 yl = getset(repo, subset - xl, y)
367 367 return xl + yl
368 368
369 369 def notset(repo, subset, x):
370 370 return subset - getset(repo, subset, x)
371 371
372 372 def listset(repo, subset, a, b):
373 373 raise error.ParseError(_("can't use a list in this context"))
374 374
375 375 def func(repo, subset, a, b):
376 376 if a[0] == 'symbol' and a[1] in symbols:
377 377 return symbols[a[1]](repo, subset, b)
378 378 raise error.UnknownIdentifier(a[1], symbols.keys())
379 379
380 380 # functions
381 381
382 382 def adds(repo, subset, x):
383 383 """``adds(pattern)``
384 384 Changesets that add a file matching pattern.
385 385
386 386 The pattern without explicit kind like ``glob:`` is expected to be
387 387 relative to the current directory and match against a file or a
388 388 directory.
389 389 """
390 390 # i18n: "adds" is a keyword
391 391 pat = getstring(x, _("adds requires a pattern"))
392 392 return checkstatus(repo, subset, pat, 1)
393 393
394 394 def ancestor(repo, subset, x):
395 395 """``ancestor(*changeset)``
396 396 A greatest common ancestor of the changesets.
397 397
398 398 Accepts 0 or more changesets.
399 399 Will return empty list when passed no args.
400 400 Greatest common ancestor of a single changeset is that changeset.
401 401 """
402 402 # i18n: "ancestor" is a keyword
403 403 l = getlist(x)
404 404 rl = fullreposet(repo)
405 405 anc = None
406 406
407 407 # (getset(repo, rl, i) for i in l) generates a list of lists
408 408 for revs in (getset(repo, rl, i) for i in l):
409 409 for r in revs:
410 410 if anc is None:
411 411 anc = repo[r]
412 412 else:
413 413 anc = anc.ancestor(repo[r])
414 414
415 415 if anc is not None and anc.rev() in subset:
416 416 return baseset([anc.rev()])
417 417 return baseset()
418 418
419 419 def _ancestors(repo, subset, x, followfirst=False):
420 420 heads = getset(repo, fullreposet(repo), x)
421 421 if not heads:
422 422 return baseset()
423 423 s = _revancestors(repo, heads, followfirst)
424 424 return subset & s
425 425
426 426 def ancestors(repo, subset, x):
427 427 """``ancestors(set)``
428 428 Changesets that are ancestors of a changeset in set.
429 429 """
430 430 return _ancestors(repo, subset, x)
431 431
432 432 def _firstancestors(repo, subset, x):
433 433 # ``_firstancestors(set)``
434 434 # Like ``ancestors(set)`` but follows only the first parents.
435 435 return _ancestors(repo, subset, x, followfirst=True)
436 436
437 437 def ancestorspec(repo, subset, x, n):
438 438 """``set~n``
439 439 Changesets that are the Nth ancestor (first parents only) of a changeset
440 440 in set.
441 441 """
442 442 try:
443 443 n = int(n[1])
444 444 except (TypeError, ValueError):
445 445 raise error.ParseError(_("~ expects a number"))
446 446 ps = set()
447 447 cl = repo.changelog
448 448 for r in getset(repo, fullreposet(repo), x):
449 449 for i in range(n):
450 450 r = cl.parentrevs(r)[0]
451 451 ps.add(r)
452 452 return subset & ps
453 453
454 454 def author(repo, subset, x):
455 455 """``author(string)``
456 456 Alias for ``user(string)``.
457 457 """
458 458 # i18n: "author" is a keyword
459 459 n = encoding.lower(getstring(x, _("author requires a string")))
460 460 kind, pattern, matcher = _substringmatcher(n)
461 461 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
462 462
463 463 def bisect(repo, subset, x):
464 464 """``bisect(string)``
465 465 Changesets marked in the specified bisect status:
466 466
467 467 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
468 468 - ``goods``, ``bads`` : csets topologically good/bad
469 469 - ``range`` : csets taking part in the bisection
470 470 - ``pruned`` : csets that are goods, bads or skipped
471 471 - ``untested`` : csets whose fate is yet unknown
472 472 - ``ignored`` : csets ignored due to DAG topology
473 473 - ``current`` : the cset currently being bisected
474 474 """
475 475 # i18n: "bisect" is a keyword
476 476 status = getstring(x, _("bisect requires a string")).lower()
477 477 state = set(hbisect.get(repo, status))
478 478 return subset & state
479 479
480 480 # Backward-compatibility
481 481 # - no help entry so that we do not advertise it any more
482 482 def bisected(repo, subset, x):
483 483 return bisect(repo, subset, x)
484 484
485 485 def bookmark(repo, subset, x):
486 486 """``bookmark([name])``
487 487 The named bookmark or all bookmarks.
488 488
489 489 If `name` starts with `re:`, the remainder of the name is treated as
490 490 a regular expression. To match a bookmark that actually starts with `re:`,
491 491 use the prefix `literal:`.
492 492 """
493 493 # i18n: "bookmark" is a keyword
494 494 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
495 495 if args:
496 496 bm = getstring(args[0],
497 497 # i18n: "bookmark" is a keyword
498 498 _('the argument to bookmark must be a string'))
499 499 kind, pattern, matcher = _stringmatcher(bm)
500 500 bms = set()
501 501 if kind == 'literal':
502 502 bmrev = repo._bookmarks.get(pattern, None)
503 503 if not bmrev:
504 504 raise error.RepoLookupError(_("bookmark '%s' does not exist")
505 505 % bm)
506 506 bms.add(repo[bmrev].rev())
507 507 else:
508 508 matchrevs = set()
509 509 for name, bmrev in repo._bookmarks.iteritems():
510 510 if matcher(name):
511 511 matchrevs.add(bmrev)
512 512 if not matchrevs:
513 513 raise error.RepoLookupError(_("no bookmarks exist"
514 514 " that match '%s'") % pattern)
515 515 for bmrev in matchrevs:
516 516 bms.add(repo[bmrev].rev())
517 517 else:
518 518 bms = set([repo[r].rev()
519 519 for r in repo._bookmarks.values()])
520 520 bms -= set([node.nullrev])
521 521 return subset & bms
522 522
523 523 def branch(repo, subset, x):
524 524 """``branch(string or set)``
525 525 All changesets belonging to the given branch or the branches of the given
526 526 changesets.
527 527
528 528 If `string` starts with `re:`, the remainder of the name is treated as
529 529 a regular expression. To match a branch that actually starts with `re:`,
530 530 use the prefix `literal:`.
531 531 """
532 532 getbi = repo.revbranchcache().branchinfo
533 533
534 534 try:
535 535 b = getstring(x, '')
536 536 except error.ParseError:
537 537 # not a string, but another revspec, e.g. tip()
538 538 pass
539 539 else:
540 540 kind, pattern, matcher = _stringmatcher(b)
541 541 if kind == 'literal':
542 542 # note: falls through to the revspec case if no branch with
543 543 # this name exists
544 544 if pattern in repo.branchmap():
545 545 return subset.filter(lambda r: matcher(getbi(r)[0]))
546 546 else:
547 547 return subset.filter(lambda r: matcher(getbi(r)[0]))
548 548
549 549 s = getset(repo, fullreposet(repo), x)
550 550 b = set()
551 551 for r in s:
552 552 b.add(getbi(r)[0])
553 553 c = s.__contains__
554 554 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
555 555
556 556 def bumped(repo, subset, x):
557 557 """``bumped()``
558 558 Mutable changesets marked as successors of public changesets.
559 559
560 560 Only non-public and non-obsolete changesets can be `bumped`.
561 561 """
562 562 # i18n: "bumped" is a keyword
563 563 getargs(x, 0, 0, _("bumped takes no arguments"))
564 564 bumped = obsmod.getrevs(repo, 'bumped')
565 565 return subset & bumped
566 566
567 567 def bundle(repo, subset, x):
568 568 """``bundle()``
569 569 Changesets in the bundle.
570 570
571 571 Bundle must be specified by the -R option."""
572 572
573 573 try:
574 574 bundlerevs = repo.changelog.bundlerevs
575 575 except AttributeError:
576 576 raise util.Abort(_("no bundle provided - specify with -R"))
577 577 return subset & bundlerevs
578 578
579 579 def checkstatus(repo, subset, pat, field):
580 580 hasset = matchmod.patkind(pat) == 'set'
581 581
582 582 mcache = [None]
583 583 def matches(x):
584 584 c = repo[x]
585 585 if not mcache[0] or hasset:
586 586 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
587 587 m = mcache[0]
588 588 fname = None
589 589 if not m.anypats() and len(m.files()) == 1:
590 590 fname = m.files()[0]
591 591 if fname is not None:
592 592 if fname not in c.files():
593 593 return False
594 594 else:
595 595 for f in c.files():
596 596 if m(f):
597 597 break
598 598 else:
599 599 return False
600 600 files = repo.status(c.p1().node(), c.node())[field]
601 601 if fname is not None:
602 602 if fname in files:
603 603 return True
604 604 else:
605 605 for f in files:
606 606 if m(f):
607 607 return True
608 608
609 609 return subset.filter(matches)
610 610
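# note (editor's annotation): checkstatus() above is shared by modifies(),
# adds() and removes(), which pass field indexes 0, 1 and 2 respectively --
# the positions of the modified, added and removed lists in repo.status().
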
611 611 def _children(repo, narrow, parentset):
612 612 cs = set()
613 613 if not parentset:
614 614 return baseset(cs)
615 615 pr = repo.changelog.parentrevs
616 616 minrev = min(parentset)
617 617 for r in narrow:
618 618 if r <= minrev:
619 619 continue
620 620 for p in pr(r):
621 621 if p in parentset:
622 622 cs.add(r)
623 623 return baseset(cs)
624 624
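# Editor's sketch (illustrative, not part of this changeset): _children()
# above on a plain {rev: (p1, p2)} map; the helper and data are hypothetical.
def _demo_children(parents, narrow, parentset):
    """Return revs in 'narrow' with at least one parent in 'parentset'.

    >>> parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
    >>> sorted(_demo_children(parents, [0, 1, 2, 3], set([0])))
    [1, 2]
    """
    cs = set()
    if not parentset:
        return cs
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            continue
        for p in parents[r]:
            if p in parentset:
                cs.add(r)
    return cs
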
625 625 def children(repo, subset, x):
626 626 """``children(set)``
627 627 Child changesets of changesets in set.
628 628 """
629 629 s = getset(repo, fullreposet(repo), x)
630 630 cs = _children(repo, subset, s)
631 631 return subset & cs
632 632
633 633 def closed(repo, subset, x):
634 634 """``closed()``
635 635 Changeset is closed.
636 636 """
637 637 # i18n: "closed" is a keyword
638 638 getargs(x, 0, 0, _("closed takes no arguments"))
639 639 return subset.filter(lambda r: repo[r].closesbranch())
640 640
641 641 def contains(repo, subset, x):
642 642 """``contains(pattern)``
643 643 The revision's manifest contains a file matching pattern (but might not
644 644 modify it). See :hg:`help patterns` for information about file patterns.
645 645
646 646 The pattern without explicit kind like ``glob:`` is expected to be
647 647 relative to the current directory and match against a file exactly
648 648 for efficiency.
649 649 """
650 650 # i18n: "contains" is a keyword
651 651 pat = getstring(x, _("contains requires a pattern"))
652 652
653 653 def matches(x):
654 654 if not matchmod.patkind(pat):
655 655 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
656 656 if pats in repo[x]:
657 657 return True
658 658 else:
659 659 c = repo[x]
660 660 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
661 661 for f in c.manifest():
662 662 if m(f):
663 663 return True
664 664 return False
665 665
666 666 return subset.filter(matches)
667 667
668 668 def converted(repo, subset, x):
669 669 """``converted([id])``
670 670 Changesets converted from the given identifier in the old repository if
671 671 present, or all converted changesets if no identifier is specified.
672 672 """
673 673
674 674 # There is exactly no chance of resolving the revision, so do a simple
675 675 # string compare and hope for the best
676 676
677 677 rev = None
678 678 # i18n: "converted" is a keyword
679 679 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
680 680 if l:
681 681 # i18n: "converted" is a keyword
682 682 rev = getstring(l[0], _('converted requires a revision'))
683 683
684 684 def _matchvalue(r):
685 685 source = repo[r].extra().get('convert_revision', None)
686 686 return source is not None and (rev is None or source.startswith(rev))
687 687
688 688 return subset.filter(lambda r: _matchvalue(r))
689 689
690 690 def date(repo, subset, x):
691 691 """``date(interval)``
692 692 Changesets within the interval, see :hg:`help dates`.
693 693 """
694 694 # i18n: "date" is a keyword
695 695 ds = getstring(x, _("date requires a string"))
696 696 dm = util.matchdate(ds)
697 697 return subset.filter(lambda x: dm(repo[x].date()[0]))
698 698
699 699 def desc(repo, subset, x):
700 700 """``desc(string)``
701 701 Search commit message for string. The match is case-insensitive.
702 702 """
703 703 # i18n: "desc" is a keyword
704 704 ds = encoding.lower(getstring(x, _("desc requires a string")))
705 705
706 706 def matches(x):
707 707 c = repo[x]
708 708 return ds in encoding.lower(c.description())
709 709
710 710 return subset.filter(matches)
711 711
712 712 def _descendants(repo, subset, x, followfirst=False):
713 713 roots = getset(repo, fullreposet(repo), x)
714 714 if not roots:
715 715 return baseset()
716 716 s = _revdescendants(repo, roots, followfirst)
717 717
718 718 # Both sets need to be ascending in order to lazily return the union
719 719 # in the correct order.
720 720 base = subset & roots
721 721 desc = subset & s
722 722 result = base + desc
723 723 if subset.isascending():
724 724 result.sort()
725 725 elif subset.isdescending():
726 726 result.sort(reverse=True)
727 727 else:
728 728 result = subset & result
729 729 return result
730 730
731 731 def descendants(repo, subset, x):
732 732 """``descendants(set)``
733 733 Changesets which are descendants of changesets in set.
734 734 """
735 735 return _descendants(repo, subset, x)
736 736
737 737 def _firstdescendants(repo, subset, x):
738 738 # ``_firstdescendants(set)``
739 739 # Like ``descendants(set)`` but follows only the first parents.
740 740 return _descendants(repo, subset, x, followfirst=True)
741 741
742 742 def destination(repo, subset, x):
743 743 """``destination([set])``
744 744 Changesets that were created by a graft, transplant or rebase operation,
745 745 with the given revisions specified as the source. Omitting the optional set
746 746 is the same as passing all().
747 747 """
748 748 if x is not None:
749 749 sources = getset(repo, fullreposet(repo), x)
750 750 else:
751 751 sources = fullreposet(repo)
752 752
753 753 dests = set()
754 754
755 755 # subset contains all of the possible destinations that can be returned, so
756 756 # iterate over them and see if their source(s) were provided in the arg set.
757 757 # Even if the immediate src of r is not in the arg set, src's source (or
758 758 # further back) may be. Scanning back further than the immediate src allows
759 759 # transitive transplants and rebases to yield the same results as transitive
760 760 # grafts.
761 761 for r in subset:
762 762 src = _getrevsource(repo, r)
763 763 lineage = None
764 764
765 765 while src is not None:
766 766 if lineage is None:
767 767 lineage = list()
768 768
769 769 lineage.append(r)
770 770
771 771 # The visited lineage is a match if the current source is in the arg
772 772 # set. Since every candidate dest is visited by way of iterating
773 773 # subset, any dests further back in the lineage will be tested by a
774 774 # different iteration over subset. Likewise, if the src was already
775 775 # selected, the current lineage can be selected without going back
776 776 # further.
777 777 if src in sources or src in dests:
778 778 dests.update(lineage)
779 779 break
780 780
781 781 r = src
782 782 src = _getrevsource(repo, r)
783 783
784 784 return subset.filter(dests.__contains__)
785 785
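# Editor's sketch (illustrative, not part of this changeset): the
# transitive-source walk used by destination(), on a plain
# {rev: source_rev_or_None} map.  Helper name and sample data are
# hypothetical.
def _demo_destinations(sourceof, subset, sources):
    """Return members of 'subset' whose transitive source is in 'sources'.

    >>> sourceof = {1: None, 2: 1, 3: 2, 4: None}
    >>> _demo_destinations(sourceof, [2, 3, 4], set([1]))
    [2, 3]
    """
    dests = set()
    for r in subset:
        lineage = []
        src = sourceof.get(r)
        while src is not None:
            lineage.append(r)
            if src in sources or src in dests:
                dests.update(lineage)
                break
            r = src
            src = sourceof.get(r)
    members = set(subset)
    return sorted(d for d in dests if d in members)
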
786 786 def divergent(repo, subset, x):
787 787 """``divergent()``
788 788 Final successors of changesets with an alternative set of final successors.
789 789 """
790 790 # i18n: "divergent" is a keyword
791 791 getargs(x, 0, 0, _("divergent takes no arguments"))
792 792 divergent = obsmod.getrevs(repo, 'divergent')
793 793 return subset & divergent
794 794
795 795 def draft(repo, subset, x):
796 796 """``draft()``
797 797 Changeset in draft phase."""
798 798 # i18n: "draft" is a keyword
799 799 getargs(x, 0, 0, _("draft takes no arguments"))
800 800 phase = repo._phasecache.phase
801 801 target = phases.draft
802 802 condition = lambda r: phase(repo, r) == target
803 803 return subset.filter(condition, cache=False)
804 804
805 805 def extinct(repo, subset, x):
806 806 """``extinct()``
807 807 Obsolete changesets with obsolete descendants only.
808 808 """
809 809 # i18n: "extinct" is a keyword
810 810 getargs(x, 0, 0, _("extinct takes no arguments"))
811 811 extincts = obsmod.getrevs(repo, 'extinct')
812 812 return subset & extincts
813 813
814 814 def extra(repo, subset, x):
815 815 """``extra(label, [value])``
816 816 Changesets with the given label in the extra metadata, with the given
817 817 optional value.
818 818
819 819 If `value` starts with `re:`, the remainder of the value is treated as
820 820 a regular expression. To match a value that actually starts with `re:`,
821 821 use the prefix `literal:`.
822 822 """
823 823
824 824 # i18n: "extra" is a keyword
825 825 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
826 826 # i18n: "extra" is a keyword
827 827 label = getstring(l[0], _('first argument to extra must be a string'))
828 828 value = None
829 829
830 830 if len(l) > 1:
831 831 # i18n: "extra" is a keyword
832 832 value = getstring(l[1], _('second argument to extra must be a string'))
833 833 kind, value, matcher = _stringmatcher(value)
834 834
835 835 def _matchvalue(r):
836 836 extra = repo[r].extra()
837 837 return label in extra and (value is None or matcher(extra[label]))
838 838
839 839 return subset.filter(lambda r: _matchvalue(r))
840 840
841 841 def filelog(repo, subset, x):
842 842 """``filelog(pattern)``
843 843 Changesets connected to the specified filelog.
844 844
845 845 For performance reasons, visits only revisions mentioned in the file-level
846 846 filelog, rather than filtering through all changesets (much faster, but
847 847 doesn't include deletes or duplicate changes). For a slower, more accurate
848 848 result, use ``file()``.
849 849
850 850 The pattern without explicit kind like ``glob:`` is expected to be
851 851 relative to the current directory and match against a file exactly
852 852 for efficiency.
853 853
854 854 If some linkrev points to revisions filtered by the current repoview, we'll
855 855 work around it to return a non-filtered value.
856 856 """
857 857
858 858 # i18n: "filelog" is a keyword
859 859 pat = getstring(x, _("filelog requires a pattern"))
860 860 s = set()
861 861 cl = repo.changelog
862 862
863 863 if not matchmod.patkind(pat):
864 864 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
865 865 files = [f]
866 866 else:
867 867 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
868 868 files = (f for f in repo[None] if m(f))
869 869
870 870 for f in files:
871 871 backrevref = {} # final value for: filerev -> changerev
872 872 lowestchild = {} # lowest known filerev child of a filerev
873 873 delayed = [] # filerev with filtered linkrev, for post-processing
874 874 lowesthead = None # cache for manifest content of all head revisions
875 875 fl = repo.file(f)
876 876 for fr in list(fl):
877 877 rev = fl.linkrev(fr)
878 878 if rev not in cl:
879 879 # changerev pointed in linkrev is filtered
880 880 # record it for post processing.
881 881 delayed.append((fr, rev))
882 882 continue
883 883 for p in fl.parentrevs(fr):
884 884 if 0 <= p and p not in lowestchild:
885 885 lowestchild[p] = fr
886 886 backrevref[fr] = rev
887 887 s.add(rev)
888 888
889 889 # Post-processing of all filerevs we skipped because they were
890 890 # filtered. If such filerevs have known and unfiltered children, this
891 891 # means they have an unfiltered appearance out there. We'll use linkrev
892 892 # adjustment to find one of these appearances. The lowest known child
893 893 # will be used as a starting point because it is the best upper-bound we
894 894 # have.
895 895 #
896 896 # This approach will fail when an unfiltered but linkrev-shadowed
897 897 # appearance exists in a head changeset without unfiltered filerev
898 898 # children anywhere.
899 899 while delayed:
900 900 # must be a descending iteration. To slowly fill lowest child
901 901 # information that is of potential use by the next item.
902 902 fr, rev = delayed.pop()
903 903 lkr = rev
904 904
905 905 child = lowestchild.get(fr)
906 906
907 907 if child is None:
908 908 # search for existence of this file revision in a head revision.
909 909 # There are three possibilities:
910 910 # - the revision exists in a head and we can find an
911 911 # introduction from there,
912 912 # - the revision does not exist in a head because it has been
913 913 # changed since its introduction: we would have found a child
914 914 # and be in the other 'else' clause,
915 915 # - all versions of the revision are hidden.
916 916 if lowesthead is None:
917 917 lowesthead = {}
918 918 for h in repo.heads():
919 919 fnode = repo[h].manifest().get(f)
920 920 if fnode is not None:
921 921 lowesthead[fl.rev(fnode)] = h
922 922 headrev = lowesthead.get(fr)
923 923 if headrev is None:
924 924 # content is nowhere unfiltered
925 925 continue
926 926 rev = repo[headrev][f].introrev()
927 927 else:
928 928 # the lowest known child is a good upper bound
929 929 childcrev = backrevref[child]
930 930 # XXX this does not guarantee returning the lowest
931 931 # introduction of this revision, but this gives a
932 932 # result which is a good start and will fit in most
933 933 # cases. We probably need to fix the multiple
934 934 # introductions case properly (report each
935 935 # introduction, even for identical file revisions)
936 936 # once and for all at some point anyway.
937 937 for p in repo[childcrev][f].parents():
938 938 if p.filerev() == fr:
939 939 rev = p.rev()
940 940 break
941 941 if rev == lkr: # no shadowed entry found
942 942 # XXX This should never happen unless some manifest points
943 943 # to biggish file revisions (like a revision that uses a
944 944 # parent that never appears in the manifest ancestors)
945 945 continue
946 946
947 947 # Fill the data for the next iteration.
948 948 for p in fl.parentrevs(fr):
949 949 if 0 <= p and p not in lowestchild:
950 950 lowestchild[p] = fr
951 951 backrevref[fr] = rev
952 952 s.add(rev)
953 953
954 954 return subset & s
955 955
956 956 def first(repo, subset, x):
957 957 """``first(set, [n])``
958 958 An alias for limit().
959 959 """
960 960 return limit(repo, subset, x)
961 961
962 962 def _follow(repo, subset, x, name, followfirst=False):
963 963 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
964 964 c = repo['.']
965 965 if l:
966 966 x = getstring(l[0], _("%s expected a filename") % name)
967 967 if x in c:
968 968 cx = c[x]
969 969 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
970 970 # include the revision responsible for the most recent version
971 971 s.add(cx.introrev())
972 972 else:
973 973 return baseset()
974 974 else:
975 975 s = _revancestors(repo, baseset([c.rev()]), followfirst)
976 976
977 977 return subset & s
978 978
979 979 def follow(repo, subset, x):
980 980 """``follow([file])``
981 981 An alias for ``::.`` (ancestors of the working directory's first parent).
982 982 If a filename is specified, the history of the given file is followed,
983 983 including copies.
984 984 """
985 985 return _follow(repo, subset, x, 'follow')
986 986
987 987 def _followfirst(repo, subset, x):
988 988 # ``followfirst([file])``
989 989 # Like ``follow([file])`` but follows only the first parent of
990 990 # every revision or file revision.
991 991 return _follow(repo, subset, x, '_followfirst', followfirst=True)
992 992
993 993 def getall(repo, subset, x):
994 994 """``all()``
995 995 All changesets, the same as ``0:tip``.
996 996 """
997 997 # i18n: "all" is a keyword
998 998 getargs(x, 0, 0, _("all takes no arguments"))
999 999 return subset & spanset(repo) # drop "null" if any
1000 1000
1001 1001 def grep(repo, subset, x):
1002 1002 """``grep(regex)``
1003 1003 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1004 1004 to ensure special escape characters are handled correctly. Unlike
1005 1005 ``keyword(string)``, the match is case-sensitive.
1006 1006 """
1007 1007 try:
1008 1008 # i18n: "grep" is a keyword
1009 1009 gr = re.compile(getstring(x, _("grep requires a string")))
1010 1010 except re.error, e:
1011 1011 raise error.ParseError(_('invalid match pattern: %s') % e)
1012 1012
1013 1013 def matches(x):
1014 1014 c = repo[x]
1015 1015 for e in c.files() + [c.user(), c.description()]:
1016 1016 if gr.search(e):
1017 1017 return True
1018 1018 return False
1019 1019
1020 1020 return subset.filter(matches)
1021 1021
1022 1022 def _matchfiles(repo, subset, x):
1023 1023 # _matchfiles takes a revset list of prefixed arguments:
1024 1024 #
1025 1025 # [p:foo, i:bar, x:baz]
1026 1026 #
1027 1027 # builds a match object from them and filters subset. Allowed
1028 1028 # prefixes are 'p:' for regular patterns, 'i:' for include
1029 1029 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1030 1030 # a revision identifier, or the empty string to reference the
1031 1031 # working directory, from which the match object is
1032 1032 # initialized. Use 'd:' to set the default matching mode, default
1033 1033 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1034 1034
1035 1035 # i18n: "_matchfiles" is a keyword
1036 1036 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1037 1037 pats, inc, exc = [], [], []
1038 1038 rev, default = None, None
1039 1039 for arg in l:
1040 1040 # i18n: "_matchfiles" is a keyword
1041 1041 s = getstring(arg, _("_matchfiles requires string arguments"))
1042 1042 prefix, value = s[:2], s[2:]
1043 1043 if prefix == 'p:':
1044 1044 pats.append(value)
1045 1045 elif prefix == 'i:':
1046 1046 inc.append(value)
1047 1047 elif prefix == 'x:':
1048 1048 exc.append(value)
1049 1049 elif prefix == 'r:':
1050 1050 if rev is not None:
1051 1051 # i18n: "_matchfiles" is a keyword
1052 1052 raise error.ParseError(_('_matchfiles expected at most one '
1053 1053 'revision'))
1054 1054 if value != '': # empty means working directory; leave rev as None
1055 1055 rev = value
1056 1056 elif prefix == 'd:':
1057 1057 if default is not None:
1058 1058 # i18n: "_matchfiles" is a keyword
1059 1059 raise error.ParseError(_('_matchfiles expected at most one '
1060 1060 'default mode'))
1061 1061 default = value
1062 1062 else:
1063 1063 # i18n: "_matchfiles" is a keyword
1064 1064 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1065 1065 if not default:
1066 1066 default = 'glob'
1067 1067
1068 1068 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1069 1069 exclude=exc, ctx=repo[rev], default=default)
1070 1070
1071 1071 def matches(x):
1072 1072 for f in repo[x].files():
1073 1073 if m(f):
1074 1074 return True
1075 1075 return False
1076 1076
1077 1077 return subset.filter(matches)
1078 1078
1079 1079 def hasfile(repo, subset, x):
1080 1080 """``file(pattern)``
1081 1081 Changesets affecting files matched by pattern.
1082 1082
1083 1083 For a faster but less accurate result, consider using ``filelog()``
1084 1084 instead.
1085 1085
1086 1086 This predicate uses ``glob:`` as the default kind of pattern.
1087 1087 """
1088 1088 # i18n: "file" is a keyword
1089 1089 pat = getstring(x, _("file requires a pattern"))
1090 1090 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1091 1091
1092 1092 def head(repo, subset, x):
1093 1093 """``head()``
1094 1094 Changeset is a named branch head.
1095 1095 """
1096 1096 # i18n: "head" is a keyword
1097 1097 getargs(x, 0, 0, _("head takes no arguments"))
1098 1098 hs = set()
1099 1099 for b, ls in repo.branchmap().iteritems():
1100 1100 hs.update(repo[h].rev() for h in ls)
1101 1101 return baseset(hs).filter(subset.__contains__)
1102 1102
1103 1103 def heads(repo, subset, x):
1104 1104 """``heads(set)``
1105 1105 Members of set with no children in set.
1106 1106 """
1107 1107 s = getset(repo, subset, x)
1108 1108 ps = parents(repo, subset, x)
1109 1109 return s - ps
1110 1110
1111 1111 def hidden(repo, subset, x):
1112 1112 """``hidden()``
1113 1113 Hidden changesets.
1114 1114 """
1115 1115 # i18n: "hidden" is a keyword
1116 1116 getargs(x, 0, 0, _("hidden takes no arguments"))
1117 1117 hiddenrevs = repoview.filterrevs(repo, 'visible')
1118 1118 return subset & hiddenrevs
1119 1119
1120 1120 def keyword(repo, subset, x):
1121 1121 """``keyword(string)``
1122 1122 Search commit message, user name, and names of changed files for
1123 1123 string. The match is case-insensitive.
1124 1124 """
1125 1125 # i18n: "keyword" is a keyword
1126 1126 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1127 1127
1128 1128 def matches(r):
1129 1129 c = repo[r]
1130 1130 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1131 1131 c.description()])
1132 1132
1133 1133 return subset.filter(matches)
1134 1134
1135 1135 def limit(repo, subset, x):
1136 1136 """``limit(set, [n])``
1137 1137 First n members of set, defaulting to 1.
1138 1138 """
1139 1139 # i18n: "limit" is a keyword
1140 1140 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1141 1141 try:
1142 1142 lim = 1
1143 1143 if len(l) == 2:
1144 1144 # i18n: "limit" is a keyword
1145 1145 lim = int(getstring(l[1], _("limit requires a number")))
1146 1146 except (TypeError, ValueError):
1147 1147 # i18n: "limit" is a keyword
1148 1148 raise error.ParseError(_("limit expects a number"))
1149 1149 ss = subset
1150 1150 os = getset(repo, fullreposet(repo), l[0])
1151 1151 result = []
1152 1152 it = iter(os)
1153 1153 for x in xrange(lim):
1154 1154 try:
1155 1155 y = it.next()
1156 1156 if y in ss:
1157 1157 result.append(y)
1158 1158 except (StopIteration):
1159 1159 break
1160 1160 return baseset(result)
1161 1161
1162 1162 def last(repo, subset, x):
1163 1163 """``last(set, [n])``
1164 1164 Last n members of set, defaulting to 1.
1165 1165 """
1166 1166 # i18n: "last" is a keyword
1167 1167 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1168 1168 try:
1169 1169 lim = 1
1170 1170 if len(l) == 2:
1171 1171 # i18n: "last" is a keyword
1172 1172 lim = int(getstring(l[1], _("last requires a number")))
1173 1173 except (TypeError, ValueError):
1174 1174 # i18n: "last" is a keyword
1175 1175 raise error.ParseError(_("last expects a number"))
1176 1176 ss = subset
1177 1177 os = getset(repo, fullreposet(repo), l[0])
1178 1178 os.reverse()
1179 1179 result = []
1180 1180 it = iter(os)
1181 1181 for x in xrange(lim):
1182 1182 try:
1183 1183 y = it.next()
1184 1184 if y in ss:
1185 1185 result.append(y)
1186 1186 except (StopIteration):
1187 1187 break
1188 1188 return baseset(result)
1189 1189
1190 1190 def maxrev(repo, subset, x):
1191 1191 """``max(set)``
1192 1192 Changeset with highest revision number in set.
1193 1193 """
1194 1194 os = getset(repo, fullreposet(repo), x)
1195 1195 if os:
1196 1196 m = os.max()
1197 1197 if m in subset:
1198 1198 return baseset([m])
1199 1199 return baseset()
1200 1200
1201 1201 def merge(repo, subset, x):
1202 1202 """``merge()``
1203 1203 Changeset is a merge changeset.
1204 1204 """
1205 1205 # i18n: "merge" is a keyword
1206 1206 getargs(x, 0, 0, _("merge takes no arguments"))
1207 1207 cl = repo.changelog
1208 1208 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1209 1209
1210 1210 def branchpoint(repo, subset, x):
1211 1211 """``branchpoint()``
1212 1212 Changesets with more than one child.
1213 1213 """
1214 1214 # i18n: "branchpoint" is a keyword
1215 1215 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1216 1216 cl = repo.changelog
1217 1217 if not subset:
1218 1218 return baseset()
1219 1219 baserev = min(subset)
1220 1220 parentscount = [0]*(len(repo) - baserev)
1221 1221 for r in cl.revs(start=baserev + 1):
1222 1222 for p in cl.parentrevs(r):
1223 1223 if p >= baserev:
1224 1224 parentscount[p - baserev] += 1
1225 1225 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1226 1226
1227 1227 def minrev(repo, subset, x):
1228 1228 """``min(set)``
1229 1229 Changeset with lowest revision number in set.
1230 1230 """
1231 1231 os = getset(repo, fullreposet(repo), x)
1232 1232 if os:
1233 1233 m = os.min()
1234 1234 if m in subset:
1235 1235 return baseset([m])
1236 1236 return baseset()
1237 1237
1238 1238 def modifies(repo, subset, x):
1239 1239 """``modifies(pattern)``
1240 1240 Changesets modifying files matched by pattern.
1241 1241
1242 1242 The pattern without explicit kind like ``glob:`` is expected to be
1243 1243 relative to the current directory and match against a file or a
1244 1244 directory.
1245 1245 """
1246 1246 # i18n: "modifies" is a keyword
1247 1247 pat = getstring(x, _("modifies requires a pattern"))
1248 1248 return checkstatus(repo, subset, pat, 0)
1249 1249
1250 1250 def named(repo, subset, x):
1251 1251 """``named(namespace)``
1252 1252 The changesets in a given namespace.
1253 1253
1254 1254 If `namespace` starts with `re:`, the remainder of the string is treated as
1255 1255 a regular expression. To match a namespace that actually starts with `re:`,
1256 1256 use the prefix `literal:`.
1257 1257 """
1258 1258 # i18n: "named" is a keyword
1259 1259 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1260 1260
1261 1261 ns = getstring(args[0],
1262 1262 # i18n: "named" is a keyword
1263 1263 _('the argument to named must be a string'))
1264 1264 kind, pattern, matcher = _stringmatcher(ns)
1265 1265 namespaces = set()
1266 1266 if kind == 'literal':
1267 1267 if pattern not in repo.names:
1268 1268 raise error.RepoLookupError(_("namespace '%s' does not exist")
1269 1269 % ns)
1270 1270 namespaces.add(repo.names[pattern])
1271 1271 else:
1272 1272 for name, ns in repo.names.iteritems():
1273 1273 if matcher(name):
1274 1274 namespaces.add(ns)
1275 1275 if not namespaces:
1276 1276 raise error.RepoLookupError(_("no namespace exists"
1277 1277 " that match '%s'") % pattern)
1278 1278
1279 1279 names = set()
1280 1280 for ns in namespaces:
1281 1281 for name in ns.listnames(repo):
1282 1282 if name not in ns.deprecated:
1283 1283 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1284 1284
1285 1285 names -= set([node.nullrev])
1286 1286 return subset & names
1287 1287
1288 1288 def node_(repo, subset, x):
1289 1289 """``id(string)``
1290 1290 Revision non-ambiguously specified by the given hex string prefix.
1291 1291 """
1292 1292 # i18n: "id" is a keyword
1293 1293 l = getargs(x, 1, 1, _("id requires one argument"))
1294 1294 # i18n: "id" is a keyword
1295 1295 n = getstring(l[0], _("id requires a string"))
1296 1296 if len(n) == 40:
1297 1297 rn = repo[n].rev()
1298 1298 else:
1299 1299 rn = None
1300 1300 pm = repo.changelog._partialmatch(n)
1301 1301 if pm is not None:
1302 1302 rn = repo.changelog.rev(pm)
1303 1303
1304 1304 if rn is None:
1305 1305 return baseset()
1306 1306 result = baseset([rn])
1307 1307 return result & subset
1308 1308
1309 1309 def obsolete(repo, subset, x):
1310 1310 """``obsolete()``
1311 1311 Mutable changeset with a newer version."""
1312 1312 # i18n: "obsolete" is a keyword
1313 1313 getargs(x, 0, 0, _("obsolete takes no arguments"))
1314 1314 obsoletes = obsmod.getrevs(repo, 'obsolete')
1315 1315 return subset & obsoletes
1316 1316
1317 1317 def only(repo, subset, x):
1318 1318 """``only(set, [set])``
1319 1319 Changesets that are ancestors of the first set that are not ancestors
1320 1320 of any other head in the repo. If a second set is specified, the result
1321 1321 is ancestors of the first set that are not ancestors of the second set
1322 1322 (i.e. ::<set1> - ::<set2>).
1323 1323 """
1324 1324 cl = repo.changelog
1325 1325 # i18n: "only" is a keyword
1326 1326 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1327 1327 include = getset(repo, fullreposet(repo), args[0])
1328 1328 if len(args) == 1:
1329 1329 if not include:
1330 1330 return baseset()
1331 1331
1332 1332 descendants = set(_revdescendants(repo, include, False))
1333 1333 exclude = [rev for rev in cl.headrevs()
1334 1334 if not rev in descendants and not rev in include]
1335 1335 else:
1336 1336 exclude = getset(repo, fullreposet(repo), args[1])
1337 1337
1338 1338 results = set(cl.findmissingrevs(common=exclude, heads=include))
1339 1339 return subset & results
1340 1340
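# Editor's sketch (illustrative, not part of this changeset): per the
# docstring above, ``only(A, B)`` should match the ancestor-closure
# difference ``::A - ::B``.  A hypothetical cross-check, assuming a localrepo
# object and its standard revs() entry point:
def _demo_only_equivalence(repo, a, b):
    lhs = set(repo.revs('only(%s, %s)' % (a, b)))
    rhs = set(repo.revs('(::%s) - (::%s)' % (a, b)))
    return lhs == rhs
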
1341 1341 def origin(repo, subset, x):
1342 1342 """``origin([set])``
1343 1343 Changesets that were specified as a source for the grafts, transplants or
1344 1344 rebases that created the given revisions. Omitting the optional set is the
1345 1345 same as passing all(). If a changeset created by these operations is itself
1346 1346 specified as a source for one of these operations, only the source changeset
1347 1347 for the first operation is selected.
1348 1348 """
1349 1349 if x is not None:
1350 1350 dests = getset(repo, fullreposet(repo), x)
1351 1351 else:
1352 1352 dests = fullreposet(repo)
1353 1353
1354 1354 def _firstsrc(rev):
1355 1355 src = _getrevsource(repo, rev)
1356 1356 if src is None:
1357 1357 return None
1358 1358
1359 1359 while True:
1360 1360 prev = _getrevsource(repo, src)
1361 1361
1362 1362 if prev is None:
1363 1363 return src
1364 1364 src = prev
1365 1365
1366 1366 o = set([_firstsrc(r) for r in dests])
1367 1367 o -= set([None])
1368 1368 return subset & o
1369 1369
1370 1370 def outgoing(repo, subset, x):
1371 1371 """``outgoing([path])``
1372 1372 Changesets not found in the specified destination repository, or the
1373 1373 default push location.
1374 1374 """
1375 import hg # avoid start-up nasties
1375 # Avoid cycles.
1376 import discovery
1377 import hg
1376 1378 # i18n: "outgoing" is a keyword
1377 1379 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1378 1380 # i18n: "outgoing" is a keyword
1379 1381 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1380 1382 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1381 1383 dest, branches = hg.parseurl(dest)
1382 1384 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1383 1385 if revs:
1384 1386 revs = [repo.lookup(rev) for rev in revs]
1385 1387 other = hg.peer(repo, {}, dest)
1386 1388 repo.ui.pushbuffer()
1387 1389 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1388 1390 repo.ui.popbuffer()
1389 1391 cl = repo.changelog
1390 1392 o = set([cl.rev(r) for r in outgoing.missing])
1391 1393 return subset & o
1392 1394
1393 1395 def p1(repo, subset, x):
1394 1396 """``p1([set])``
1395 1397 First parent of changesets in set, or the working directory.
1396 1398 """
1397 1399 if x is None:
1398 1400 p = repo[x].p1().rev()
1399 1401 if p >= 0:
1400 1402 return subset & baseset([p])
1401 1403 return baseset()
1402 1404
1403 1405 ps = set()
1404 1406 cl = repo.changelog
1405 1407 for r in getset(repo, fullreposet(repo), x):
1406 1408 ps.add(cl.parentrevs(r)[0])
1407 1409 ps -= set([node.nullrev])
1408 1410 return subset & ps
1409 1411
1410 1412 def p2(repo, subset, x):
1411 1413 """``p2([set])``
1412 1414 Second parent of changesets in set, or the working directory.
1413 1415 """
1414 1416 if x is None:
1415 1417 ps = repo[x].parents()
1416 1418 try:
1417 1419 p = ps[1].rev()
1418 1420 if p >= 0:
1419 1421 return subset & baseset([p])
1420 1422 return baseset()
1421 1423 except IndexError:
1422 1424 return baseset()
1423 1425
1424 1426 ps = set()
1425 1427 cl = repo.changelog
1426 1428 for r in getset(repo, fullreposet(repo), x):
1427 1429 ps.add(cl.parentrevs(r)[1])
1428 1430 ps -= set([node.nullrev])
1429 1431 return subset & ps
1430 1432
1431 1433 def parents(repo, subset, x):
1432 1434 """``parents([set])``
1433 1435 The set of all parents for all changesets in set, or the working directory.
1434 1436 """
1435 1437 if x is None:
1436 1438 ps = set(p.rev() for p in repo[x].parents())
1437 1439 else:
1438 1440 ps = set()
1439 1441 cl = repo.changelog
1440 1442 for r in getset(repo, fullreposet(repo), x):
1441 1443 ps.update(cl.parentrevs(r))
1442 1444 ps -= set([node.nullrev])
1443 1445 return subset & ps
1444 1446
1445 1447 def parentspec(repo, subset, x, n):
1446 1448 """``set^0``
1447 1449 The set.
1448 1450 ``set^1`` (or ``set^``), ``set^2``
1449 1451 First or second parent, respectively, of all changesets in set.
1450 1452 """
1451 1453 try:
1452 1454 n = int(n[1])
1453 1455 if n not in (0, 1, 2):
1454 1456 raise ValueError
1455 1457 except (TypeError, ValueError):
1456 1458 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1457 1459 ps = set()
1458 1460 cl = repo.changelog
1459 1461 for r in getset(repo, fullreposet(repo), x):
1460 1462 if n == 0:
1461 1463 ps.add(r)
1462 1464 elif n == 1:
1463 1465 ps.add(cl.parentrevs(r)[0])
1464 1466 elif n == 2:
1465 1467 parents = cl.parentrevs(r)
1466 1468 if len(parents) > 1:
1467 1469 ps.add(parents[1])
1468 1470 return subset & ps
1469 1471
1470 1472 def present(repo, subset, x):
1471 1473 """``present(set)``
1472 1474 An empty set, if any revision in set isn't found; otherwise,
1473 1475 all revisions in set.
1474 1476
1475 1477 If any of specified revisions is not present in the local repository,
1476 1478 the query is normally aborted. But this predicate allows the query
1477 1479 to continue even in such cases.
1478 1480 """
1479 1481 try:
1480 1482 return getset(repo, subset, x)
1481 1483 except error.RepoLookupError:
1482 1484 return baseset()
1483 1485
1484 1486 def public(repo, subset, x):
1485 1487 """``public()``
1486 1488 Changeset in public phase."""
1487 1489 # i18n: "public" is a keyword
1488 1490 getargs(x, 0, 0, _("public takes no arguments"))
1489 1491 phase = repo._phasecache.phase
1490 1492 target = phases.public
1491 1493 condition = lambda r: phase(repo, r) == target
1492 1494 return subset.filter(condition, cache=False)
1493 1495
1494 1496 def remote(repo, subset, x):
1495 1497 """``remote([id [,path]])``
1496 1498 Local revision that corresponds to the given identifier in a
1497 1499 remote repository, if present. Here, the '.' identifier is a
1498 1500 synonym for the current local branch.
1499 1501 """
1500 1502
1501 1503 import hg # avoid start-up nasties
1502 1504 # i18n: "remote" is a keyword
1503 1505 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1504 1506
1505 1507 q = '.'
1506 1508 if len(l) > 0:
1507 1509 # i18n: "remote" is a keyword
1508 1510 q = getstring(l[0], _("remote requires a string id"))
1509 1511 if q == '.':
1510 1512 q = repo['.'].branch()
1511 1513
1512 1514 dest = ''
1513 1515 if len(l) > 1:
1514 1516 # i18n: "remote" is a keyword
1515 1517 dest = getstring(l[1], _("remote requires a repository path"))
1516 1518 dest = repo.ui.expandpath(dest or 'default')
1517 1519 dest, branches = hg.parseurl(dest)
1518 1520 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1519 1521 if revs:
1520 1522 revs = [repo.lookup(rev) for rev in revs]
1521 1523 other = hg.peer(repo, {}, dest)
1522 1524 n = other.lookup(q)
1523 1525 if n in repo:
1524 1526 r = repo[n].rev()
1525 1527 if r in subset:
1526 1528 return baseset([r])
1527 1529 return baseset()
1528 1530
1529 1531 def removes(repo, subset, x):
1530 1532 """``removes(pattern)``
1531 1533 Changesets which remove files matching pattern.
1532 1534
1533 1535 The pattern without explicit kind like ``glob:`` is expected to be
1534 1536 relative to the current directory and match against a file or a
1535 1537 directory.
1536 1538 """
1537 1539 # i18n: "removes" is a keyword
1538 1540 pat = getstring(x, _("removes requires a pattern"))
1539 1541 return checkstatus(repo, subset, pat, 2)
1540 1542
1541 1543 def rev(repo, subset, x):
1542 1544 """``rev(number)``
1543 1545 Revision with the given numeric identifier.
1544 1546 """
1545 1547 # i18n: "rev" is a keyword
1546 1548 l = getargs(x, 1, 1, _("rev requires one argument"))
1547 1549 try:
1548 1550 # i18n: "rev" is a keyword
1549 1551 l = int(getstring(l[0], _("rev requires a number")))
1550 1552 except (TypeError, ValueError):
1551 1553 # i18n: "rev" is a keyword
1552 1554 raise error.ParseError(_("rev expects a number"))
1553 1555 if l not in repo.changelog and l != node.nullrev:
1554 1556 return baseset()
1555 1557 return subset & baseset([l])
1556 1558
1557 1559 def matching(repo, subset, x):
1558 1560 """``matching(revision [, field])``
1559 1561 Changesets in which a given set of fields match the set of fields in the
1560 1562 selected revision or set.
1561 1563
1562 1564 To match more than one field pass the list of fields to match separated
1563 1565 by spaces (e.g. ``author description``).
1564 1566
1565 1567 Valid fields are most regular revision fields and some special fields.
1566 1568
1567 1569 Regular revision fields are ``description``, ``author``, ``branch``,
1568 1570 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1569 1571 and ``diff``.
1570 1572 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1571 1573 contents of the revision. Two revisions matching their ``diff`` will
1572 1574 also match their ``files``.
1573 1575
1574 1576 Special fields are ``summary`` and ``metadata``:
1575 1577 ``summary`` matches the first line of the description.
1576 1578 ``metadata`` is equivalent to matching ``description user date``
1577 1579 (i.e. it matches the main metadata fields).
1578 1580
1579 1581 ``metadata`` is the default field which is used when no fields are
1580 1582 specified. You can match more than one field at a time.
1581 1583 """
1582 1584 # i18n: "matching" is a keyword
1583 1585 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1584 1586
1585 1587 revs = getset(repo, fullreposet(repo), l[0])
1586 1588
1587 1589 fieldlist = ['metadata']
1588 1590 if len(l) > 1:
1589 1591 fieldlist = getstring(l[1],
1590 1592 # i18n: "matching" is a keyword
1591 1593 _("matching requires a string "
1592 1594 "as its second argument")).split()
1593 1595
1594 1596 # Make sure that there are no repeated fields,
1595 1597 # expand the 'special' 'metadata' field type
1596 1598 # and check the 'files' whenever we check the 'diff'
1597 1599 fields = []
1598 1600 for field in fieldlist:
1599 1601 if field == 'metadata':
1600 1602 fields += ['user', 'description', 'date']
1601 1603 elif field == 'diff':
1602 1604 # a revision matching the diff must also match the files
1603 1605 # since matching the diff is very costly, make sure to
1604 1606 # also match the files first
1605 1607 fields += ['files', 'diff']
1606 1608 else:
1607 1609 if field == 'author':
1608 1610 field = 'user'
1609 1611 fields.append(field)
1610 1612 fields = set(fields)
1611 1613 if 'summary' in fields and 'description' in fields:
1612 1614 # If a revision matches its description it also matches its summary
1613 1615 fields.discard('summary')
1614 1616
1615 1617 # We may want to match more than one field
1616 1618 # Not all fields take the same amount of time to be matched
1617 1619 # Sort the selected fields in order of increasing matching cost
1618 1620 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1619 1621 'files', 'description', 'substate', 'diff']
1620 1622 def fieldkeyfunc(f):
1621 1623 try:
1622 1624 return fieldorder.index(f)
1623 1625 except ValueError:
1624 1626 # assume an unknown field is very costly
1625 1627 return len(fieldorder)
1626 1628 fields = list(fields)
1627 1629 fields.sort(key=fieldkeyfunc)
1628 1630
1629 1631 # Each field will be matched with its own "getfield" function
1630 1632 # which will be added to the getfieldfuncs array of functions
1631 1633 getfieldfuncs = []
1632 1634 _funcs = {
1633 1635 'user': lambda r: repo[r].user(),
1634 1636 'branch': lambda r: repo[r].branch(),
1635 1637 'date': lambda r: repo[r].date(),
1636 1638 'description': lambda r: repo[r].description(),
1637 1639 'files': lambda r: repo[r].files(),
1638 1640 'parents': lambda r: repo[r].parents(),
1639 1641 'phase': lambda r: repo[r].phase(),
1640 1642 'substate': lambda r: repo[r].substate,
1641 1643 'summary': lambda r: repo[r].description().splitlines()[0],
1642 1644 'diff': lambda r: list(repo[r].diff(git=True)),
1643 1645 }
1644 1646 for info in fields:
1645 1647 getfield = _funcs.get(info, None)
1646 1648 if getfield is None:
1647 1649 raise error.ParseError(
1648 1650 # i18n: "matching" is a keyword
1649 1651 _("unexpected field name passed to matching: %s") % info)
1650 1652 getfieldfuncs.append(getfield)
1651 1653 # convert the getfield array of functions into a "getinfo" function
1652 1654 # which returns a list with the field values for a given revision
1653 1655 # (one entry per selected field)
1654 1656 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1655 1657
1656 1658 def matches(x):
1657 1659 for rev in revs:
1658 1660 target = getinfo(rev)
1659 1661 match = True
1660 1662 for n, f in enumerate(getfieldfuncs):
1661 1663 if target[n] != f(x):
1662 1664 match = False
1663 1665 if match:
1664 1666 return True
1665 1667 return False
1666 1668
1667 1669 return subset.filter(matches)
1668 1670
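# A minimal sketch (illustration only, not used by this module) of the
# "cheapest field first" ordering that matching() applies above: requested
# fields are sorted by their position in the cost table, and unknown fields
# are assumed to be the most expensive.
def _examplefieldorder(wanted):
    order = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
             'files', 'description', 'substate', 'diff']
    def cost(f):
        try:
            return order.index(f)
        except ValueError:
            return len(order) # unknown fields are assumed to be very costly
    return sorted(set(wanted), key=cost)
# e.g. _examplefieldorder(['diff', 'user']) -> ['user', 'diff']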
1669 1671 def reverse(repo, subset, x):
1670 1672 """``reverse(set)``
1671 1673 Reverse order of set.
1672 1674 """
1673 1675 l = getset(repo, subset, x)
1674 1676 l.reverse()
1675 1677 return l
1676 1678
1677 1679 def roots(repo, subset, x):
1678 1680 """``roots(set)``
1679 1681 Changesets in set with no parent changeset in set.
1680 1682 """
1681 1683 s = getset(repo, fullreposet(repo), x)
1682 1684 subset = baseset([r for r in s if r in subset])
1683 1685 cs = _children(repo, subset, s)
1684 1686 return subset - cs
1685 1687
1686 1688 def secret(repo, subset, x):
1687 1689 """``secret()``
1688 1690 Changeset in secret phase."""
1689 1691 # i18n: "secret" is a keyword
1690 1692 getargs(x, 0, 0, _("secret takes no arguments"))
1691 1693 phase = repo._phasecache.phase
1692 1694 target = phases.secret
1693 1695 condition = lambda r: phase(repo, r) == target
1694 1696 return subset.filter(condition, cache=False)
1695 1697
1696 1698 def sort(repo, subset, x):
1697 1699 """``sort(set[, [-]key...])``
1698 1700 Sort set by keys. The default sort order is ascending, specify a key
1699 1701 as ``-key`` to sort in descending order.
1700 1702
1701 1703 The keys can be:
1702 1704
1703 1705 - ``rev`` for the revision number,
1704 1706 - ``branch`` for the branch name,
1705 1707 - ``desc`` for the commit message (description),
1706 1708 - ``user`` for user name (``author`` can be used as an alias),
1707 1709 - ``date`` for the commit date
1708 1710 """
1709 1711 # i18n: "sort" is a keyword
1710 1712 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1711 1713 keys = "rev"
1712 1714 if len(l) == 2:
1713 1715 # i18n: "sort" is a keyword
1714 1716 keys = getstring(l[1], _("sort spec must be a string"))
1715 1717
1716 1718 s = l[0]
1717 1719 keys = keys.split()
1718 1720 l = []
1719 1721 def invert(s):
1720 1722 return "".join(chr(255 - ord(c)) for c in s)
1721 1723 revs = getset(repo, subset, s)
1722 1724 if keys == ["rev"]:
1723 1725 revs.sort()
1724 1726 return revs
1725 1727 elif keys == ["-rev"]:
1726 1728 revs.sort(reverse=True)
1727 1729 return revs
1728 1730 for r in revs:
1729 1731 c = repo[r]
1730 1732 e = []
1731 1733 for k in keys:
1732 1734 if k == 'rev':
1733 1735 e.append(r)
1734 1736 elif k == '-rev':
1735 1737 e.append(-r)
1736 1738 elif k == 'branch':
1737 1739 e.append(c.branch())
1738 1740 elif k == '-branch':
1739 1741 e.append(invert(c.branch()))
1740 1742 elif k == 'desc':
1741 1743 e.append(c.description())
1742 1744 elif k == '-desc':
1743 1745 e.append(invert(c.description()))
1744 1746 elif k in 'user author':
1745 1747 e.append(c.user())
1746 1748 elif k in '-user -author':
1747 1749 e.append(invert(c.user()))
1748 1750 elif k == 'date':
1749 1751 e.append(c.date()[0])
1750 1752 elif k == '-date':
1751 1753 e.append(-c.date()[0])
1752 1754 else:
1753 1755 raise error.ParseError(_("unknown sort key %r") % k)
1754 1756 e.append(r)
1755 1757 l.append(e)
1756 1758 l.sort()
1757 1759 return baseset([e[-1] for e in l])
1758 1760
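# Illustration only (hypothetical helper, not used above): the byte-inversion
# trick that sort() relies on for descending string keys. Inverting every
# byte lets a single ascending sort order string keys in reverse (exact as
# long as neither string is a prefix of the other).
def _exampleinvertsort():
    invert = lambda s: "".join(chr(255 - ord(c)) for c in s)
    branches = ['stable', 'default', 'feature']
    return sorted(branches, key=invert)
# -> ['stable', 'feature', 'default'], i.e. descending branch-name order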
1759 1761 def subrepo(repo, subset, x):
1760 1762 """``subrepo([pattern])``
1761 1763 Changesets that add, modify or remove the given subrepo. If no subrepo
1762 1764 pattern is named, any subrepo changes are returned.
1763 1765 """
1764 1766 # i18n: "subrepo" is a keyword
1765 1767 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1766 1768 if len(args) != 0:
1767 1769 pat = getstring(args[0], _("subrepo requires a pattern"))
1768 1770
1769 1771 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1770 1772
1771 1773 def submatches(names):
1772 1774 k, p, m = _stringmatcher(pat)
1773 1775 for name in names:
1774 1776 if m(name):
1775 1777 yield name
1776 1778
1777 1779 def matches(x):
1778 1780 c = repo[x]
1779 1781 s = repo.status(c.p1().node(), c.node(), match=m)
1780 1782
1781 1783 if len(args) == 0:
1782 1784 return s.added or s.modified or s.removed
1783 1785
1784 1786 if s.added:
1785 1787 return util.any(submatches(c.substate.keys()))
1786 1788
1787 1789 if s.modified:
1788 1790 subs = set(c.p1().substate.keys())
1789 1791 subs.update(c.substate.keys())
1790 1792
1791 1793 for path in submatches(subs):
1792 1794 if c.p1().substate.get(path) != c.substate.get(path):
1793 1795 return True
1794 1796
1795 1797 if s.removed:
1796 1798 return util.any(submatches(c.p1().substate.keys()))
1797 1799
1798 1800 return False
1799 1801
1800 1802 return subset.filter(matches)
1801 1803
1802 1804 def _stringmatcher(pattern):
1803 1805 """
1804 1806 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1805 1807 returns the matcher name, pattern, and matcher function.
1806 1808 missing or unknown prefixes are treated as literal matches.
1807 1809
1808 1810 helper for tests:
1809 1811 >>> def test(pattern, *tests):
1810 1812 ... kind, pattern, matcher = _stringmatcher(pattern)
1811 1813 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1812 1814
1813 1815 exact matching (no prefix):
1814 1816 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1815 1817 ('literal', 'abcdefg', [False, False, True])
1816 1818
1817 1819 regex matching ('re:' prefix)
1818 1820 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1819 1821 ('re', 'a.+b', [False, False, True])
1820 1822
1821 1823 force exact matches ('literal:' prefix)
1822 1824 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1823 1825 ('literal', 're:foobar', [False, True])
1824 1826
1825 1827 unknown prefixes are ignored and treated as literals
1826 1828 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1827 1829 ('literal', 'foo:bar', [False, False, True])
1828 1830 """
1829 1831 if pattern.startswith('re:'):
1830 1832 pattern = pattern[3:]
1831 1833 try:
1832 1834 regex = re.compile(pattern)
1833 1835 except re.error, e:
1834 1836 raise error.ParseError(_('invalid regular expression: %s')
1835 1837 % e)
1836 1838 return 're', pattern, regex.search
1837 1839 elif pattern.startswith('literal:'):
1838 1840 pattern = pattern[8:]
1839 1841 return 'literal', pattern, pattern.__eq__
1840 1842
1841 1843 def _substringmatcher(pattern):
1842 1844 kind, pattern, matcher = _stringmatcher(pattern)
1843 1845 if kind == 'literal':
1844 1846 matcher = lambda s: pattern in s
1845 1847 return kind, pattern, matcher
1846 1848
1847 1849 def tag(repo, subset, x):
1848 1850 """``tag([name])``
1849 1851 The specified tag by name, or all tagged revisions if no name is given.
1850 1852
1851 1853 If `name` starts with `re:`, the remainder of the name is treated as
1852 1854 a regular expression. To match a tag that actually starts with `re:`,
1853 1855 use the prefix `literal:`.
1854 1856 """
1855 1857 # i18n: "tag" is a keyword
1856 1858 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1857 1859 cl = repo.changelog
1858 1860 if args:
1859 1861 pattern = getstring(args[0],
1860 1862 # i18n: "tag" is a keyword
1861 1863 _('the argument to tag must be a string'))
1862 1864 kind, pattern, matcher = _stringmatcher(pattern)
1863 1865 if kind == 'literal':
1864 1866 # avoid resolving all tags
1865 1867 tn = repo._tagscache.tags.get(pattern, None)
1866 1868 if tn is None:
1867 1869 raise error.RepoLookupError(_("tag '%s' does not exist")
1868 1870 % pattern)
1869 1871 s = set([repo[tn].rev()])
1870 1872 else:
1871 1873 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1872 1874 else:
1873 1875 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1874 1876 return subset & s
1875 1877
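# Example revset usage of tag() (hypothetical tag names, for illustration):
#
#   tag()                    - all tagged revisions
#   tag('1.0')               - the revision carrying the tag '1.0' exactly
#   tag('re:^release-')      - revisions whose tag matches the regex '^release-'
#   tag('literal:re:x')      - the revision tagged literally 're:x'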
1876 1878 def tagged(repo, subset, x):
1877 1879 return tag(repo, subset, x)
1878 1880
1879 1881 def unstable(repo, subset, x):
1880 1882 """``unstable()``
1881 1883 Non-obsolete changesets with obsolete ancestors.
1882 1884 """
1883 1885 # i18n: "unstable" is a keyword
1884 1886 getargs(x, 0, 0, _("unstable takes no arguments"))
1885 1887 unstables = obsmod.getrevs(repo, 'unstable')
1886 1888 return subset & unstables
1887 1889
1888 1890
1889 1891 def user(repo, subset, x):
1890 1892 """``user(string)``
1891 1893 User name contains string. The match is case-insensitive.
1892 1894
1893 1895 If `string` starts with `re:`, the remainder of the string is treated as
1894 1896 a regular expression. To match a user that actually contains `re:`, use
1895 1897 the prefix `literal:`.
1896 1898 """
1897 1899 return author(repo, subset, x)
1898 1900
1899 1901 def wdir(repo, subset, x):
1900 1902 """``wdir()``
1901 1903 Working directory.
1902 1904 """
1903 1905 # i18n: "wdir" is a keyword
1904 1906 getargs(x, 0, 0, _("wdir takes no arguments"))
1905 1907 if None in subset:
1906 1908 return baseset([None])
1907 1909 return baseset()
1908 1910
1909 1911 # for internal use
1910 1912 def _list(repo, subset, x):
1911 1913 s = getstring(x, "internal error")
1912 1914 if not s:
1913 1915 return baseset()
1914 1916 ls = [repo[r].rev() for r in s.split('\0')]
1915 1917 s = subset
1916 1918 return baseset([r for r in ls if r in s])
1917 1919
1918 1920 # for internal use
1919 1921 def _intlist(repo, subset, x):
1920 1922 s = getstring(x, "internal error")
1921 1923 if not s:
1922 1924 return baseset()
1923 1925 ls = [int(r) for r in s.split('\0')]
1924 1926 s = subset
1925 1927 return baseset([r for r in ls if r in s])
1926 1928
1927 1929 # for internal use
1928 1930 def _hexlist(repo, subset, x):
1929 1931 s = getstring(x, "internal error")
1930 1932 if not s:
1931 1933 return baseset()
1932 1934 cl = repo.changelog
1933 1935 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1934 1936 s = subset
1935 1937 return baseset([r for r in ls if r in s])
1936 1938
1937 1939 symbols = {
1938 1940 "adds": adds,
1939 1941 "all": getall,
1940 1942 "ancestor": ancestor,
1941 1943 "ancestors": ancestors,
1942 1944 "_firstancestors": _firstancestors,
1943 1945 "author": author,
1944 1946 "bisect": bisect,
1945 1947 "bisected": bisected,
1946 1948 "bookmark": bookmark,
1947 1949 "branch": branch,
1948 1950 "branchpoint": branchpoint,
1949 1951 "bumped": bumped,
1950 1952 "bundle": bundle,
1951 1953 "children": children,
1952 1954 "closed": closed,
1953 1955 "contains": contains,
1954 1956 "converted": converted,
1955 1957 "date": date,
1956 1958 "desc": desc,
1957 1959 "descendants": descendants,
1958 1960 "_firstdescendants": _firstdescendants,
1959 1961 "destination": destination,
1960 1962 "divergent": divergent,
1961 1963 "draft": draft,
1962 1964 "extinct": extinct,
1963 1965 "extra": extra,
1964 1966 "file": hasfile,
1965 1967 "filelog": filelog,
1966 1968 "first": first,
1967 1969 "follow": follow,
1968 1970 "_followfirst": _followfirst,
1969 1971 "grep": grep,
1970 1972 "head": head,
1971 1973 "heads": heads,
1972 1974 "hidden": hidden,
1973 1975 "id": node_,
1974 1976 "keyword": keyword,
1975 1977 "last": last,
1976 1978 "limit": limit,
1977 1979 "_matchfiles": _matchfiles,
1978 1980 "max": maxrev,
1979 1981 "merge": merge,
1980 1982 "min": minrev,
1981 1983 "modifies": modifies,
1982 1984 "named": named,
1983 1985 "obsolete": obsolete,
1984 1986 "only": only,
1985 1987 "origin": origin,
1986 1988 "outgoing": outgoing,
1987 1989 "p1": p1,
1988 1990 "p2": p2,
1989 1991 "parents": parents,
1990 1992 "present": present,
1991 1993 "public": public,
1992 1994 "remote": remote,
1993 1995 "removes": removes,
1994 1996 "rev": rev,
1995 1997 "reverse": reverse,
1996 1998 "roots": roots,
1997 1999 "sort": sort,
1998 2000 "secret": secret,
1999 2001 "subrepo": subrepo,
2000 2002 "matching": matching,
2001 2003 "tag": tag,
2002 2004 "tagged": tagged,
2003 2005 "user": user,
2004 2006 "unstable": unstable,
2005 2007 "wdir": wdir,
2006 2008 "_list": _list,
2007 2009 "_intlist": _intlist,
2008 2010 "_hexlist": _hexlist,
2009 2011 }
2010 2012
2011 2013 # symbols which can't be used for a DoS attack for any given input
2012 2014 # (e.g. those which accept regexes as plain strings shouldn't be included)
2013 2015 # functions that just return a lot of changesets (like all) don't count here
2014 2016 safesymbols = set([
2015 2017 "adds",
2016 2018 "all",
2017 2019 "ancestor",
2018 2020 "ancestors",
2019 2021 "_firstancestors",
2020 2022 "author",
2021 2023 "bisect",
2022 2024 "bisected",
2023 2025 "bookmark",
2024 2026 "branch",
2025 2027 "branchpoint",
2026 2028 "bumped",
2027 2029 "bundle",
2028 2030 "children",
2029 2031 "closed",
2030 2032 "converted",
2031 2033 "date",
2032 2034 "desc",
2033 2035 "descendants",
2034 2036 "_firstdescendants",
2035 2037 "destination",
2036 2038 "divergent",
2037 2039 "draft",
2038 2040 "extinct",
2039 2041 "extra",
2040 2042 "file",
2041 2043 "filelog",
2042 2044 "first",
2043 2045 "follow",
2044 2046 "_followfirst",
2045 2047 "head",
2046 2048 "heads",
2047 2049 "hidden",
2048 2050 "id",
2049 2051 "keyword",
2050 2052 "last",
2051 2053 "limit",
2052 2054 "_matchfiles",
2053 2055 "max",
2054 2056 "merge",
2055 2057 "min",
2056 2058 "modifies",
2057 2059 "obsolete",
2058 2060 "only",
2059 2061 "origin",
2060 2062 "outgoing",
2061 2063 "p1",
2062 2064 "p2",
2063 2065 "parents",
2064 2066 "present",
2065 2067 "public",
2066 2068 "remote",
2067 2069 "removes",
2068 2070 "rev",
2069 2071 "reverse",
2070 2072 "roots",
2071 2073 "sort",
2072 2074 "secret",
2073 2075 "matching",
2074 2076 "tag",
2075 2077 "tagged",
2076 2078 "user",
2077 2079 "unstable",
2078 2080 "wdir",
2079 2081 "_list",
2080 2082 "_intlist",
2081 2083 "_hexlist",
2082 2084 ])
2083 2085
2084 2086 methods = {
2085 2087 "range": rangeset,
2086 2088 "dagrange": dagrange,
2087 2089 "string": stringset,
2088 2090 "symbol": symbolset,
2089 2091 "and": andset,
2090 2092 "or": orset,
2091 2093 "not": notset,
2092 2094 "list": listset,
2093 2095 "func": func,
2094 2096 "ancestor": ancestorspec,
2095 2097 "parent": parentspec,
2096 2098 "parentpost": p1,
2097 2099 "only": only,
2098 2100 "onlypost": only,
2099 2101 }
2100 2102
2101 2103 def optimize(x, small):
2102 2104 if x is None:
2103 2105 return 0, x
2104 2106
2105 2107 smallbonus = 1
2106 2108 if small:
2107 2109 smallbonus = .5
2108 2110
2109 2111 op = x[0]
2110 2112 if op == 'minus':
2111 2113 return optimize(('and', x[1], ('not', x[2])), small)
2112 2114 elif op == 'only':
2113 2115 return optimize(('func', ('symbol', 'only'),
2114 2116 ('list', x[1], x[2])), small)
2115 2117 elif op == 'dagrangepre':
2116 2118 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2117 2119 elif op == 'dagrangepost':
2118 2120 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2119 2121 elif op == 'rangepre':
2120 2122 return optimize(('range', ('string', '0'), x[1]), small)
2121 2123 elif op == 'rangepost':
2122 2124 return optimize(('range', x[1], ('string', 'tip')), small)
2123 2125 elif op == 'negate':
2124 2126 return optimize(('string',
2125 2127 '-' + getstring(x[1], _("can't negate that"))), small)
2126 2128 elif op in 'string symbol negate':
2127 2129 return smallbonus, x # single revisions are small
2128 2130 elif op == 'and':
2129 2131 wa, ta = optimize(x[1], True)
2130 2132 wb, tb = optimize(x[2], True)
2131 2133
2132 2134 # (::x and not ::y)/(not ::y and ::x) have a fast path
2133 2135 def isonly(revs, bases):
2134 2136 return (
2135 2137 revs[0] == 'func'
2136 2138 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2137 2139 and bases[0] == 'not'
2138 2140 and bases[1][0] == 'func'
2139 2141 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2140 2142
2141 2143 w = min(wa, wb)
2142 2144 if isonly(ta, tb):
2143 2145 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2144 2146 if isonly(tb, ta):
2145 2147 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2146 2148
2147 2149 if wa > wb:
2148 2150 return w, (op, tb, ta)
2149 2151 return w, (op, ta, tb)
2150 2152 elif op == 'or':
2151 2153 wa, ta = optimize(x[1], False)
2152 2154 wb, tb = optimize(x[2], False)
2153 2155 if wb < wa:
2154 2156 wb, wa = wa, wb
2155 2157 return max(wa, wb), (op, ta, tb)
2156 2158 elif op == 'not':
2157 2159 o = optimize(x[1], not small)
2158 2160 return o[0], (op, o[1])
2159 2161 elif op == 'parentpost':
2160 2162 o = optimize(x[1], small)
2161 2163 return o[0], (op, o[1])
2162 2164 elif op == 'group':
2163 2165 return optimize(x[1], small)
2164 2166 elif op in 'dagrange range list parent ancestorspec':
2165 2167 if op == 'parent':
2166 2168 # x^:y means (x^) : y, not x ^ (:y)
2167 2169 post = ('parentpost', x[1])
2168 2170 if x[2][0] == 'dagrangepre':
2169 2171 return optimize(('dagrange', post, x[2][1]), small)
2170 2172 elif x[2][0] == 'rangepre':
2171 2173 return optimize(('range', post, x[2][1]), small)
2172 2174
2173 2175 wa, ta = optimize(x[1], small)
2174 2176 wb, tb = optimize(x[2], small)
2175 2177 return wa + wb, (op, ta, tb)
2176 2178 elif op == 'func':
2177 2179 f = getstring(x[1], _("not a symbol"))
2178 2180 wa, ta = optimize(x[2], small)
2179 2181 if f in ("author branch closed date desc file grep keyword "
2180 2182 "outgoing user"):
2181 2183 w = 10 # slow
2182 2184 elif f in "modifies adds removes":
2183 2185 w = 30 # slower
2184 2186 elif f == "contains":
2185 2187 w = 100 # very slow
2186 2188 elif f == "ancestor":
2187 2189 w = 1 * smallbonus
2188 2190 elif f in "reverse limit first _intlist":
2189 2191 w = 0
2190 2192 elif f in "sort":
2191 2193 w = 10 # assume most sorts look at changelog
2192 2194 else:
2193 2195 w = 1
2194 2196 return w + wa, (op, x[1], ta)
2195 2197 return 1, x
2196 2198
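# A small sketch (illustration only, never called) of the isonly() fast path
# above: an 'and' of ancestors(a) with a negated ancestors(b) is rewritten
# into a single only(a, b) call.
def _exampleonlyrewrite():
    tree = ('and',
            ('func', ('symbol', 'ancestors'), ('symbol', 'a')),
            ('not', ('func', ('symbol', 'ancestors'), ('symbol', 'b'))))
    return optimize(tree, False)[1]
# -> ('func', ('symbol', 'only'), ('list', ('symbol', 'a'), ('symbol', 'b')))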
2197 2199 _aliasarg = ('func', ('symbol', '_aliasarg'))
2198 2200 def _getaliasarg(tree):
2199 2201 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2200 2202 return X, None otherwise.
2201 2203 """
2202 2204 if (len(tree) == 3 and tree[:2] == _aliasarg
2203 2205 and tree[2][0] == 'string'):
2204 2206 return tree[2][1]
2205 2207 return None
2206 2208
2207 2209 def _checkaliasarg(tree, known=None):
2208 2210 """Check tree contains no _aliasarg construct or only ones which
2209 2211 value is in known. Used to avoid alias placeholders injection.
2210 2212 """
2211 2213 if isinstance(tree, tuple):
2212 2214 arg = _getaliasarg(tree)
2213 2215 if arg is not None and (not known or arg not in known):
2214 2216 raise error.UnknownIdentifier('_aliasarg', [])
2215 2217 for t in tree:
2216 2218 _checkaliasarg(t, known)
2217 2219
2218 2220 # the set of valid characters for the initial letter of symbols in
2219 2221 # alias declarations and definitions
2220 2222 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2221 2223 if c.isalnum() or c in '._@$' or ord(c) > 127)
2222 2224
2223 2225 def _tokenizealias(program, lookup=None):
2224 2226 """Parse alias declaration/definition into a stream of tokens
2225 2227
2226 2228 This also allows symbol names to use ``$`` as an initial letter
2227 2229 (for backward compatibility); callers of this function should
2228 2230 check whether ``$`` is also used for unexpected symbols.
2229 2231 """
2230 2232 return tokenize(program, lookup=lookup,
2231 2233 syminitletters=_aliassyminitletters)
2232 2234
2233 2235 def _parsealiasdecl(decl):
2234 2236 """Parse alias declaration ``decl``
2235 2237
2236 2238 This returns ``(name, tree, args, errorstr)`` tuple:
2237 2239
2238 2240 - ``name``: of declared alias (may be ``decl`` itself at error)
2239 2241 - ``tree``: parse result (or ``None`` at error)
2240 2242 - ``args``: list of alias argument names (or None for symbol declaration)
2241 2243 - ``errorstr``: detail about detected error (or None)
2242 2244
2243 2245 >>> _parsealiasdecl('foo')
2244 2246 ('foo', ('symbol', 'foo'), None, None)
2245 2247 >>> _parsealiasdecl('$foo')
2246 2248 ('$foo', None, None, "'$' not for alias arguments")
2247 2249 >>> _parsealiasdecl('foo::bar')
2248 2250 ('foo::bar', None, None, 'invalid format')
2249 2251 >>> _parsealiasdecl('foo bar')
2250 2252 ('foo bar', None, None, 'at 4: invalid token')
2251 2253 >>> _parsealiasdecl('foo()')
2252 2254 ('foo', ('func', ('symbol', 'foo')), [], None)
2253 2255 >>> _parsealiasdecl('$foo()')
2254 2256 ('$foo()', None, None, "'$' not for alias arguments")
2255 2257 >>> _parsealiasdecl('foo($1, $2)')
2256 2258 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2257 2259 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2258 2260 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2259 2261 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2260 2262 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2261 2263 >>> _parsealiasdecl('foo(bar($1, $2))')
2262 2264 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2263 2265 >>> _parsealiasdecl('foo("string")')
2264 2266 ('foo("string")', None, None, 'invalid argument list')
2265 2267 >>> _parsealiasdecl('foo($1, $2')
2266 2268 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2267 2269 >>> _parsealiasdecl('foo("string')
2268 2270 ('foo("string', None, None, 'at 5: unterminated string')
2269 2271 >>> _parsealiasdecl('foo($1, $2, $1)')
2270 2272 ('foo', None, None, 'argument names collide with each other')
2271 2273 """
2272 2274 p = parser.parser(_tokenizealias, elements)
2273 2275 try:
2274 2276 tree, pos = p.parse(decl)
2275 2277 if (pos != len(decl)):
2276 2278 raise error.ParseError(_('invalid token'), pos)
2277 2279
2278 2280 if isvalidsymbol(tree):
2279 2281 # "name = ...." style
2280 2282 name = getsymbol(tree)
2281 2283 if name.startswith('$'):
2282 2284 return (decl, None, None, _("'$' not for alias arguments"))
2283 2285 return (name, ('symbol', name), None, None)
2284 2286
2285 2287 if isvalidfunc(tree):
2286 2288 # "name(arg, ....) = ...." style
2287 2289 name = getfuncname(tree)
2288 2290 if name.startswith('$'):
2289 2291 return (decl, None, None, _("'$' not for alias arguments"))
2290 2292 args = []
2291 2293 for arg in getfuncargs(tree):
2292 2294 if not isvalidsymbol(arg):
2293 2295 return (decl, None, None, _("invalid argument list"))
2294 2296 args.append(getsymbol(arg))
2295 2297 if len(args) != len(set(args)):
2296 2298 return (name, None, None,
2297 2299 _("argument names collide with each other"))
2298 2300 return (name, ('func', ('symbol', name)), args, None)
2299 2301
2300 2302 return (decl, None, None, _("invalid format"))
2301 2303 except error.ParseError, inst:
2302 2304 return (decl, None, None, parseerrordetail(inst))
2303 2305
2304 2306 def _parsealiasdefn(defn, args):
2305 2307 """Parse alias definition ``defn``
2306 2308
2307 2309 This function also replaces alias argument references in the
2308 2310 specified definition by ``_aliasarg(ARGNAME)``.
2309 2311
2310 2312 ``args`` is a list of alias argument names, or None if the alias
2311 2313 is declared as a symbol.
2312 2314
2313 2315 This returns "tree" as parsing result.
2314 2316
2315 2317 >>> args = ['$1', '$2', 'foo']
2316 2318 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2317 2319 (or
2318 2320 (func
2319 2321 ('symbol', '_aliasarg')
2320 2322 ('string', '$1'))
2321 2323 (func
2322 2324 ('symbol', '_aliasarg')
2323 2325 ('string', 'foo')))
2324 2326 >>> try:
2325 2327 ... _parsealiasdefn('$1 or $bar', args)
2326 2328 ... except error.ParseError, inst:
2327 2329 ... print parseerrordetail(inst)
2328 2330 at 6: '$' not for alias arguments
2329 2331 >>> args = ['$1', '$10', 'foo']
2330 2332 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2331 2333 (or
2332 2334 (func
2333 2335 ('symbol', '_aliasarg')
2334 2336 ('string', '$10'))
2335 2337 ('symbol', 'foobar'))
2336 2338 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2337 2339 (or
2338 2340 ('string', '$1')
2339 2341 ('string', 'foo'))
2340 2342 """
2341 2343 def tokenizedefn(program, lookup=None):
2342 2344 if args:
2343 2345 argset = set(args)
2344 2346 else:
2345 2347 argset = set()
2346 2348
2347 2349 for t, value, pos in _tokenizealias(program, lookup=lookup):
2348 2350 if t == 'symbol':
2349 2351 if value in argset:
2350 2352 # emulate tokenization of "_aliasarg('ARGNAME')":
2351 2353 # "_aliasarg()" is an unknown symbol only used separate
2352 2354 # alias argument placeholders from regular strings.
2353 2355 yield ('symbol', '_aliasarg', pos)
2354 2356 yield ('(', None, pos)
2355 2357 yield ('string', value, pos)
2356 2358 yield (')', None, pos)
2357 2359 continue
2358 2360 elif value.startswith('$'):
2359 2361 raise error.ParseError(_("'$' not for alias arguments"),
2360 2362 pos)
2361 2363 yield (t, value, pos)
2362 2364
2363 2365 p = parser.parser(tokenizedefn, elements)
2364 2366 tree, pos = p.parse(defn)
2365 2367 if pos != len(defn):
2366 2368 raise error.ParseError(_('invalid token'), pos)
2367 2369 return tree
2368 2370
2369 2371 class revsetalias(object):
2370 2372 # whether own `error` information is already shown or not.
2371 2373 # this avoids showing the same warning multiple times at each `findaliases`.
2372 2374 warned = False
2373 2375
2374 2376 def __init__(self, name, value):
2375 2377 '''Aliases like:
2376 2378
2377 2379 h = heads(default)
2378 2380 b($1) = ancestors($1) - ancestors(default)
2379 2381 '''
2380 2382 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2381 2383 if self.error:
2382 2384 self.error = _('failed to parse the declaration of revset alias'
2383 2385 ' "%s": %s') % (self.name, self.error)
2384 2386 return
2385 2387
2386 2388 try:
2387 2389 self.replacement = _parsealiasdefn(value, self.args)
2388 2390 # Check for placeholder injection
2389 2391 _checkaliasarg(self.replacement, self.args)
2390 2392 except error.ParseError, inst:
2391 2393 self.error = _('failed to parse the definition of revset alias'
2392 2394 ' "%s": %s') % (self.name, parseerrordetail(inst))
2393 2395
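# For illustration: aliases like the ones in the docstring above normally
# come from the user's configuration (hypothetical hgrc excerpt), which
# findaliases() below reads via ui.configitems('revsetalias'):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)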
2394 2396 def _getalias(aliases, tree):
2395 2397 """If tree looks like an unexpanded alias, return it. Return None
2396 2398 otherwise.
2397 2399 """
2398 2400 if isinstance(tree, tuple) and tree:
2399 2401 if tree[0] == 'symbol' and len(tree) == 2:
2400 2402 name = tree[1]
2401 2403 alias = aliases.get(name)
2402 2404 if alias and alias.args is None and alias.tree == tree:
2403 2405 return alias
2404 2406 if tree[0] == 'func' and len(tree) > 1:
2405 2407 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2406 2408 name = tree[1][1]
2407 2409 alias = aliases.get(name)
2408 2410 if alias and alias.args is not None and alias.tree == tree[:2]:
2409 2411 return alias
2410 2412 return None
2411 2413
2412 2414 def _expandargs(tree, args):
2413 2415 """Replace _aliasarg instances with the substitution value of the
2414 2416 same name in args, recursively.
2415 2417 """
2416 2418 if not tree or not isinstance(tree, tuple):
2417 2419 return tree
2418 2420 arg = _getaliasarg(tree)
2419 2421 if arg is not None:
2420 2422 return args[arg]
2421 2423 return tuple(_expandargs(t, args) for t in tree)
2422 2424
2423 2425 def _expandaliases(aliases, tree, expanding, cache):
2424 2426 """Expand aliases in tree, recursively.
2425 2427
2426 2428 'aliases' is a dictionary mapping user defined aliases to
2427 2429 revsetalias objects.
2428 2430 """
2429 2431 if not isinstance(tree, tuple):
2430 2432 # Do not expand raw strings
2431 2433 return tree
2432 2434 alias = _getalias(aliases, tree)
2433 2435 if alias is not None:
2434 2436 if alias.error:
2435 2437 raise util.Abort(alias.error)
2436 2438 if alias in expanding:
2437 2439 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2438 2440 'detected') % alias.name)
2439 2441 expanding.append(alias)
2440 2442 if alias.name not in cache:
2441 2443 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2442 2444 expanding, cache)
2443 2445 result = cache[alias.name]
2444 2446 expanding.pop()
2445 2447 if alias.args is not None:
2446 2448 l = getlist(tree[2])
2447 2449 if len(l) != len(alias.args):
2448 2450 raise error.ParseError(
2449 2451 _('invalid number of arguments: %s') % len(l))
2450 2452 l = [_expandaliases(aliases, a, [], cache) for a in l]
2451 2453 result = _expandargs(result, dict(zip(alias.args, l)))
2452 2454 else:
2453 2455 result = tuple(_expandaliases(aliases, t, expanding, cache)
2454 2456 for t in tree)
2455 2457 return result
2456 2458
2457 2459 def findaliases(ui, tree, showwarning=None):
2458 2460 _checkaliasarg(tree)
2459 2461 aliases = {}
2460 2462 for k, v in ui.configitems('revsetalias'):
2461 2463 alias = revsetalias(k, v)
2462 2464 aliases[alias.name] = alias
2463 2465 tree = _expandaliases(aliases, tree, [], {})
2464 2466 if showwarning:
2465 2467 # warn about problematic (but unreferenced) aliases
2466 2468 for name, alias in sorted(aliases.iteritems()):
2467 2469 if alias.error and not alias.warned:
2468 2470 showwarning(_('warning: %s\n') % (alias.error))
2469 2471 alias.warned = True
2470 2472 return tree
2471 2473
2472 2474 def foldconcat(tree):
2473 2475 """Fold elements to be concatenated by `##`
2474 2476 """
2475 2477 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2476 2478 return tree
2477 2479 if tree[0] == '_concat':
2478 2480 pending = [tree]
2479 2481 l = []
2480 2482 while pending:
2481 2483 e = pending.pop()
2482 2484 if e[0] == '_concat':
2483 2485 pending.extend(reversed(e[1:]))
2484 2486 elif e[0] in ('string', 'symbol'):
2485 2487 l.append(e[1])
2486 2488 else:
2487 2489 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2488 2490 raise error.ParseError(msg)
2489 2491 return ('string', ''.join(l))
2490 2492 else:
2491 2493 return tuple(foldconcat(t) for t in tree)
2492 2494
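# Minimal sketch (illustration only) of what foldconcat() does to a parsed
# "##" expression: adjacent string/symbol operands collapse into one string.
def _examplefoldconcat():
    tree = ('_concat', ('string', 'release_'), ('symbol', '1.0'))
    return foldconcat(tree)
# -> ('string', 'release_1.0')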
2493 2495 def parse(spec, lookup=None):
2494 2496 p = parser.parser(tokenize, elements)
2495 2497 return p.parse(spec, lookup=lookup)
2496 2498
2497 2499 def posttreebuilthook(tree, repo):
2498 2500 # hook for extensions to execute code on the optimized tree
2499 2501 pass
2500 2502
2501 2503 def match(ui, spec, repo=None):
2502 2504 if not spec:
2503 2505 raise error.ParseError(_("empty query"))
2504 2506 lookup = None
2505 2507 if repo:
2506 2508 lookup = repo.__contains__
2507 2509 tree, pos = parse(spec, lookup)
2508 2510 if (pos != len(spec)):
2509 2511 raise error.ParseError(_("invalid token"), pos)
2510 2512 if ui:
2511 2513 tree = findaliases(ui, tree, showwarning=ui.warn)
2512 2514 tree = foldconcat(tree)
2513 2515 weight, tree = optimize(tree, True)
2514 2516 posttreebuilthook(tree, repo)
2515 2517 def mfunc(repo, subset=None):
2516 2518 if subset is None:
2517 2519 subset = fullreposet(repo)
2518 2520 if util.safehasattr(subset, 'isascending'):
2519 2521 result = getset(repo, subset, tree)
2520 2522 else:
2521 2523 result = getset(repo, baseset(subset), tree)
2522 2524 return result
2523 2525 return mfunc
2524 2526
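# Typical use from other Mercurial code (hypothetical call sites, assuming a
# ui and repo are at hand): match() compiles the expression once, and the
# returned mfunc can then be evaluated against a repo and an optional subset.
#
#   m = match(repo.ui, 'head() and not closed()', repo)
#   revs = m(repo)                # a smartset of matching revisions
#   revs = m(repo, subset=[0, 1]) # restrict evaluation to the given subset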
2525 2527 def formatspec(expr, *args):
2526 2528 '''
2527 2529 This is a convenience function for using revsets internally, and
2528 2530 escapes arguments appropriately. Aliases are intentionally ignored
2529 2531 so that intended expression behavior isn't accidentally subverted.
2530 2532
2531 2533 Supported arguments:
2532 2534
2533 2535 %r = revset expression, parenthesized
2534 2536 %d = int(arg), no quoting
2535 2537 %s = string(arg), escaped and single-quoted
2536 2538 %b = arg.branch(), escaped and single-quoted
2537 2539 %n = hex(arg), single-quoted
2538 2540 %% = a literal '%'
2539 2541
2540 2542 Prefixing the type with 'l' specifies a parenthesized list of that type.
2541 2543
2542 2544 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2543 2545 '(10 or 11):: and ((this()) or (that()))'
2544 2546 >>> formatspec('%d:: and not %d::', 10, 20)
2545 2547 '10:: and not 20::'
2546 2548 >>> formatspec('%ld or %ld', [], [1])
2547 2549 "_list('') or 1"
2548 2550 >>> formatspec('keyword(%s)', 'foo\\xe9')
2549 2551 "keyword('foo\\\\xe9')"
2550 2552 >>> b = lambda: 'default'
2551 2553 >>> b.branch = b
2552 2554 >>> formatspec('branch(%b)', b)
2553 2555 "branch('default')"
2554 2556 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2555 2557 "root(_list('a\\x00b\\x00c\\x00d'))"
2556 2558 '''
2557 2559
2558 2560 def quote(s):
2559 2561 return repr(str(s))
2560 2562
2561 2563 def argtype(c, arg):
2562 2564 if c == 'd':
2563 2565 return str(int(arg))
2564 2566 elif c == 's':
2565 2567 return quote(arg)
2566 2568 elif c == 'r':
2567 2569 parse(arg) # make sure syntax errors are confined
2568 2570 return '(%s)' % arg
2569 2571 elif c == 'n':
2570 2572 return quote(node.hex(arg))
2571 2573 elif c == 'b':
2572 2574 return quote(arg.branch())
2573 2575
2574 2576 def listexp(s, t):
2575 2577 l = len(s)
2576 2578 if l == 0:
2577 2579 return "_list('')"
2578 2580 elif l == 1:
2579 2581 return argtype(t, s[0])
2580 2582 elif t == 'd':
2581 2583 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2582 2584 elif t == 's':
2583 2585 return "_list('%s')" % "\0".join(s)
2584 2586 elif t == 'n':
2585 2587 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2586 2588 elif t == 'b':
2587 2589 return "_list('%s')" % "\0".join(a.branch() for a in s)
2588 2590
2589 2591 m = l // 2
2590 2592 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2591 2593
2592 2594 ret = ''
2593 2595 pos = 0
2594 2596 arg = 0
2595 2597 while pos < len(expr):
2596 2598 c = expr[pos]
2597 2599 if c == '%':
2598 2600 pos += 1
2599 2601 d = expr[pos]
2600 2602 if d == '%':
2601 2603 ret += d
2602 2604 elif d in 'dsnbr':
2603 2605 ret += argtype(d, args[arg])
2604 2606 arg += 1
2605 2607 elif d == 'l':
2606 2608 # a list of some type
2607 2609 pos += 1
2608 2610 d = expr[pos]
2609 2611 ret += listexp(list(args[arg]), d)
2610 2612 arg += 1
2611 2613 else:
2612 2614 raise util.Abort('unexpected revspec format character %s' % d)
2613 2615 else:
2614 2616 ret += c
2615 2617 pos += 1
2616 2618
2617 2619 return ret
2618 2620
2619 2621 def prettyformat(tree):
2620 2622 def _prettyformat(tree, level, lines):
2621 2623 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2622 2624 lines.append((level, str(tree)))
2623 2625 else:
2624 2626 lines.append((level, '(%s' % tree[0]))
2625 2627 for s in tree[1:]:
2626 2628 _prettyformat(s, level + 1, lines)
2627 2629 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2628 2630
2629 2631 lines = []
2630 2632 _prettyformat(tree, 0, lines)
2631 2633 output = '\n'.join((' '*l + s) for l, s in lines)
2632 2634 return output
2633 2635
2634 2636 def depth(tree):
2635 2637 if isinstance(tree, tuple):
2636 2638 return max(map(depth, tree)) + 1
2637 2639 else:
2638 2640 return 0
2639 2641
2640 2642 def funcsused(tree):
2641 2643 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2642 2644 return set()
2643 2645 else:
2644 2646 funcs = set()
2645 2647 for s in tree[1:]:
2646 2648 funcs |= funcsused(s)
2647 2649 if tree[0] == 'func':
2648 2650 funcs.add(tree[1][1])
2649 2651 return funcs
2650 2652
2651 2653 class abstractsmartset(object):
2652 2654
2653 2655 def __nonzero__(self):
2654 2656 """True if the smartset is not empty"""
2655 2657 raise NotImplementedError()
2656 2658
2657 2659 def __contains__(self, rev):
2658 2660 """provide fast membership testing"""
2659 2661 raise NotImplementedError()
2660 2662
2661 2663 def __iter__(self):
2662 2664 """iterate the set in the order it is supposed to be iterated"""
2663 2665 raise NotImplementedError()
2664 2666
2665 2667 # Attributes containing a function to perform a fast iteration in a given
2666 2668 # direction. A smartset can have none, one, or both defined.
2667 2669 #
2668 2670 # Default value is None instead of a function returning None to avoid
2669 2671 # initializing an iterator just for testing if a fast method exists.
2670 2672 fastasc = None
2671 2673 fastdesc = None
2672 2674
2673 2675 def isascending(self):
2674 2676 """True if the set will iterate in ascending order"""
2675 2677 raise NotImplementedError()
2676 2678
2677 2679 def isdescending(self):
2678 2680 """True if the set will iterate in descending order"""
2679 2681 raise NotImplementedError()
2680 2682
2681 2683 def min(self):
2682 2684 """return the minimum element in the set"""
2683 2685 if self.fastasc is not None:
2684 2686 for r in self.fastasc():
2685 2687 return r
2686 2688 raise ValueError('arg is an empty sequence')
2687 2689 return min(self)
2688 2690
2689 2691 def max(self):
2690 2692 """return the maximum element in the set"""
2691 2693 if self.fastdesc is not None:
2692 2694 for r in self.fastdesc():
2693 2695 return r
2694 2696 raise ValueError('arg is an empty sequence')
2695 2697 return max(self)
2696 2698
2697 2699 def first(self):
2698 2700 """return the first element in the set (user iteration perspective)
2699 2701
2700 2702 Return None if the set is empty"""
2701 2703 raise NotImplementedError()
2702 2704
2703 2705 def last(self):
2704 2706 """return the last element in the set (user iteration perspective)
2705 2707
2706 2708 Return None if the set is empty"""
2707 2709 raise NotImplementedError()
2708 2710
2709 2711 def __len__(self):
2710 2712 """return the length of the smartsets
2711 2713
2712 2714 This can be expensive on smartset that could be lazy otherwise."""
2713 2715 raise NotImplementedError()
2714 2716
2715 2717 def reverse(self):
2716 2718 """reverse the expected iteration order"""
2717 2719 raise NotImplementedError()
2718 2720
2719 2721 def sort(self, reverse=True):
2720 2722 """get the set to iterate in an ascending or descending order"""
2721 2723 raise NotImplementedError()
2722 2724
2723 2725 def __and__(self, other):
2724 2726 """Returns a new object with the intersection of the two collections.
2725 2727
2726 2728 This is part of the mandatory API for smartset."""
2727 2729 if isinstance(other, fullreposet):
2728 2730 return self
2729 2731 return self.filter(other.__contains__, cache=False)
2730 2732
2731 2733 def __add__(self, other):
2732 2734 """Returns a new object with the union of the two collections.
2733 2735
2734 2736 This is part of the mandatory API for smartset."""
2735 2737 return addset(self, other)
2736 2738
2737 2739 def __sub__(self, other):
2738 2740 """Returns a new object with the substraction of the two collections.
2739 2741
2740 2742 This is part of the mandatory API for smartset."""
2741 2743 c = other.__contains__
2742 2744 return self.filter(lambda r: not c(r), cache=False)
2743 2745
2744 2746 def filter(self, condition, cache=True):
2745 2747 """Returns this smartset filtered by condition as a new smartset.
2746 2748
2747 2749 `condition` is a callable which takes a revision number and returns a
2748 2750 boolean.
2749 2751
2750 2752 This is part of the mandatory API for smartset."""
2751 2753 # builtin functions cannot be cached, but they do not need to be
2752 2754 if cache and util.safehasattr(condition, 'func_code'):
2753 2755 condition = util.cachefunc(condition)
2754 2756 return filteredset(self, condition)
2755 2757
2756 2758 class baseset(abstractsmartset):
2757 2759 """Basic data structure that represents a revset and contains the basic
2758 2760 operation that it should be able to perform.
2759 2761
2760 2762 Every method in this class should be implemented by any smartset class.
2761 2763 """
2762 2764 def __init__(self, data=()):
2763 2765 if not isinstance(data, list):
2764 2766 data = list(data)
2765 2767 self._list = data
2766 2768 self._ascending = None
2767 2769
2768 2770 @util.propertycache
2769 2771 def _set(self):
2770 2772 return set(self._list)
2771 2773
2772 2774 @util.propertycache
2773 2775 def _asclist(self):
2774 2776 asclist = self._list[:]
2775 2777 asclist.sort()
2776 2778 return asclist
2777 2779
2778 2780 def __iter__(self):
2779 2781 if self._ascending is None:
2780 2782 return iter(self._list)
2781 2783 elif self._ascending:
2782 2784 return iter(self._asclist)
2783 2785 else:
2784 2786 return reversed(self._asclist)
2785 2787
2786 2788 def fastasc(self):
2787 2789 return iter(self._asclist)
2788 2790
2789 2791 def fastdesc(self):
2790 2792 return reversed(self._asclist)
2791 2793
2792 2794 @util.propertycache
2793 2795 def __contains__(self):
2794 2796 return self._set.__contains__
2795 2797
2796 2798 def __nonzero__(self):
2797 2799 return bool(self._list)
2798 2800
2799 2801 def sort(self, reverse=False):
2800 2802 self._ascending = not bool(reverse)
2801 2803
2802 2804 def reverse(self):
2803 2805 if self._ascending is None:
2804 2806 self._list.reverse()
2805 2807 else:
2806 2808 self._ascending = not self._ascending
2807 2809
2808 2810 def __len__(self):
2809 2811 return len(self._list)
2810 2812
2811 2813 def isascending(self):
2812 2814 """Returns True if the collection is ascending order, False if not.
2813 2815
2814 2816 This is part of the mandatory API for smartset."""
2815 2817 if len(self) <= 1:
2816 2818 return True
2817 2819 return self._ascending is not None and self._ascending
2818 2820
2819 2821 def isdescending(self):
2820 2822 """Returns True if the collection is descending order, False if not.
2821 2823
2822 2824 This is part of the mandatory API for smartset."""
2823 2825 if len(self) <= 1:
2824 2826 return True
2825 2827 return self._ascending is not None and not self._ascending
2826 2828
2827 2829 def first(self):
2828 2830 if self:
2829 2831 if self._ascending is None:
2830 2832 return self._list[0]
2831 2833 elif self._ascending:
2832 2834 return self._asclist[0]
2833 2835 else:
2834 2836 return self._asclist[-1]
2835 2837 return None
2836 2838
2837 2839 def last(self):
2838 2840 if self:
2839 2841 if self._ascending is None:
2840 2842 return self._list[-1]
2841 2843 elif self._ascending:
2842 2844 return self._asclist[-1]
2843 2845 else:
2844 2846 return self._asclist[0]
2845 2847 return None
2846 2848
2847 2849 def __repr__(self):
2848 2850 d = {None: '', False: '-', True: '+'}[self._ascending]
2849 2851 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2850 2852
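# A short sketch (illustration only) of the smartset operators inherited from
# abstractsmartset, with baseset as the concrete type: '&' filters, '+'
# builds a lazy addset union, '-' subtracts.
def _examplebasesetops():
    a = baseset([0, 1, 2, 3])
    b = baseset([2, 3, 4])
    return list(a & b), list(a + b), list(a - b)
# -> ([2, 3], [0, 1, 2, 3, 4], [0, 1])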
2851 2853 class filteredset(abstractsmartset):
2852 2854 """Duck type for baseset class which iterates lazily over the revisions in
2853 2855 the subset and contains a function which tests for membership in the
2854 2856 revset
2855 2857 """
2856 2858 def __init__(self, subset, condition=lambda x: True):
2857 2859 """
2858 2860 condition: a function that decides whether a revision in the subset
2859 2861 belongs to the revset or not.
2860 2862 """
2861 2863 self._subset = subset
2862 2864 self._condition = condition
2863 2865 self._cache = {}
2864 2866
2865 2867 def __contains__(self, x):
2866 2868 c = self._cache
2867 2869 if x not in c:
2868 2870 v = c[x] = x in self._subset and self._condition(x)
2869 2871 return v
2870 2872 return c[x]
2871 2873
2872 2874 def __iter__(self):
2873 2875 return self._iterfilter(self._subset)
2874 2876
2875 2877 def _iterfilter(self, it):
2876 2878 cond = self._condition
2877 2879 for x in it:
2878 2880 if cond(x):
2879 2881 yield x
2880 2882
2881 2883 @property
2882 2884 def fastasc(self):
2883 2885 it = self._subset.fastasc
2884 2886 if it is None:
2885 2887 return None
2886 2888 return lambda: self._iterfilter(it())
2887 2889
2888 2890 @property
2889 2891 def fastdesc(self):
2890 2892 it = self._subset.fastdesc
2891 2893 if it is None:
2892 2894 return None
2893 2895 return lambda: self._iterfilter(it())
2894 2896
2895 2897 def __nonzero__(self):
2896 2898 for r in self:
2897 2899 return True
2898 2900 return False
2899 2901
2900 2902 def __len__(self):
2901 2903 # Basic implementation to be changed in future patches.
2902 2904 l = baseset([r for r in self])
2903 2905 return len(l)
2904 2906
2905 2907 def sort(self, reverse=False):
2906 2908 self._subset.sort(reverse=reverse)
2907 2909
2908 2910 def reverse(self):
2909 2911 self._subset.reverse()
2910 2912
2911 2913 def isascending(self):
2912 2914 return self._subset.isascending()
2913 2915
2914 2916 def isdescending(self):
2915 2917 return self._subset.isdescending()
2916 2918
2917 2919 def first(self):
2918 2920 for x in self:
2919 2921 return x
2920 2922 return None
2921 2923
2922 2924 def last(self):
2923 2925 it = None
2924 2926 if self._subset.isascending():
2925 2927 it = self.fastdesc
2926 2928 elif self._subset.isdescending():
2927 2929 it = self.fastasc
2928 2930 if it is None:
2929 2931 # slowly consume everything. This needs improvement
2930 2932 it = lambda: reversed(list(self))
2931 2933 for x in it():
2932 2934 return x
2933 2935 return None
2934 2936
2935 2937 def __repr__(self):
2936 2938 return '<%s %r>' % (type(self).__name__, self._subset)
2937 2939
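# Illustration only: filter() wraps a smartset in a lazy filteredset; the
# condition is evaluated per revision on demand and the results are cached.
def _examplefilteredset():
    s = baseset([4, 0, 2, 3, 1])
    evens = s.filter(lambda r: r % 2 == 0)
    return list(evens), 3 in evens
# -> ([4, 0, 2], False)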
2938 2940 class addset(abstractsmartset):
2939 2941 """Represent the addition of two sets
2940 2942
2941 2943 Wrapper structure for lazily adding two structures without losing much
2942 2944 performance on the __contains__ method
2943 2945
2944 2946 If the ascending attribute is set, that means the two structures are
2945 2947 ordered in either an ascending or descending way. Therefore, we can add
2946 2948 them maintaining the order by iterating over both at the same time
2947 2949 """
2948 2950 def __init__(self, revs1, revs2, ascending=None):
2949 2951 self._r1 = revs1
2950 2952 self._r2 = revs2
2951 2953 self._iter = None
2952 2954 self._ascending = ascending
2953 2955 self._genlist = None
2954 2956 self._asclist = None
2955 2957
2956 2958 def __len__(self):
2957 2959 return len(self._list)
2958 2960
2959 2961 def __nonzero__(self):
2960 2962 return bool(self._r1) or bool(self._r2)
2961 2963
2962 2964 @util.propertycache
2963 2965 def _list(self):
2964 2966 if not self._genlist:
2965 2967 self._genlist = baseset(self._iterator())
2966 2968 return self._genlist
2967 2969
2968 2970 def _iterator(self):
2969 2971 """Iterate over both collections without repeating elements
2970 2972
2971 2973 If the ascending attribute is not set, iterate over the first one and
2972 2974 then over the second one, checking for membership against the first one
2973 2975 so we don't yield any duplicates.
2974 2976
2975 2977 If the ascending attribute is set, iterate over both collections at the
2976 2978 same time, yielding only one value at a time in the given order.
2977 2979 """
2978 2980 if self._ascending is None:
2979 2981 def gen():
2980 2982 for r in self._r1:
2981 2983 yield r
2982 2984 inr1 = self._r1.__contains__
2983 2985 for r in self._r2:
2984 2986 if not inr1(r):
2985 2987 yield r
2986 2988 gen = gen()
2987 2989 else:
2988 2990 iter1 = iter(self._r1)
2989 2991 iter2 = iter(self._r2)
2990 2992 gen = self._iterordered(self._ascending, iter1, iter2)
2991 2993 return gen
2992 2994
2993 2995 def __iter__(self):
2994 2996 if self._ascending is None:
2995 2997 if self._genlist:
2996 2998 return iter(self._genlist)
2997 2999 return iter(self._iterator())
2998 3000 self._trysetasclist()
2999 3001 if self._ascending:
3000 3002 it = self.fastasc
3001 3003 else:
3002 3004 it = self.fastdesc
3003 3005 if it is None:
3004 3006 # consume the gen and try again
3005 3007 self._list
3006 3008 return iter(self)
3007 3009 return it()
3008 3010
3009 3011 def _trysetasclist(self):
3010 3012 """populate the _asclist attribute if possible and necessary"""
3011 3013 if self._genlist is not None and self._asclist is None:
3012 3014 self._asclist = sorted(self._genlist)
3013 3015
3014 3016 @property
3015 3017 def fastasc(self):
3016 3018 self._trysetasclist()
3017 3019 if self._asclist is not None:
3018 3020 return self._asclist.__iter__
3019 3021 iter1 = self._r1.fastasc
3020 3022 iter2 = self._r2.fastasc
3021 3023 if None in (iter1, iter2):
3022 3024 return None
3023 3025 return lambda: self._iterordered(True, iter1(), iter2())
3024 3026
3025 3027 @property
3026 3028 def fastdesc(self):
3027 3029 self._trysetasclist()
3028 3030 if self._asclist is not None:
3029 3031 return self._asclist.__reversed__
3030 3032 iter1 = self._r1.fastdesc
3031 3033 iter2 = self._r2.fastdesc
3032 3034 if None in (iter1, iter2):
3033 3035 return None
3034 3036 return lambda: self._iterordered(False, iter1(), iter2())
3035 3037
3036 3038 def _iterordered(self, ascending, iter1, iter2):
3037 3039 """produce an ordered iteration from two iterators with the same order
3038 3040
3039 3041 The ascending parameter is used to indicate the iteration direction.
3040 3042 """
3041 3043 choice = max
3042 3044 if ascending:
3043 3045 choice = min
3044 3046
3045 3047 val1 = None
3046 3048 val2 = None
3047 3049
3051 3053 try:
3052 3054 # Consume both iterators in an ordered way until one is
3053 3055 # empty
3054 3056 while True:
3055 3057 if val1 is None:
3056 3058 val1 = iter1.next()
3057 3059 if val2 is None:
3058 3060 val2 = iter2.next()
3059 3061 next = choice(val1, val2)
3060 3062 yield next
3061 3063 if val1 == next:
3062 3064 val1 = None
3063 3065 if val2 == next:
3064 3066 val2 = None
3065 3067 except StopIteration:
3066 3068 # Flush any remaining values and consume the other one
3067 3069 it = iter2
3068 3070 if val1 is not None:
3069 3071 yield val1
3070 3072 it = iter1
3071 3073 elif val2 is not None:
3072 3074 # might have been equality and both are empty
3073 3075 yield val2
3074 3076 for val in it:
3075 3077 yield val
3076 3078
3077 3079 def __contains__(self, x):
3078 3080 return x in self._r1 or x in self._r2
3079 3081
3080 3082 def sort(self, reverse=False):
3081 3083 """Sort the added set
3082 3084
3083 3085 For this we use the cached list with all the generated values and if we
3084 3086 know they are ascending or descending we can sort them in a smart way.
3085 3087 """
3086 3088 self._ascending = not reverse
3087 3089
3088 3090 def isascending(self):
3089 3091 return self._ascending is not None and self._ascending
3090 3092
3091 3093 def isdescending(self):
3092 3094 return self._ascending is not None and not self._ascending
3093 3095
3094 3096 def reverse(self):
3095 3097 if self._ascending is None:
3096 3098 self._list.reverse()
3097 3099 else:
3098 3100 self._ascending = not self._ascending
3099 3101
3100 3102 def first(self):
3101 3103 for x in self:
3102 3104 return x
3103 3105 return None
3104 3106
3105 3107 def last(self):
3106 3108 self.reverse()
3107 3109 val = self.first()
3108 3110 self.reverse()
3109 3111 return val
3110 3112
3111 3113 def __repr__(self):
3112 3114 d = {None: '', False: '-', True: '+'}[self._ascending]
3113 3115 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3114 3116
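# Illustration only: when both inputs are known to be ascending, addset
# merges them with _iterordered() instead of first building the combined
# list, and duplicate revisions are yielded only once.
def _exampleaddsetordered():
    xs = baseset([1, 3, 5])
    ys = baseset([2, 3, 4])
    return list(addset(xs, ys, ascending=True))
# -> [1, 2, 3, 4, 5]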
3115 3117 class generatorset(abstractsmartset):
3116 3118 """Wrap a generator for lazy iteration
3117 3119
3118 3120 Wrapper structure for generators that provides lazy membership and can
3119 3121 be iterated more than once.
3120 3122 When asked for membership it generates values until either it finds the
3121 3123 requested one or has gone through all the elements in the generator
3122 3124 """
3123 3125 def __init__(self, gen, iterasc=None):
3124 3126 """
3125 3127 gen: a generator producing the values for the generatorset.
3126 3128 """
3127 3129 self._gen = gen
3128 3130 self._asclist = None
3129 3131 self._cache = {}
3130 3132 self._genlist = []
3131 3133 self._finished = False
3132 3134 self._ascending = True
3133 3135 if iterasc is not None:
3134 3136 if iterasc:
3135 3137 self.fastasc = self._iterator
3136 3138 self.__contains__ = self._asccontains
3137 3139 else:
3138 3140 self.fastdesc = self._iterator
3139 3141 self.__contains__ = self._desccontains
3140 3142
3141 3143 def __nonzero__(self):
3142 3144 for r in self:
3143 3145 return True
3144 3146 return False
3145 3147
3146 3148 def __contains__(self, x):
3147 3149 if x in self._cache:
3148 3150 return self._cache[x]
3149 3151
3150 3152 # Use new values only, as existing values would be cached.
3151 3153 for l in self._consumegen():
3152 3154 if l == x:
3153 3155 return True
3154 3156
3155 3157 self._cache[x] = False
3156 3158 return False
3157 3159
3158 3160 def _asccontains(self, x):
3159 3161 """version of contains optimised for ascending generator"""
3160 3162 if x in self._cache:
3161 3163 return self._cache[x]
3162 3164
3163 3165 # Use new values only, as existing values would be cached.
3164 3166 for l in self._consumegen():
3165 3167 if l == x:
3166 3168 return True
3167 3169 if l > x:
3168 3170 break
3169 3171
3170 3172 self._cache[x] = False
3171 3173 return False
3172 3174
3173 3175 def _desccontains(self, x):
3174 3176 """version of contains optimised for descending generator"""
3175 3177 if x in self._cache:
3176 3178 return self._cache[x]
3177 3179
3178 3180 # Use new values only, as existing values would be cached.
3179 3181 for l in self._consumegen():
3180 3182 if l == x:
3181 3183 return True
3182 3184 if l < x:
3183 3185 break
3184 3186
3185 3187 self._cache[x] = False
3186 3188 return False
3187 3189
3188 3190 def __iter__(self):
3189 3191 if self._ascending:
3190 3192 it = self.fastasc
3191 3193 else:
3192 3194 it = self.fastdesc
3193 3195 if it is not None:
3194 3196 return it()
3195 3197 # we need to consume the iterator
3196 3198 for x in self._consumegen():
3197 3199 pass
3198 3200 # recall the same code
3199 3201 return iter(self)
3200 3202
3201 3203 def _iterator(self):
3202 3204 if self._finished:
3203 3205 return iter(self._genlist)
3204 3206
3205 3207 # We have to use this complex iteration strategy to allow multiple
3206 3208 # iterations at the same time. We need to be able to pick up revisions
3207 3209 # that another instance has pulled from _consumegen and added to genlist.
3208 3210 #
3209 3211 # Getting rid of it would provide about a 15% speedup on this
3210 3212 # iteration.
3211 3213 genlist = self._genlist
3212 3214 nextrev = self._consumegen().next
3213 3215 _len = len # cache global lookup
3214 3216 def gen():
3215 3217 i = 0
3216 3218 while True:
3217 3219 if i < _len(genlist):
3218 3220 yield genlist[i]
3219 3221 else:
3220 3222 yield nextrev()
3221 3223 i += 1
3222 3224 return gen()
3223 3225
3224 3226 def _consumegen(self):
3225 3227 cache = self._cache
3226 3228 genlist = self._genlist.append
3227 3229 for item in self._gen:
3228 3230 cache[item] = True
3229 3231 genlist(item)
3230 3232 yield item
3231 3233 if not self._finished:
3232 3234 self._finished = True
3233 3235 asc = self._genlist[:]
3234 3236 asc.sort()
3235 3237 self._asclist = asc
3236 3238 self.fastasc = asc.__iter__
3237 3239 self.fastdesc = asc.__reversed__
3238 3240
3239 3241 def __len__(self):
3240 3242 for x in self._consumegen():
3241 3243 pass
3242 3244 return len(self._genlist)
3243 3245
3244 3246 def sort(self, reverse=False):
3245 3247 self._ascending = not reverse
3246 3248
3247 3249 def reverse(self):
3248 3250 self._ascending = not self._ascending
3249 3251
3250 3252 def isascending(self):
3251 3253 return self._ascending
3252 3254
3253 3255 def isdescending(self):
3254 3256 return not self._ascending
3255 3257
3256 3258 def first(self):
3257 3259 if self._ascending:
3258 3260 it = self.fastasc
3259 3261 else:
3260 3262 it = self.fastdesc
3261 3263 if it is None:
3262 3264 # we need to consume all and try again
3263 3265 for x in self._consumegen():
3264 3266 pass
3265 3267 return self.first()
3266 3268 if self:
3267 3269 return it().next()
3268 3270 return None
3269 3271
3270 3272 def last(self):
3271 3273 if self._ascending:
3272 3274 it = self.fastdesc
3273 3275 else:
3274 3276 it = self.fastasc
3275 3277 if it is None:
3276 3278 # we need to consume all and try again
3277 3279 for x in self._consumegen():
3278 3280 pass
3279 3281 return self.last()
3280 3282 if self:
3281 3283 return it().next()
3282 3284 return None
3283 3285
3284 3286 def __repr__(self):
3285 3287 d = {False: '-', True: '+'}[self._ascending]
3286 3288 return '<%s%s>' % (type(self).__name__, d)
3287 3289
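The generatorset class above provides lazy membership over a one-shot generator: a membership test only consumes as many values as needed, everything consumed is cached, and the set can still be iterated in full later. A minimal standalone sketch of that technique, with made-up names and none of the ascending/descending machinery of the real class:

class lazymembership(object):
    # simplified stand-in for generatorset: cache values as the underlying
    # generator is consumed, so membership tests and re-iteration are cheap
    def __init__(self, gen):
        self._gen = gen
        self._cache = {}    # value -> True once it has been produced
        self._seen = []     # values produced so far, in generation order

    def _consume(self):
        # pull new values from the generator, remembering each one
        for item in self._gen:
            self._cache[item] = True
            self._seen.append(item)
            yield item

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]
        # only look at values not consumed yet; older ones are cached
        for item in self._consume():
            if item == x:
                return True
        self._cache[x] = False
        return False

    def __iter__(self):
        # replay what was already consumed, then keep pulling lazily
        for item in list(self._seen):
            yield item
        for item in self._consume():
            yield item

s = lazymembership(iter([4, 0, 7, 3]))
print(7 in s)     # True; stops consuming as soon as 7 is produced
print(list(s))    # [4, 0, 7, 3]; the remaining value is pulled on demand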
3288 3290 class spanset(abstractsmartset):
3289 3291 """Duck type for baseset class which represents a range of revisions and
3290 3292 can work lazily and without having all the range in memory
3291 3293
3292 3294 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3293 3295 notable points:
3294 3296 - when x > y the set will automatically be descending,
3295 3297 - revisions filtered by this repoview will be skipped.
3296 3298
3297 3299 """
3298 3300 def __init__(self, repo, start=0, end=None):
3299 3301 """
3300 3302 start: first revision included in the set
3301 3303 (defaults to 0)
3302 3304 end: first revision excluded (last + 1)
3303 3305 (defaults to len(repo))
3304 3306
3305 3307 Spanset will be descending if `end` < `start`.
3306 3308 """
3307 3309 if end is None:
3308 3310 end = len(repo)
3309 3311 self._ascending = start <= end
3310 3312 if not self._ascending:
3311 3313 start, end = end + 1, start + 1
3312 3314 self._start = start
3313 3315 self._end = end
3314 3316 self._hiddenrevs = repo.changelog.filteredrevs
3315 3317
3316 3318 def sort(self, reverse=False):
3317 3319 self._ascending = not reverse
3318 3320
3319 3321 def reverse(self):
3320 3322 self._ascending = not self._ascending
3321 3323
3322 3324 def _iterfilter(self, iterrange):
3323 3325 s = self._hiddenrevs
3324 3326 for r in iterrange:
3325 3327 if r not in s:
3326 3328 yield r
3327 3329
3328 3330 def __iter__(self):
3329 3331 if self._ascending:
3330 3332 return self.fastasc()
3331 3333 else:
3332 3334 return self.fastdesc()
3333 3335
3334 3336 def fastasc(self):
3335 3337 iterrange = xrange(self._start, self._end)
3336 3338 if self._hiddenrevs:
3337 3339 return self._iterfilter(iterrange)
3338 3340 return iter(iterrange)
3339 3341
3340 3342 def fastdesc(self):
3341 3343 iterrange = xrange(self._end - 1, self._start - 1, -1)
3342 3344 if self._hiddenrevs:
3343 3345 return self._iterfilter(iterrange)
3344 3346 return iter(iterrange)
3345 3347
3346 3348 def __contains__(self, rev):
3347 3349 hidden = self._hiddenrevs
3348 3350 return ((self._start <= rev < self._end)
3349 3351 and not (hidden and rev in hidden))
3350 3352
3351 3353 def __nonzero__(self):
3352 3354 for r in self:
3353 3355 return True
3354 3356 return False
3355 3357
3356 3358 def __len__(self):
3357 3359 if not self._hiddenrevs:
3358 3360 return abs(self._end - self._start)
3359 3361 else:
3360 3362 count = 0
3361 3363 start = self._start
3362 3364 end = self._end
3363 3365 for rev in self._hiddenrevs:
3364 3366 if (end < rev <= start) or (start <= rev < end):
3365 3367 count += 1
3366 3368 return abs(self._end - self._start) - count
3367 3369
3368 3370 def isascending(self):
3369 3371 return self._ascending
3370 3372
3371 3373 def isdescending(self):
3372 3374 return not self._ascending
3373 3375
3374 3376 def first(self):
3375 3377 if self._ascending:
3376 3378 it = self.fastasc
3377 3379 else:
3378 3380 it = self.fastdesc
3379 3381 for x in it():
3380 3382 return x
3381 3383 return None
3382 3384
3383 3385 def last(self):
3384 3386 if self._ascending:
3385 3387 it = self.fastdesc
3386 3388 else:
3387 3389 it = self.fastasc
3388 3390 for x in it():
3389 3391 return x
3390 3392 return None
3391 3393
3392 3394 def __repr__(self):
3393 3395 d = {False: '-', True: '+'}[self._ascending]
3394 3396 return '<%s%s %d:%d>' % (type(self).__name__, d,
3395 3397 self._start, self._end - 1)
3396 3398
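The spanset class above is essentially an integer range that skips hidden revisions and can be walked in either direction without materializing the range. A hedged, self-contained sketch of that idea (illustrative names only, not Mercurial API):

def spanlike(start, end, hidden=frozenset(), ascending=True):
    # walk [start, end) in the requested direction, skipping hidden values
    if ascending:
        rng = range(start, end)
    else:
        rng = range(end - 1, start - 1, -1)
    for r in rng:
        if r not in hidden:
            yield r

print(list(spanlike(0, 6, hidden={2, 4})))                   # [0, 1, 3, 5]
print(list(spanlike(0, 6, hidden={2, 4}, ascending=False)))  # [5, 3, 1, 0]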
3397 3399 class fullreposet(spanset):
3398 3400 """a set containing all revisions in the repo
3399 3401
3400 3402 This class exists to host special optimization and magic to handle virtual
3401 3403 revisions such as "null".
3402 3404 """
3403 3405
3404 3406 def __init__(self, repo):
3405 3407 super(fullreposet, self).__init__(repo)
3406 3408
3407 3409 def __contains__(self, rev):
3408 3410 # assumes the given rev is valid
3409 3411 hidden = self._hiddenrevs
3410 3412 return not (hidden and rev in hidden)
3411 3413
3412 3414 def __and__(self, other):
3413 3415 """Since self contains the whole repo, every element of the other set is
3414 3416 also in self. Therefore `self & other = other`.
3415 3417
3416 3418 This boldly assumes that `other` contains only valid revs.
3417 3419 """
3418 3420 # other is not a smartset, make it so
3419 3421 if not util.safehasattr(other, 'isascending'):
3420 3422 # filter out hidden revisions
3421 3423 # (this boldly assumes all smartsets are pure)
3422 3424 #
3423 3425 # `other` was used with "&", so assume it is a set-like
3424 3426 # object.
3425 3427 other = baseset(other - self._hiddenrevs)
3426 3428
3427 3429 other.sort(reverse=self.isdescending())
3428 3430 return other
3429 3431
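fullreposet.__and__ above relies on a simple identity: intersecting with a set that contains every revision just returns the other operand, reordered to match the left side's direction. A toy illustration of that shortcut (a stand-in class, not a Mercurial object):

class fullset(object):
    # pretend universe: every value is a member, so `self & other == other`
    def __init__(self, descending=False):
        self._descending = descending
    def __and__(self, other):
        # no per-element intersection needed, only a re-sort
        return sorted(other, reverse=self._descending)

print(fullset() & {3, 1, 2})                  # [1, 2, 3]
print(fullset(descending=True) & {3, 1, 2})   # [3, 2, 1]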
3430 3432 def prettyformatset(revs):
3431 3433 lines = []
3432 3434 rs = repr(revs)
3433 3435 p = 0
3434 3436 while p < len(rs):
3435 3437 q = rs.find('<', p + 1)
3436 3438 if q < 0:
3437 3439 q = len(rs)
3438 3440 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3439 3441 assert l >= 0
3440 3442 lines.append((l, rs[p:q].rstrip()))
3441 3443 p = q
3442 3444 return '\n'.join(' ' * l + s for l, s in lines)
3443 3445
3444 3446 # tell hggettext to extract docstrings from these functions:
3445 3447 i18nfunctions = symbols.values()
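prettyformatset re-indents the repr() of nested smartsets by their '<...>' nesting depth, which makes debug output of composed sets readable. A hedged usage sketch, assuming prettyformatset is importable from this module; the class below is a made-up stand-in whose repr merely mimics the nested shape, not a real revset object:

# assumption: the module path below matches where prettyformatset lives
from mercurial.revset import prettyformatset

class fakeset(object):
    # repr mimics the nested '<...>' shape produced by composed smartsets
    def __repr__(self):
        return '<addset <baseset [1, 2]>, <spanset+ 0:3>>'

print(prettyformatset(fakeset()))
# expected shape, one space of indent per nesting level:
# <addset
#  <baseset [1, 2]>,
#  <spanset+ 0:3>>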
@@ -1,42 +1,42
1 1 #require test-repo
2 2
3 3 This code uses the ast module, which was new in 2.6, so we'll skip
4 4 this test on anything earlier.
5 5 $ $PYTHON -c 'import sys ; assert sys.version_info >= (2, 6)' || exit 80
6 6
7 7 $ import_checker="$TESTDIR"/../contrib/import-checker.py
8 8
9 9 Run the doctests from the import checker, and make sure
10 10 it's working correctly.
11 11 $ TERM=dumb
12 12 $ export TERM
13 13 $ python -m doctest $import_checker
14 14
15 15 $ cd "$TESTDIR"/..
16 16
17 17 There are a handful of cases here that require renaming a module so it
18 18 doesn't overlap with a stdlib module name. There are also some cycles
19 19 here that we should still endeavor to fix, and some cycles will be
20 20 hidden by the deduplication algorithm in the cycle detector, so fixing
21 21 these may expose other cycles.
22 22
23 23 $ hg locate 'mercurial/**.py' | sed 's-\\-/-g' | xargs python "$import_checker"
24 24 mercurial/crecord.py mixed imports
25 25 stdlib: fcntl, termios
26 26 relative: curses
27 27 mercurial/dispatch.py mixed imports
28 28 stdlib: commands
29 29 relative: error, extensions, fancyopts, hg, hook, util
30 30 mercurial/fileset.py mixed imports
31 31 stdlib: parser
32 32 relative: error, merge, util
33 33 mercurial/revset.py mixed imports
34 34 stdlib: parser
35 relative: discovery, error, hbisect, phases, util
35 relative: error, hbisect, phases, util
36 36 mercurial/templater.py mixed imports
37 37 stdlib: parser
38 38 relative: config, error, templatefilters, templatekw, util
39 39 mercurial/ui.py mixed imports
40 40 stdlib: formatter
41 41 relative: config, error, scmutil, util
42 42 Import cycle: mercurial.cmdutil -> mercurial.context -> mercurial.subrepo -> mercurial.cmdutil