devel: officially deprecate old style revset...
Pierre-Yves David
r29146:b175d9cc default
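
The hunk below changes getset() so that a registered revset predicate returning a plain Python list, rather than a smartset, now emits a deprecation warning (deprecwarn, slated for removal in 3.9) instead of the previous develwarn. As a minimal sketch of what the deprecation targets from an extension author's point of view — the predicate name 'byauthor' and its filter condition are hypothetical, only the return type is the point:

    from mercurial import registrar, revset

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('byauthor(name)')
    def byauthor(repo, subset, x):
        name = revset.getstring(x, "byauthor requires a string")
        # Old style (now deprecated): returning a plain list forces getset()
        # to wrap it in a baseset and print the deprecation warning.
        #   return [r for r in subset if name in repo[r].user()]
        # New style: return a smartset (here a baseset), so ordering and
        # laziness information is preserved by the revset machinery.
        return revset.baseset([r for r in subset if name in repo[r].user()])

Returning a smartset also lets the & and - operators used throughout this module combine results without an extra conversion.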
@@ -1,3396 +1,3396 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
155 155 ("dagrangepost", 17)),
156 156 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
157 157 ("dagrangepost", 17)),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
179 179 if c.isalnum() or c in '._@' or ord(c) > 127)
180 180
181 181 # default set of valid characters for non-initial letters of symbols
182 182 _symletters = set(c for c in [chr(i) for i in xrange(256)]
183 183 if c.isalnum() or c in '-._/@' or ord(c) > 127)
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 # helpers
304 304
305 305 def getstring(x, err):
306 306 if x and (x[0] == 'string' or x[0] == 'symbol'):
307 307 return x[1]
308 308 raise error.ParseError(err)
309 309
310 310 def getlist(x):
311 311 if not x:
312 312 return []
313 313 if x[0] == 'list':
314 314 return list(x[1:])
315 315 return [x]
316 316
317 317 def getargs(x, min, max, err):
318 318 l = getlist(x)
319 319 if len(l) < min or (max >= 0 and len(l) > max):
320 320 raise error.ParseError(err)
321 321 return l
322 322
323 323 def getargsdict(x, funcname, keys):
324 324 return parser.buildargsdict(getlist(x), funcname, keys.split(),
325 325 keyvaluenode='keyvalue', keynode='symbol')
326 326
327 327 def getset(repo, subset, x):
328 328 if not x:
329 329 raise error.ParseError(_("missing argument"))
330 330 s = methods[x[0]](repo, subset, *x[1:])
331 331 if util.safehasattr(s, 'isascending'):
332 332 return s
333 333 # else case should not happen, because all non-func are internal,
334 334 # ignoring for now.
335 335 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
336 repo.ui.develwarn('revset "%s" use list instead of smartset, '
337 '(upgrade your code)' % x[1][1],
338 config='old-revset')
336 repo.ui.deprecwarn('revset "%s" use list instead of smartset'
337 % x[1][1],
338 '3.9')
339 339 return baseset(s)
340 340
341 341 def _getrevsource(repo, r):
342 342 extra = repo[r].extra()
343 343 for label in ('source', 'transplant_source', 'rebase_source'):
344 344 if label in extra:
345 345 try:
346 346 return repo[extra[label]].rev()
347 347 except error.RepoLookupError:
348 348 pass
349 349 return None
350 350
351 351 # operator methods
352 352
353 353 def stringset(repo, subset, x):
354 354 x = repo[x].rev()
355 355 if (x in subset
356 356 or x == node.nullrev and isinstance(subset, fullreposet)):
357 357 return baseset([x])
358 358 return baseset()
359 359
360 360 def rangeset(repo, subset, x, y):
361 361 m = getset(repo, fullreposet(repo), x)
362 362 n = getset(repo, fullreposet(repo), y)
363 363
364 364 if not m or not n:
365 365 return baseset()
366 366 m, n = m.first(), n.last()
367 367
368 368 if m == n:
369 369 r = baseset([m])
370 370 elif n == node.wdirrev:
371 371 r = spanset(repo, m, len(repo)) + baseset([n])
372 372 elif m == node.wdirrev:
373 373 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
374 374 elif m < n:
375 375 r = spanset(repo, m, n + 1)
376 376 else:
377 377 r = spanset(repo, m, n - 1)
378 378 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
379 379 # necessary to ensure we preserve the order in subset.
380 380 #
381 381 # This has performance implication, carrying the sorting over when possible
382 382 # would be more efficient.
383 383 return r & subset
384 384
385 385 def dagrange(repo, subset, x, y):
386 386 r = fullreposet(repo)
387 387 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
388 388 includepath=True)
389 389 return subset & xs
390 390
391 391 def andset(repo, subset, x, y):
392 392 return getset(repo, getset(repo, subset, x), y)
393 393
394 394 def differenceset(repo, subset, x, y):
395 395 return getset(repo, subset, x) - getset(repo, subset, y)
396 396
397 397 def orset(repo, subset, *xs):
398 398 assert xs
399 399 if len(xs) == 1:
400 400 return getset(repo, subset, xs[0])
401 401 p = len(xs) // 2
402 402 a = orset(repo, subset, *xs[:p])
403 403 b = orset(repo, subset, *xs[p:])
404 404 return a + b
405 405
406 406 def notset(repo, subset, x):
407 407 return subset - getset(repo, subset, x)
408 408
409 409 def listset(repo, subset, *xs):
410 410 raise error.ParseError(_("can't use a list in this context"),
411 411 hint=_('see hg help "revsets.x or y"'))
412 412
413 413 def keyvaluepair(repo, subset, k, v):
414 414 raise error.ParseError(_("can't use a key-value pair in this context"))
415 415
416 416 def func(repo, subset, a, b):
417 417 if a[0] == 'symbol' and a[1] in symbols:
418 418 return symbols[a[1]](repo, subset, b)
419 419
420 420 keep = lambda fn: getattr(fn, '__doc__', None) is not None
421 421
422 422 syms = [s for (s, fn) in symbols.items() if keep(fn)]
423 423 raise error.UnknownIdentifier(a[1], syms)
424 424
425 425 # functions
426 426
427 427 # symbols are callables like:
428 428 # fn(repo, subset, x)
429 429 # with:
430 430 # repo - current repository instance
431 431 # subset - of revisions to be examined
432 432 # x - argument in tree form
433 433 symbols = {}
434 434
435 435 # symbols which can't be used for a DoS attack for any given input
436 436 # (e.g. those which accept regexes as plain strings shouldn't be included)
437 437 # functions that just return a lot of changesets (like all) don't count here
438 438 safesymbols = set()
439 439
440 440 predicate = registrar.revsetpredicate()
441 441
442 442 @predicate('_destupdate')
443 443 def _destupdate(repo, subset, x):
444 444 # experimental revset for update destination
445 445 args = getargsdict(x, 'limit', 'clean check')
446 446 return subset & baseset([destutil.destupdate(repo, **args)[0]])
447 447
448 448 @predicate('_destmerge')
449 449 def _destmerge(repo, subset, x):
450 450 # experimental revset for merge destination
451 451 sourceset = None
452 452 if x is not None:
453 453 sourceset = getset(repo, fullreposet(repo), x)
454 454 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
455 455
456 456 @predicate('adds(pattern)', safe=True)
457 457 def adds(repo, subset, x):
458 458 """Changesets that add a file matching pattern.
459 459
460 460 The pattern without explicit kind like ``glob:`` is expected to be
461 461 relative to the current directory and match against a file or a
462 462 directory.
463 463 """
464 464 # i18n: "adds" is a keyword
465 465 pat = getstring(x, _("adds requires a pattern"))
466 466 return checkstatus(repo, subset, pat, 1)
467 467
468 468 @predicate('ancestor(*changeset)', safe=True)
469 469 def ancestor(repo, subset, x):
470 470 """A greatest common ancestor of the changesets.
471 471
472 472 Accepts 0 or more changesets.
473 473 Will return empty list when passed no args.
474 474 Greatest common ancestor of a single changeset is that changeset.
475 475 """
476 476 # i18n: "ancestor" is a keyword
477 477 l = getlist(x)
478 478 rl = fullreposet(repo)
479 479 anc = None
480 480
481 481 # (getset(repo, rl, i) for i in l) generates a list of lists
482 482 for revs in (getset(repo, rl, i) for i in l):
483 483 for r in revs:
484 484 if anc is None:
485 485 anc = repo[r]
486 486 else:
487 487 anc = anc.ancestor(repo[r])
488 488
489 489 if anc is not None and anc.rev() in subset:
490 490 return baseset([anc.rev()])
491 491 return baseset()
492 492
493 493 def _ancestors(repo, subset, x, followfirst=False):
494 494 heads = getset(repo, fullreposet(repo), x)
495 495 if not heads:
496 496 return baseset()
497 497 s = _revancestors(repo, heads, followfirst)
498 498 return subset & s
499 499
500 500 @predicate('ancestors(set)', safe=True)
501 501 def ancestors(repo, subset, x):
502 502 """Changesets that are ancestors of a changeset in set.
503 503 """
504 504 return _ancestors(repo, subset, x)
505 505
506 506 @predicate('_firstancestors', safe=True)
507 507 def _firstancestors(repo, subset, x):
508 508 # ``_firstancestors(set)``
509 509 # Like ``ancestors(set)`` but follows only the first parents.
510 510 return _ancestors(repo, subset, x, followfirst=True)
511 511
512 512 def ancestorspec(repo, subset, x, n):
513 513 """``set~n``
514 514 Changesets that are the Nth ancestor (first parents only) of a changeset
515 515 in set.
516 516 """
517 517 try:
518 518 n = int(n[1])
519 519 except (TypeError, ValueError):
520 520 raise error.ParseError(_("~ expects a number"))
521 521 ps = set()
522 522 cl = repo.changelog
523 523 for r in getset(repo, fullreposet(repo), x):
524 524 for i in range(n):
525 525 r = cl.parentrevs(r)[0]
526 526 ps.add(r)
527 527 return subset & ps
528 528
529 529 @predicate('author(string)', safe=True)
530 530 def author(repo, subset, x):
531 531 """Alias for ``user(string)``.
532 532 """
533 533 # i18n: "author" is a keyword
534 534 n = encoding.lower(getstring(x, _("author requires a string")))
535 535 kind, pattern, matcher = _substringmatcher(n)
536 536 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
537 537 condrepr=('<user %r>', n))
538 538
539 539 @predicate('bisect(string)', safe=True)
540 540 def bisect(repo, subset, x):
541 541 """Changesets marked in the specified bisect status:
542 542
543 543 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
544 544 - ``goods``, ``bads`` : csets topologically good/bad
545 545 - ``range`` : csets taking part in the bisection
546 546 - ``pruned`` : csets that are goods, bads or skipped
547 547 - ``untested`` : csets whose fate is yet unknown
548 548 - ``ignored`` : csets ignored due to DAG topology
549 549 - ``current`` : the cset currently being bisected
550 550 """
551 551 # i18n: "bisect" is a keyword
552 552 status = getstring(x, _("bisect requires a string")).lower()
553 553 state = set(hbisect.get(repo, status))
554 554 return subset & state
555 555
556 556 # Backward-compatibility
557 557 # - no help entry so that we do not advertise it any more
558 558 @predicate('bisected', safe=True)
559 559 def bisected(repo, subset, x):
560 560 return bisect(repo, subset, x)
561 561
562 562 @predicate('bookmark([name])', safe=True)
563 563 def bookmark(repo, subset, x):
564 564 """The named bookmark or all bookmarks.
565 565
566 566 If `name` starts with `re:`, the remainder of the name is treated as
567 567 a regular expression. To match a bookmark that actually starts with `re:`,
568 568 use the prefix `literal:`.
569 569 """
570 570 # i18n: "bookmark" is a keyword
571 571 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
572 572 if args:
573 573 bm = getstring(args[0],
574 574 # i18n: "bookmark" is a keyword
575 575 _('the argument to bookmark must be a string'))
576 576 kind, pattern, matcher = util.stringmatcher(bm)
577 577 bms = set()
578 578 if kind == 'literal':
579 579 bmrev = repo._bookmarks.get(pattern, None)
580 580 if not bmrev:
581 581 raise error.RepoLookupError(_("bookmark '%s' does not exist")
582 582 % pattern)
583 583 bms.add(repo[bmrev].rev())
584 584 else:
585 585 matchrevs = set()
586 586 for name, bmrev in repo._bookmarks.iteritems():
587 587 if matcher(name):
588 588 matchrevs.add(bmrev)
589 589 if not matchrevs:
590 590 raise error.RepoLookupError(_("no bookmarks exist"
591 591 " that match '%s'") % pattern)
592 592 for bmrev in matchrevs:
593 593 bms.add(repo[bmrev].rev())
594 594 else:
595 595 bms = set([repo[r].rev()
596 596 for r in repo._bookmarks.values()])
597 597 bms -= set([node.nullrev])
598 598 return subset & bms
599 599
600 600 @predicate('branch(string or set)', safe=True)
601 601 def branch(repo, subset, x):
602 602 """
603 603 All changesets belonging to the given branch or the branches of the given
604 604 changesets.
605 605
606 606 If `string` starts with `re:`, the remainder of the name is treated as
607 607 a regular expression. To match a branch that actually starts with `re:`,
608 608 use the prefix `literal:`.
609 609 """
610 610 getbi = repo.revbranchcache().branchinfo
611 611
612 612 try:
613 613 b = getstring(x, '')
614 614 except error.ParseError:
615 615 # not a string, but another revspec, e.g. tip()
616 616 pass
617 617 else:
618 618 kind, pattern, matcher = util.stringmatcher(b)
619 619 if kind == 'literal':
620 620 # note: falls through to the revspec case if no branch with
621 621 # this name exists and pattern kind is not specified explicitly
622 622 if pattern in repo.branchmap():
623 623 return subset.filter(lambda r: matcher(getbi(r)[0]),
624 624 condrepr=('<branch %r>', b))
625 625 if b.startswith('literal:'):
626 626 raise error.RepoLookupError(_("branch '%s' does not exist")
627 627 % pattern)
628 628 else:
629 629 return subset.filter(lambda r: matcher(getbi(r)[0]),
630 630 condrepr=('<branch %r>', b))
631 631
632 632 s = getset(repo, fullreposet(repo), x)
633 633 b = set()
634 634 for r in s:
635 635 b.add(getbi(r)[0])
636 636 c = s.__contains__
637 637 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
638 638 condrepr=lambda: '<branch %r>' % sorted(b))
639 639
640 640 @predicate('bumped()', safe=True)
641 641 def bumped(repo, subset, x):
642 642 """Mutable changesets marked as successors of public changesets.
643 643
644 644 Only non-public and non-obsolete changesets can be `bumped`.
645 645 """
646 646 # i18n: "bumped" is a keyword
647 647 getargs(x, 0, 0, _("bumped takes no arguments"))
648 648 bumped = obsmod.getrevs(repo, 'bumped')
649 649 return subset & bumped
650 650
651 651 @predicate('bundle()', safe=True)
652 652 def bundle(repo, subset, x):
653 653 """Changesets in the bundle.
654 654
655 655 Bundle must be specified by the -R option."""
656 656
657 657 try:
658 658 bundlerevs = repo.changelog.bundlerevs
659 659 except AttributeError:
660 660 raise error.Abort(_("no bundle provided - specify with -R"))
661 661 return subset & bundlerevs
662 662
663 663 def checkstatus(repo, subset, pat, field):
664 664 hasset = matchmod.patkind(pat) == 'set'
665 665
666 666 mcache = [None]
667 667 def matches(x):
668 668 c = repo[x]
669 669 if not mcache[0] or hasset:
670 670 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
671 671 m = mcache[0]
672 672 fname = None
673 673 if not m.anypats() and len(m.files()) == 1:
674 674 fname = m.files()[0]
675 675 if fname is not None:
676 676 if fname not in c.files():
677 677 return False
678 678 else:
679 679 for f in c.files():
680 680 if m(f):
681 681 break
682 682 else:
683 683 return False
684 684 files = repo.status(c.p1().node(), c.node())[field]
685 685 if fname is not None:
686 686 if fname in files:
687 687 return True
688 688 else:
689 689 for f in files:
690 690 if m(f):
691 691 return True
692 692
693 693 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
694 694
695 695 def _children(repo, narrow, parentset):
696 696 if not parentset:
697 697 return baseset()
698 698 cs = set()
699 699 pr = repo.changelog.parentrevs
700 700 minrev = parentset.min()
701 701 for r in narrow:
702 702 if r <= minrev:
703 703 continue
704 704 for p in pr(r):
705 705 if p in parentset:
706 706 cs.add(r)
707 707 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
708 708 # This does not break because of other fullreposet misbehavior.
709 709 return baseset(cs)
710 710
711 711 @predicate('children(set)', safe=True)
712 712 def children(repo, subset, x):
713 713 """Child changesets of changesets in set.
714 714 """
715 715 s = getset(repo, fullreposet(repo), x)
716 716 cs = _children(repo, subset, s)
717 717 return subset & cs
718 718
719 719 @predicate('closed()', safe=True)
720 720 def closed(repo, subset, x):
721 721 """Changeset is closed.
722 722 """
723 723 # i18n: "closed" is a keyword
724 724 getargs(x, 0, 0, _("closed takes no arguments"))
725 725 return subset.filter(lambda r: repo[r].closesbranch(),
726 726 condrepr='<branch closed>')
727 727
728 728 @predicate('contains(pattern)')
729 729 def contains(repo, subset, x):
730 730 """The revision's manifest contains a file matching pattern (but might not
731 731 modify it). See :hg:`help patterns` for information about file patterns.
732 732
733 733 The pattern without explicit kind like ``glob:`` is expected to be
734 734 relative to the current directory and match against a file exactly
735 735 for efficiency.
736 736 """
737 737 # i18n: "contains" is a keyword
738 738 pat = getstring(x, _("contains requires a pattern"))
739 739
740 740 def matches(x):
741 741 if not matchmod.patkind(pat):
742 742 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
743 743 if pats in repo[x]:
744 744 return True
745 745 else:
746 746 c = repo[x]
747 747 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
748 748 for f in c.manifest():
749 749 if m(f):
750 750 return True
751 751 return False
752 752
753 753 return subset.filter(matches, condrepr=('<contains %r>', pat))
754 754
755 755 @predicate('converted([id])', safe=True)
756 756 def converted(repo, subset, x):
757 757 """Changesets converted from the given identifier in the old repository if
758 758 present, or all converted changesets if no identifier is specified.
759 759 """
760 760
761 761 # There is exactly no chance of resolving the revision, so do a simple
762 762 # string compare and hope for the best
763 763
764 764 rev = None
765 765 # i18n: "converted" is a keyword
766 766 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
767 767 if l:
768 768 # i18n: "converted" is a keyword
769 769 rev = getstring(l[0], _('converted requires a revision'))
770 770
771 771 def _matchvalue(r):
772 772 source = repo[r].extra().get('convert_revision', None)
773 773 return source is not None and (rev is None or source.startswith(rev))
774 774
775 775 return subset.filter(lambda r: _matchvalue(r),
776 776 condrepr=('<converted %r>', rev))
777 777
778 778 @predicate('date(interval)', safe=True)
779 779 def date(repo, subset, x):
780 780 """Changesets within the interval, see :hg:`help dates`.
781 781 """
782 782 # i18n: "date" is a keyword
783 783 ds = getstring(x, _("date requires a string"))
784 784 dm = util.matchdate(ds)
785 785 return subset.filter(lambda x: dm(repo[x].date()[0]),
786 786 condrepr=('<date %r>', ds))
787 787
788 788 @predicate('desc(string)', safe=True)
789 789 def desc(repo, subset, x):
790 790 """Search commit message for string. The match is case-insensitive.
791 791 """
792 792 # i18n: "desc" is a keyword
793 793 ds = encoding.lower(getstring(x, _("desc requires a string")))
794 794
795 795 def matches(x):
796 796 c = repo[x]
797 797 return ds in encoding.lower(c.description())
798 798
799 799 return subset.filter(matches, condrepr=('<desc %r>', ds))
800 800
801 801 def _descendants(repo, subset, x, followfirst=False):
802 802 roots = getset(repo, fullreposet(repo), x)
803 803 if not roots:
804 804 return baseset()
805 805 s = _revdescendants(repo, roots, followfirst)
806 806
807 807 # Both sets need to be ascending in order to lazily return the union
808 808 # in the correct order.
809 809 base = subset & roots
810 810 desc = subset & s
811 811 result = base + desc
812 812 if subset.isascending():
813 813 result.sort()
814 814 elif subset.isdescending():
815 815 result.sort(reverse=True)
816 816 else:
817 817 result = subset & result
818 818 return result
819 819
820 820 @predicate('descendants(set)', safe=True)
821 821 def descendants(repo, subset, x):
822 822 """Changesets which are descendants of changesets in set.
823 823 """
824 824 return _descendants(repo, subset, x)
825 825
826 826 @predicate('_firstdescendants', safe=True)
827 827 def _firstdescendants(repo, subset, x):
828 828 # ``_firstdescendants(set)``
829 829 # Like ``descendants(set)`` but follows only the first parents.
830 830 return _descendants(repo, subset, x, followfirst=True)
831 831
832 832 @predicate('destination([set])', safe=True)
833 833 def destination(repo, subset, x):
834 834 """Changesets that were created by a graft, transplant or rebase operation,
835 835 with the given revisions specified as the source. Omitting the optional set
836 836 is the same as passing all().
837 837 """
838 838 if x is not None:
839 839 sources = getset(repo, fullreposet(repo), x)
840 840 else:
841 841 sources = fullreposet(repo)
842 842
843 843 dests = set()
844 844
845 845 # subset contains all of the possible destinations that can be returned, so
846 846 # iterate over them and see if their source(s) were provided in the arg set.
847 847 # Even if the immediate src of r is not in the arg set, src's source (or
848 848 # further back) may be. Scanning back further than the immediate src allows
849 849 # transitive transplants and rebases to yield the same results as transitive
850 850 # grafts.
851 851 for r in subset:
852 852 src = _getrevsource(repo, r)
853 853 lineage = None
854 854
855 855 while src is not None:
856 856 if lineage is None:
857 857 lineage = list()
858 858
859 859 lineage.append(r)
860 860
861 861 # The visited lineage is a match if the current source is in the arg
862 862 # set. Since every candidate dest is visited by way of iterating
863 863 # subset, any dests further back in the lineage will be tested by a
864 864 # different iteration over subset. Likewise, if the src was already
865 865 # selected, the current lineage can be selected without going back
866 866 # further.
867 867 if src in sources or src in dests:
868 868 dests.update(lineage)
869 869 break
870 870
871 871 r = src
872 872 src = _getrevsource(repo, r)
873 873
874 874 return subset.filter(dests.__contains__,
875 875 condrepr=lambda: '<destination %r>' % sorted(dests))
876 876
877 877 @predicate('divergent()', safe=True)
878 878 def divergent(repo, subset, x):
879 879 """
880 880 Final successors of changesets with an alternative set of final successors.
881 881 """
882 882 # i18n: "divergent" is a keyword
883 883 getargs(x, 0, 0, _("divergent takes no arguments"))
884 884 divergent = obsmod.getrevs(repo, 'divergent')
885 885 return subset & divergent
886 886
887 887 @predicate('extinct()', safe=True)
888 888 def extinct(repo, subset, x):
889 889 """Obsolete changesets with obsolete descendants only.
890 890 """
891 891 # i18n: "extinct" is a keyword
892 892 getargs(x, 0, 0, _("extinct takes no arguments"))
893 893 extincts = obsmod.getrevs(repo, 'extinct')
894 894 return subset & extincts
895 895
896 896 @predicate('extra(label, [value])', safe=True)
897 897 def extra(repo, subset, x):
898 898 """Changesets with the given label in the extra metadata, with the given
899 899 optional value.
900 900
901 901 If `value` starts with `re:`, the remainder of the value is treated as
902 902 a regular expression. To match a value that actually starts with `re:`,
903 903 use the prefix `literal:`.
904 904 """
905 905 args = getargsdict(x, 'extra', 'label value')
906 906 if 'label' not in args:
907 907 # i18n: "extra" is a keyword
908 908 raise error.ParseError(_('extra takes at least 1 argument'))
909 909 # i18n: "extra" is a keyword
910 910 label = getstring(args['label'], _('first argument to extra must be '
911 911 'a string'))
912 912 value = None
913 913
914 914 if 'value' in args:
915 915 # i18n: "extra" is a keyword
916 916 value = getstring(args['value'], _('second argument to extra must be '
917 917 'a string'))
918 918 kind, value, matcher = util.stringmatcher(value)
919 919
920 920 def _matchvalue(r):
921 921 extra = repo[r].extra()
922 922 return label in extra and (value is None or matcher(extra[label]))
923 923
924 924 return subset.filter(lambda r: _matchvalue(r),
925 925 condrepr=('<extra[%r] %r>', label, value))
926 926
927 927 @predicate('filelog(pattern)', safe=True)
928 928 def filelog(repo, subset, x):
929 929 """Changesets connected to the specified filelog.
930 930
931 931 For performance reasons, visits only revisions mentioned in the file-level
932 932 filelog, rather than filtering through all changesets (much faster, but
933 933 doesn't include deletes or duplicate changes). For a slower, more accurate
934 934 result, use ``file()``.
935 935
936 936 The pattern without explicit kind like ``glob:`` is expected to be
937 937 relative to the current directory and match against a file exactly
938 938 for efficiency.
939 939
940 940 If some linkrev points to revisions filtered by the current repoview, we'll
941 941 work around it to return a non-filtered value.
942 942 """
943 943
944 944 # i18n: "filelog" is a keyword
945 945 pat = getstring(x, _("filelog requires a pattern"))
946 946 s = set()
947 947 cl = repo.changelog
948 948
949 949 if not matchmod.patkind(pat):
950 950 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
951 951 files = [f]
952 952 else:
953 953 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
954 954 files = (f for f in repo[None] if m(f))
955 955
956 956 for f in files:
957 957 fl = repo.file(f)
958 958 known = {}
959 959 scanpos = 0
960 960 for fr in list(fl):
961 961 fn = fl.node(fr)
962 962 if fn in known:
963 963 s.add(known[fn])
964 964 continue
965 965
966 966 lr = fl.linkrev(fr)
967 967 if lr in cl:
968 968 s.add(lr)
969 969 elif scanpos is not None:
970 970 # lowest matching changeset is filtered, scan further
971 971 # ahead in changelog
972 972 start = max(lr, scanpos) + 1
973 973 scanpos = None
974 974 for r in cl.revs(start):
975 975 # minimize parsing of non-matching entries
976 976 if f in cl.revision(r) and f in cl.readfiles(r):
977 977 try:
978 978 # try to use manifest delta fastpath
979 979 n = repo[r].filenode(f)
980 980 if n not in known:
981 981 if n == fn:
982 982 s.add(r)
983 983 scanpos = r
984 984 break
985 985 else:
986 986 known[n] = r
987 987 except error.ManifestLookupError:
988 988 # deletion in changelog
989 989 continue
990 990
991 991 return subset & s
992 992
993 993 @predicate('first(set, [n])', safe=True)
994 994 def first(repo, subset, x):
995 995 """An alias for limit().
996 996 """
997 997 return limit(repo, subset, x)
998 998
999 999 def _follow(repo, subset, x, name, followfirst=False):
1000 1000 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1001 1001 c = repo['.']
1002 1002 if l:
1003 1003 x = getstring(l[0], _("%s expected a pattern") % name)
1004 1004 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1005 1005 ctx=repo[None], default='path')
1006 1006
1007 1007 files = c.manifest().walk(matcher)
1008 1008
1009 1009 s = set()
1010 1010 for fname in files:
1011 1011 fctx = c[fname]
1012 1012 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1013 1013 # include the revision responsible for the most recent version
1014 1014 s.add(fctx.introrev())
1015 1015 else:
1016 1016 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1017 1017
1018 1018 return subset & s
1019 1019
1020 1020 @predicate('follow([pattern])', safe=True)
1021 1021 def follow(repo, subset, x):
1022 1022 """
1023 1023 An alias for ``::.`` (ancestors of the working directory's first parent).
1024 1024 If pattern is specified, the histories of files matching given
1025 1025 pattern is followed, including copies.
1026 1026 """
1027 1027 return _follow(repo, subset, x, 'follow')
1028 1028
1029 1029 @predicate('_followfirst', safe=True)
1030 1030 def _followfirst(repo, subset, x):
1031 1031 # ``followfirst([pattern])``
1032 1032 # Like ``follow([pattern])`` but follows only the first parent of
1033 1033 # every revisions or files revisions.
1034 1034 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1035 1035
1036 1036 @predicate('all()', safe=True)
1037 1037 def getall(repo, subset, x):
1038 1038 """All changesets, the same as ``0:tip``.
1039 1039 """
1040 1040 # i18n: "all" is a keyword
1041 1041 getargs(x, 0, 0, _("all takes no arguments"))
1042 1042 return subset & spanset(repo) # drop "null" if any
1043 1043
1044 1044 @predicate('grep(regex)')
1045 1045 def grep(repo, subset, x):
1046 1046 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1047 1047 to ensure special escape characters are handled correctly. Unlike
1048 1048 ``keyword(string)``, the match is case-sensitive.
1049 1049 """
1050 1050 try:
1051 1051 # i18n: "grep" is a keyword
1052 1052 gr = re.compile(getstring(x, _("grep requires a string")))
1053 1053 except re.error as e:
1054 1054 raise error.ParseError(_('invalid match pattern: %s') % e)
1055 1055
1056 1056 def matches(x):
1057 1057 c = repo[x]
1058 1058 for e in c.files() + [c.user(), c.description()]:
1059 1059 if gr.search(e):
1060 1060 return True
1061 1061 return False
1062 1062
1063 1063 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1064 1064
1065 1065 @predicate('_matchfiles', safe=True)
1066 1066 def _matchfiles(repo, subset, x):
1067 1067 # _matchfiles takes a revset list of prefixed arguments:
1068 1068 #
1069 1069 # [p:foo, i:bar, x:baz]
1070 1070 #
1071 1071 # builds a match object from them and filters subset. Allowed
1072 1072 # prefixes are 'p:' for regular patterns, 'i:' for include
1073 1073 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1074 1074 # a revision identifier, or the empty string to reference the
1075 1075 # working directory, from which the match object is
1076 1076 # initialized. Use 'd:' to set the default matching mode, default
1077 1077 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1078 1078
1079 1079 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1080 1080 pats, inc, exc = [], [], []
1081 1081 rev, default = None, None
1082 1082 for arg in l:
1083 1083 s = getstring(arg, "_matchfiles requires string arguments")
1084 1084 prefix, value = s[:2], s[2:]
1085 1085 if prefix == 'p:':
1086 1086 pats.append(value)
1087 1087 elif prefix == 'i:':
1088 1088 inc.append(value)
1089 1089 elif prefix == 'x:':
1090 1090 exc.append(value)
1091 1091 elif prefix == 'r:':
1092 1092 if rev is not None:
1093 1093 raise error.ParseError('_matchfiles expected at most one '
1094 1094 'revision')
1095 1095 if value != '': # empty means working directory; leave rev as None
1096 1096 rev = value
1097 1097 elif prefix == 'd:':
1098 1098 if default is not None:
1099 1099 raise error.ParseError('_matchfiles expected at most one '
1100 1100 'default mode')
1101 1101 default = value
1102 1102 else:
1103 1103 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1104 1104 if not default:
1105 1105 default = 'glob'
1106 1106
1107 1107 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1108 1108 exclude=exc, ctx=repo[rev], default=default)
1109 1109
1110 1110 # This directly read the changelog data as creating changectx for all
1111 1111 # revisions is quite expensive.
1112 1112 getfiles = repo.changelog.readfiles
1113 1113 wdirrev = node.wdirrev
1114 1114 def matches(x):
1115 1115 if x == wdirrev:
1116 1116 files = repo[x].files()
1117 1117 else:
1118 1118 files = getfiles(x)
1119 1119 for f in files:
1120 1120 if m(f):
1121 1121 return True
1122 1122 return False
1123 1123
1124 1124 return subset.filter(matches,
1125 1125 condrepr=('<matchfiles patterns=%r, include=%r '
1126 1126 'exclude=%r, default=%r, rev=%r>',
1127 1127 pats, inc, exc, default, rev))
1128 1128
1129 1129 @predicate('file(pattern)', safe=True)
1130 1130 def hasfile(repo, subset, x):
1131 1131 """Changesets affecting files matched by pattern.
1132 1132
1133 1133 For a faster but less accurate result, consider using ``filelog()``
1134 1134 instead.
1135 1135
1136 1136 This predicate uses ``glob:`` as the default kind of pattern.
1137 1137 """
1138 1138 # i18n: "file" is a keyword
1139 1139 pat = getstring(x, _("file requires a pattern"))
1140 1140 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1141 1141
1142 1142 @predicate('head()', safe=True)
1143 1143 def head(repo, subset, x):
1144 1144 """Changeset is a named branch head.
1145 1145 """
1146 1146 # i18n: "head" is a keyword
1147 1147 getargs(x, 0, 0, _("head takes no arguments"))
1148 1148 hs = set()
1149 1149 cl = repo.changelog
1150 1150 for b, ls in repo.branchmap().iteritems():
1151 1151 hs.update(cl.rev(h) for h in ls)
1152 1152 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1153 1153 # This does not break because of other fullreposet misbehavior.
1154 1154 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1155 1155 # necessary to ensure we preserve the order in subset.
1156 1156 return baseset(hs) & subset
1157 1157
1158 1158 @predicate('heads(set)', safe=True)
1159 1159 def heads(repo, subset, x):
1160 1160 """Members of set with no children in set.
1161 1161 """
1162 1162 s = getset(repo, subset, x)
1163 1163 ps = parents(repo, subset, x)
1164 1164 return s - ps
1165 1165
1166 1166 @predicate('hidden()', safe=True)
1167 1167 def hidden(repo, subset, x):
1168 1168 """Hidden changesets.
1169 1169 """
1170 1170 # i18n: "hidden" is a keyword
1171 1171 getargs(x, 0, 0, _("hidden takes no arguments"))
1172 1172 hiddenrevs = repoview.filterrevs(repo, 'visible')
1173 1173 return subset & hiddenrevs
1174 1174
1175 1175 @predicate('keyword(string)', safe=True)
1176 1176 def keyword(repo, subset, x):
1177 1177 """Search commit message, user name, and names of changed files for
1178 1178 string. The match is case-insensitive.
1179 1179 """
1180 1180 # i18n: "keyword" is a keyword
1181 1181 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1182 1182
1183 1183 def matches(r):
1184 1184 c = repo[r]
1185 1185 return any(kw in encoding.lower(t)
1186 1186 for t in c.files() + [c.user(), c.description()])
1187 1187
1188 1188 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1189 1189
1190 1190 @predicate('limit(set[, n[, offset]])', safe=True)
1191 1191 def limit(repo, subset, x):
1192 1192 """First n members of set, defaulting to 1, starting from offset.
1193 1193 """
1194 1194 args = getargsdict(x, 'limit', 'set n offset')
1195 1195 if 'set' not in args:
1196 1196 # i18n: "limit" is a keyword
1197 1197 raise error.ParseError(_("limit requires one to three arguments"))
1198 1198 try:
1199 1199 lim, ofs = 1, 0
1200 1200 if 'n' in args:
1201 1201 # i18n: "limit" is a keyword
1202 1202 lim = int(getstring(args['n'], _("limit requires a number")))
1203 1203 if 'offset' in args:
1204 1204 # i18n: "limit" is a keyword
1205 1205 ofs = int(getstring(args['offset'], _("limit requires a number")))
1206 1206 if ofs < 0:
1207 1207 raise error.ParseError(_("negative offset"))
1208 1208 except (TypeError, ValueError):
1209 1209 # i18n: "limit" is a keyword
1210 1210 raise error.ParseError(_("limit expects a number"))
1211 1211 os = getset(repo, fullreposet(repo), args['set'])
1212 1212 result = []
1213 1213 it = iter(os)
1214 1214 for x in xrange(ofs):
1215 1215 y = next(it, None)
1216 1216 if y is None:
1217 1217 break
1218 1218 for x in xrange(lim):
1219 1219 y = next(it, None)
1220 1220 if y is None:
1221 1221 break
1222 1222 elif y in subset:
1223 1223 result.append(y)
1224 1224 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1225 1225 lim, ofs, subset, os))
1226 1226
1227 1227 @predicate('last(set, [n])', safe=True)
1228 1228 def last(repo, subset, x):
1229 1229 """Last n members of set, defaulting to 1.
1230 1230 """
1231 1231 # i18n: "last" is a keyword
1232 1232 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1233 1233 try:
1234 1234 lim = 1
1235 1235 if len(l) == 2:
1236 1236 # i18n: "last" is a keyword
1237 1237 lim = int(getstring(l[1], _("last requires a number")))
1238 1238 except (TypeError, ValueError):
1239 1239 # i18n: "last" is a keyword
1240 1240 raise error.ParseError(_("last expects a number"))
1241 1241 os = getset(repo, fullreposet(repo), l[0])
1242 1242 os.reverse()
1243 1243 result = []
1244 1244 it = iter(os)
1245 1245 for x in xrange(lim):
1246 1246 y = next(it, None)
1247 1247 if y is None:
1248 1248 break
1249 1249 elif y in subset:
1250 1250 result.append(y)
1251 1251 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1252 1252
1253 1253 @predicate('max(set)', safe=True)
1254 1254 def maxrev(repo, subset, x):
1255 1255 """Changeset with highest revision number in set.
1256 1256 """
1257 1257 os = getset(repo, fullreposet(repo), x)
1258 1258 try:
1259 1259 m = os.max()
1260 1260 if m in subset:
1261 1261 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1262 1262 except ValueError:
1263 1263 # os.max() throws a ValueError when the collection is empty.
1264 1264 # Same as python's max().
1265 1265 pass
1266 1266 return baseset(datarepr=('<max %r, %r>', subset, os))
1267 1267
1268 1268 @predicate('merge()', safe=True)
1269 1269 def merge(repo, subset, x):
1270 1270 """Changeset is a merge changeset.
1271 1271 """
1272 1272 # i18n: "merge" is a keyword
1273 1273 getargs(x, 0, 0, _("merge takes no arguments"))
1274 1274 cl = repo.changelog
1275 1275 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1276 1276 condrepr='<merge>')
1277 1277
1278 1278 @predicate('branchpoint()', safe=True)
1279 1279 def branchpoint(repo, subset, x):
1280 1280 """Changesets with more than one child.
1281 1281 """
1282 1282 # i18n: "branchpoint" is a keyword
1283 1283 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1284 1284 cl = repo.changelog
1285 1285 if not subset:
1286 1286 return baseset()
1287 1287 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1288 1288 # (and if it is not, it should.)
1289 1289 baserev = min(subset)
1290 1290 parentscount = [0]*(len(repo) - baserev)
1291 1291 for r in cl.revs(start=baserev + 1):
1292 1292 for p in cl.parentrevs(r):
1293 1293 if p >= baserev:
1294 1294 parentscount[p - baserev] += 1
1295 1295 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1296 1296 condrepr='<branchpoint>')
1297 1297
1298 1298 @predicate('min(set)', safe=True)
1299 1299 def minrev(repo, subset, x):
1300 1300 """Changeset with lowest revision number in set.
1301 1301 """
1302 1302 os = getset(repo, fullreposet(repo), x)
1303 1303 try:
1304 1304 m = os.min()
1305 1305 if m in subset:
1306 1306 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1307 1307 except ValueError:
1308 1308 # os.min() throws a ValueError when the collection is empty.
1309 1309 # Same as python's min().
1310 1310 pass
1311 1311 return baseset(datarepr=('<min %r, %r>', subset, os))
1312 1312
1313 1313 @predicate('modifies(pattern)', safe=True)
1314 1314 def modifies(repo, subset, x):
1315 1315 """Changesets modifying files matched by pattern.
1316 1316
1317 1317 The pattern without explicit kind like ``glob:`` is expected to be
1318 1318 relative to the current directory and match against a file or a
1319 1319 directory.
1320 1320 """
1321 1321 # i18n: "modifies" is a keyword
1322 1322 pat = getstring(x, _("modifies requires a pattern"))
1323 1323 return checkstatus(repo, subset, pat, 0)
1324 1324
1325 1325 @predicate('named(namespace)')
1326 1326 def named(repo, subset, x):
1327 1327 """The changesets in a given namespace.
1328 1328
1329 1329 If `namespace` starts with `re:`, the remainder of the string is treated as
1330 1330 a regular expression. To match a namespace that actually starts with `re:`,
1331 1331 use the prefix `literal:`.
1332 1332 """
1333 1333 # i18n: "named" is a keyword
1334 1334 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335 1335
1336 1336 ns = getstring(args[0],
1337 1337 # i18n: "named" is a keyword
1338 1338 _('the argument to named must be a string'))
1339 1339 kind, pattern, matcher = util.stringmatcher(ns)
1340 1340 namespaces = set()
1341 1341 if kind == 'literal':
1342 1342 if pattern not in repo.names:
1343 1343 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 1344 % ns)
1345 1345 namespaces.add(repo.names[pattern])
1346 1346 else:
1347 1347 for name, ns in repo.names.iteritems():
1348 1348 if matcher(name):
1349 1349 namespaces.add(ns)
1350 1350 if not namespaces:
1351 1351 raise error.RepoLookupError(_("no namespace exists"
1352 1352 " that match '%s'") % pattern)
1353 1353
1354 1354 names = set()
1355 1355 for ns in namespaces:
1356 1356 for name in ns.listnames(repo):
1357 1357 if name not in ns.deprecated:
1358 1358 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1359 1359
1360 1360 names -= set([node.nullrev])
1361 1361 return subset & names
1362 1362
1363 1363 @predicate('id(string)', safe=True)
1364 1364 def node_(repo, subset, x):
1365 1365 """Revision non-ambiguously specified by the given hex string prefix.
1366 1366 """
1367 1367 # i18n: "id" is a keyword
1368 1368 l = getargs(x, 1, 1, _("id requires one argument"))
1369 1369 # i18n: "id" is a keyword
1370 1370 n = getstring(l[0], _("id requires a string"))
1371 1371 if len(n) == 40:
1372 1372 try:
1373 1373 rn = repo.changelog.rev(node.bin(n))
1374 1374 except (LookupError, TypeError):
1375 1375 rn = None
1376 1376 else:
1377 1377 rn = None
1378 1378 pm = repo.changelog._partialmatch(n)
1379 1379 if pm is not None:
1380 1380 rn = repo.changelog.rev(pm)
1381 1381
1382 1382 if rn is None:
1383 1383 return baseset()
1384 1384 result = baseset([rn])
1385 1385 return result & subset
1386 1386
1387 1387 @predicate('obsolete()', safe=True)
1388 1388 def obsolete(repo, subset, x):
1389 1389 """Mutable changeset with a newer version."""
1390 1390 # i18n: "obsolete" is a keyword
1391 1391 getargs(x, 0, 0, _("obsolete takes no arguments"))
1392 1392 obsoletes = obsmod.getrevs(repo, 'obsolete')
1393 1393 return subset & obsoletes
1394 1394
1395 1395 @predicate('only(set, [set])', safe=True)
1396 1396 def only(repo, subset, x):
1397 1397 """Changesets that are ancestors of the first set that are not ancestors
1398 1398 of any other head in the repo. If a second set is specified, the result
1399 1399 is ancestors of the first set that are not ancestors of the second set
1400 1400 (i.e. ::<set1> - ::<set2>).
1401 1401 """
1402 1402 cl = repo.changelog
1403 1403 # i18n: "only" is a keyword
1404 1404 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1405 1405 include = getset(repo, fullreposet(repo), args[0])
1406 1406 if len(args) == 1:
1407 1407 if not include:
1408 1408 return baseset()
1409 1409
1410 1410 descendants = set(_revdescendants(repo, include, False))
1411 1411 exclude = [rev for rev in cl.headrevs()
1412 1412 if not rev in descendants and not rev in include]
1413 1413 else:
1414 1414 exclude = getset(repo, fullreposet(repo), args[1])
1415 1415
1416 1416 results = set(cl.findmissingrevs(common=exclude, heads=include))
1417 1417 # XXX we should turn this into a baseset instead of a set, smartset may do
1418 1418 # some optimisations from the fact this is a baseset.
1419 1419 return subset & results
1420 1420
1421 1421 @predicate('origin([set])', safe=True)
1422 1422 def origin(repo, subset, x):
1423 1423 """
1424 1424 Changesets that were specified as a source for the grafts, transplants or
1425 1425 rebases that created the given revisions. Omitting the optional set is the
1426 1426 same as passing all(). If a changeset created by these operations is itself
1427 1427 specified as a source for one of these operations, only the source changeset
1428 1428 for the first operation is selected.
1429 1429 """
1430 1430 if x is not None:
1431 1431 dests = getset(repo, fullreposet(repo), x)
1432 1432 else:
1433 1433 dests = fullreposet(repo)
1434 1434
1435 1435 def _firstsrc(rev):
1436 1436 src = _getrevsource(repo, rev)
1437 1437 if src is None:
1438 1438 return None
1439 1439
1440 1440 while True:
1441 1441 prev = _getrevsource(repo, src)
1442 1442
1443 1443 if prev is None:
1444 1444 return src
1445 1445 src = prev
1446 1446
1447 1447 o = set([_firstsrc(r) for r in dests])
1448 1448 o -= set([None])
1449 1449 # XXX we should turn this into a baseset instead of a set, smartset may do
1450 1450 # some optimisations from the fact this is a baseset.
1451 1451 return subset & o
1452 1452
1453 1453 @predicate('outgoing([path])', safe=True)
1454 1454 def outgoing(repo, subset, x):
1455 1455 """Changesets not found in the specified destination repository, or the
1456 1456 default push location.
1457 1457 """
1458 1458 # Avoid cycles.
1459 1459 from . import (
1460 1460 discovery,
1461 1461 hg,
1462 1462 )
1463 1463 # i18n: "outgoing" is a keyword
1464 1464 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1465 1465 # i18n: "outgoing" is a keyword
1466 1466 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1467 1467 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1468 1468 dest, branches = hg.parseurl(dest)
1469 1469 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1470 1470 if revs:
1471 1471 revs = [repo.lookup(rev) for rev in revs]
1472 1472 other = hg.peer(repo, {}, dest)
1473 1473 repo.ui.pushbuffer()
1474 1474 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1475 1475 repo.ui.popbuffer()
1476 1476 cl = repo.changelog
1477 1477 o = set([cl.rev(r) for r in outgoing.missing])
1478 1478 return subset & o
1479 1479
1480 1480 @predicate('p1([set])', safe=True)
1481 1481 def p1(repo, subset, x):
1482 1482 """First parent of changesets in set, or the working directory.
1483 1483 """
1484 1484 if x is None:
1485 1485 p = repo[x].p1().rev()
1486 1486 if p >= 0:
1487 1487 return subset & baseset([p])
1488 1488 return baseset()
1489 1489
1490 1490 ps = set()
1491 1491 cl = repo.changelog
1492 1492 for r in getset(repo, fullreposet(repo), x):
1493 1493 ps.add(cl.parentrevs(r)[0])
1494 1494 ps -= set([node.nullrev])
1495 1495 # XXX we should turn this into a baseset instead of a set, smartset may do
1496 1496 # some optimisations from the fact this is a baseset.
1497 1497 return subset & ps
1498 1498
1499 1499 @predicate('p2([set])', safe=True)
1500 1500 def p2(repo, subset, x):
1501 1501 """Second parent of changesets in set, or the working directory.
1502 1502 """
1503 1503 if x is None:
1504 1504 ps = repo[x].parents()
1505 1505 try:
1506 1506 p = ps[1].rev()
1507 1507 if p >= 0:
1508 1508 return subset & baseset([p])
1509 1509 return baseset()
1510 1510 except IndexError:
1511 1511 return baseset()
1512 1512
1513 1513 ps = set()
1514 1514 cl = repo.changelog
1515 1515 for r in getset(repo, fullreposet(repo), x):
1516 1516 ps.add(cl.parentrevs(r)[1])
1517 1517 ps -= set([node.nullrev])
1518 1518 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 1519 # some optimisations from the fact this is a baseset.
1520 1520 return subset & ps
1521 1521
1522 1522 @predicate('parents([set])', safe=True)
1523 1523 def parents(repo, subset, x):
1524 1524 """
1525 1525 The set of all parents for all changesets in set, or the working directory.
1526 1526 """
1527 1527 if x is None:
1528 1528 ps = set(p.rev() for p in repo[x].parents())
1529 1529 else:
1530 1530 ps = set()
1531 1531 cl = repo.changelog
1532 1532 up = ps.update
1533 1533 parentrevs = cl.parentrevs
1534 1534 for r in getset(repo, fullreposet(repo), x):
1535 1535 if r == node.wdirrev:
1536 1536 up(p.rev() for p in repo[r].parents())
1537 1537 else:
1538 1538 up(parentrevs(r))
1539 1539 ps -= set([node.nullrev])
1540 1540 return subset & ps
1541 1541
1542 1542 def _phase(repo, subset, target):
1543 1543 """helper to select all rev in phase <target>"""
1544 1544 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1545 1545 if repo._phasecache._phasesets:
1546 1546 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1547 1547 s = baseset(s)
1548 1548 s.sort() # set are non ordered, so we enforce ascending
1549 1549 return subset & s
1550 1550 else:
1551 1551 phase = repo._phasecache.phase
1552 1552 condition = lambda r: phase(repo, r) == target
1553 1553 return subset.filter(condition, condrepr=('<phase %r>', target),
1554 1554 cache=False)
1555 1555
1556 1556 @predicate('draft()', safe=True)
1557 1557 def draft(repo, subset, x):
1558 1558 """Changesets in the draft phase."""
1559 1559 # i18n: "draft" is a keyword
1560 1560 getargs(x, 0, 0, _("draft takes no arguments"))
1561 1561 target = phases.draft
1562 1562 return _phase(repo, subset, target)
1563 1563
1564 1564 @predicate('secret()', safe=True)
1565 1565 def secret(repo, subset, x):
1566 1566 """Changesets in the secret phase."""
1567 1567 # i18n: "secret" is a keyword
1568 1568 getargs(x, 0, 0, _("secret takes no arguments"))
1569 1569 target = phases.secret
1570 1570 return _phase(repo, subset, target)
1571 1571
1572 1572 def parentspec(repo, subset, x, n):
1573 1573 """``set^0``
1574 1574 The set.
1575 1575 ``set^1`` (or ``set^``), ``set^2``
1576 1576 First or second parent, respectively, of all changesets in set.
1577 1577 """
1578 1578 try:
1579 1579 n = int(n[1])
1580 1580 if n not in (0, 1, 2):
1581 1581 raise ValueError
1582 1582 except (TypeError, ValueError):
1583 1583 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1584 1584 ps = set()
1585 1585 cl = repo.changelog
1586 1586 for r in getset(repo, fullreposet(repo), x):
1587 1587 if n == 0:
1588 1588 ps.add(r)
1589 1589 elif n == 1:
1590 1590 ps.add(cl.parentrevs(r)[0])
1591 1591 elif n == 2:
1592 1592 parents = cl.parentrevs(r)
1593 1593 if len(parents) > 1:
1594 1594 ps.add(parents[1])
1595 1595 return subset & ps
1596 1596
1597 1597 @predicate('present(set)', safe=True)
1598 1598 def present(repo, subset, x):
1599 1599 """An empty set, if any revision in set isn't found; otherwise,
1600 1600 all revisions in set.
1601 1601
1602 1602 If any of the specified revisions is not present in the local repository,
1603 1603 the query is normally aborted. But this predicate allows the query
1604 1604 to continue even in such cases.
1605 1605 """
1606 1606 try:
1607 1607 return getset(repo, subset, x)
1608 1608 except error.RepoLookupError:
1609 1609 return baseset()
1610 1610
1611 1611 # for internal use
1612 1612 @predicate('_notpublic', safe=True)
1613 1613 def _notpublic(repo, subset, x):
1614 1614 getargs(x, 0, 0, "_notpublic takes no arguments")
1615 1615 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1616 1616 if repo._phasecache._phasesets:
1617 1617 s = set()
1618 1618 for u in repo._phasecache._phasesets[1:]:
1619 1619 s.update(u)
1620 1620 s = baseset(s - repo.changelog.filteredrevs)
1621 1621 s.sort()
1622 1622 return subset & s
1623 1623 else:
1624 1624 phase = repo._phasecache.phase
1625 1625 target = phases.public
1626 1626 condition = lambda r: phase(repo, r) != target
1627 1627 return subset.filter(condition, condrepr=('<phase %r>', target),
1628 1628 cache=False)
1629 1629
1630 1630 @predicate('public()', safe=True)
1631 1631 def public(repo, subset, x):
1632 1632 """Changesets in the public phase."""
1633 1633 # i18n: "public" is a keyword
1634 1634 getargs(x, 0, 0, _("public takes no arguments"))
1635 1635 phase = repo._phasecache.phase
1636 1636 target = phases.public
1637 1637 condition = lambda r: phase(repo, r) == target
1638 1638 return subset.filter(condition, condrepr=('<phase %r>', target),
1639 1639 cache=False)
1640 1640
1641 1641 @predicate('remote([id [,path]])', safe=True)
1642 1642 def remote(repo, subset, x):
1643 1643 """Local revision that corresponds to the given identifier in a
1644 1644 remote repository, if present. Here, the '.' identifier is a
1645 1645 synonym for the current local branch.
1646 1646 """
1647 1647
1648 1648 from . import hg # avoid start-up nasties
1649 1649 # i18n: "remote" is a keyword
1650 1650 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1651 1651
1652 1652 q = '.'
1653 1653 if len(l) > 0:
1654 1654 # i18n: "remote" is a keyword
1655 1655 q = getstring(l[0], _("remote requires a string id"))
1656 1656 if q == '.':
1657 1657 q = repo['.'].branch()
1658 1658
1659 1659 dest = ''
1660 1660 if len(l) > 1:
1661 1661 # i18n: "remote" is a keyword
1662 1662 dest = getstring(l[1], _("remote requires a repository path"))
1663 1663 dest = repo.ui.expandpath(dest or 'default')
1664 1664 dest, branches = hg.parseurl(dest)
1665 1665 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1666 1666 if revs:
1667 1667 revs = [repo.lookup(rev) for rev in revs]
1668 1668 other = hg.peer(repo, {}, dest)
1669 1669 n = other.lookup(q)
1670 1670 if n in repo:
1671 1671 r = repo[n].rev()
1672 1672 if r in subset:
1673 1673 return baseset([r])
1674 1674 return baseset()
1675 1675
1676 1676 @predicate('removes(pattern)', safe=True)
1677 1677 def removes(repo, subset, x):
1678 1678 """Changesets which remove files matching pattern.
1679 1679
1680 1680 The pattern without explicit kind like ``glob:`` is expected to be
1681 1681 relative to the current directory and match against a file or a
1682 1682 directory.
1683 1683 """
1684 1684 # i18n: "removes" is a keyword
1685 1685 pat = getstring(x, _("removes requires a pattern"))
1686 1686 return checkstatus(repo, subset, pat, 2)
1687 1687
1688 1688 @predicate('rev(number)', safe=True)
1689 1689 def rev(repo, subset, x):
1690 1690 """Revision with the given numeric identifier.
1691 1691 """
1692 1692 # i18n: "rev" is a keyword
1693 1693 l = getargs(x, 1, 1, _("rev requires one argument"))
1694 1694 try:
1695 1695 # i18n: "rev" is a keyword
1696 1696 l = int(getstring(l[0], _("rev requires a number")))
1697 1697 except (TypeError, ValueError):
1698 1698 # i18n: "rev" is a keyword
1699 1699 raise error.ParseError(_("rev expects a number"))
1700 1700 if l not in repo.changelog and l != node.nullrev:
1701 1701 return baseset()
1702 1702 return subset & baseset([l])
1703 1703
1704 1704 @predicate('matching(revision [, field])', safe=True)
1705 1705 def matching(repo, subset, x):
1706 1706 """Changesets in which a given set of fields match the set of fields in the
1707 1707 selected revision or set.
1708 1708
1709 1709 To match more than one field pass the list of fields to match separated
1710 1710 by spaces (e.g. ``author description``).
1711 1711
1712 1712 Valid fields are most regular revision fields and some special fields.
1713 1713
1714 1714 Regular revision fields are ``description``, ``author``, ``branch``,
1715 1715 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1716 1716 and ``diff``.
1717 1717 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1718 1718 contents of the revision. Two revisions matching their ``diff`` will
1719 1719 also match their ``files``.
1720 1720
1721 1721 Special fields are ``summary`` and ``metadata``:
1722 1722 ``summary`` matches the first line of the description.
1723 1723 ``metadata`` is equivalent to matching ``description user date``
1724 1724 (i.e. it matches the main metadata fields).
1725 1725
1726 1726 ``metadata`` is the default field which is used when no fields are
1727 1727 specified. You can match more than one field at a time.
1728 1728 """
1729 1729 # i18n: "matching" is a keyword
1730 1730 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1731 1731
1732 1732 revs = getset(repo, fullreposet(repo), l[0])
1733 1733
1734 1734 fieldlist = ['metadata']
1735 1735 if len(l) > 1:
1736 1736 fieldlist = getstring(l[1],
1737 1737 # i18n: "matching" is a keyword
1738 1738 _("matching requires a string "
1739 1739 "as its second argument")).split()
1740 1740
1741 1741 # Make sure that there are no repeated fields,
1742 1742 # expand the 'special' 'metadata' field type
1743 1743 # and check the 'files' whenever we check the 'diff'
1744 1744 fields = []
1745 1745 for field in fieldlist:
1746 1746 if field == 'metadata':
1747 1747 fields += ['user', 'description', 'date']
1748 1748 elif field == 'diff':
1749 1749 # a revision matching the diff must also match the files
1750 1750 # since matching the diff is very costly, make sure to
1751 1751 # also match the files first
1752 1752 fields += ['files', 'diff']
1753 1753 else:
1754 1754 if field == 'author':
1755 1755 field = 'user'
1756 1756 fields.append(field)
1757 1757 fields = set(fields)
1758 1758 if 'summary' in fields and 'description' in fields:
1759 1759 # If a revision matches its description it also matches its summary
1760 1760 fields.discard('summary')
1761 1761
1762 1762 # We may want to match more than one field
1763 1763 # Not all fields take the same amount of time to be matched
1764 1764 # Sort the selected fields in order of increasing matching cost
1765 1765 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1766 1766 'files', 'description', 'substate', 'diff']
1767 1767 def fieldkeyfunc(f):
1768 1768 try:
1769 1769 return fieldorder.index(f)
1770 1770 except ValueError:
1771 1771 # assume an unknown field is very costly
1772 1772 return len(fieldorder)
1773 1773 fields = list(fields)
1774 1774 fields.sort(key=fieldkeyfunc)
1775 1775
1776 1776 # Each field will be matched with its own "getfield" function
1777 1777 # which will be added to the getfieldfuncs array of functions
1778 1778 getfieldfuncs = []
1779 1779 _funcs = {
1780 1780 'user': lambda r: repo[r].user(),
1781 1781 'branch': lambda r: repo[r].branch(),
1782 1782 'date': lambda r: repo[r].date(),
1783 1783 'description': lambda r: repo[r].description(),
1784 1784 'files': lambda r: repo[r].files(),
1785 1785 'parents': lambda r: repo[r].parents(),
1786 1786 'phase': lambda r: repo[r].phase(),
1787 1787 'substate': lambda r: repo[r].substate,
1788 1788 'summary': lambda r: repo[r].description().splitlines()[0],
1789 1789 'diff': lambda r: list(repo[r].diff(git=True))
1790 1790 }
1791 1791 for info in fields:
1792 1792 getfield = _funcs.get(info, None)
1793 1793 if getfield is None:
1794 1794 raise error.ParseError(
1795 1795 # i18n: "matching" is a keyword
1796 1796 _("unexpected field name passed to matching: %s") % info)
1797 1797 getfieldfuncs.append(getfield)
1798 1798 # convert the getfield array of functions into a "getinfo" function
1799 1799 # which returns an array of field values (or a single value if there
1800 1800 # is only one field to match)
1801 1801 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1802 1802
1803 1803 def matches(x):
1804 1804 for rev in revs:
1805 1805 target = getinfo(rev)
1806 1806 match = True
1807 1807 for n, f in enumerate(getfieldfuncs):
1808 1808 if target[n] != f(x):
1809 1809 match = False
1810 1810 if match:
1811 1811 return True
1812 1812 return False
1813 1813
1814 1814 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
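# Illustrative usage (sketch): 'matching(2:4, "author branch")' selects
# changesets whose author and branch both equal those of one of revisions 2
# through 4; with a single argument the default 'metadata' field set (user,
# description, date) is compared, as expanded above.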
1815 1815
1816 1816 @predicate('reverse(set)', safe=True)
1817 1817 def reverse(repo, subset, x):
1818 1818 """Reverse order of set.
1819 1819 """
1820 1820 l = getset(repo, subset, x)
1821 1821 l.reverse()
1822 1822 return l
1823 1823
1824 1824 @predicate('roots(set)', safe=True)
1825 1825 def roots(repo, subset, x):
1826 1826 """Changesets in set with no parent changeset in set.
1827 1827 """
1828 1828 s = getset(repo, fullreposet(repo), x)
1829 1829 parents = repo.changelog.parentrevs
1830 1830 def filter(r):
1831 1831 for p in parents(r):
1832 1832 if 0 <= p and p in s:
1833 1833 return False
1834 1834 return True
1835 1835 return subset & s.filter(filter, condrepr='<roots>')
1836 1836
1837 1837 @predicate('sort(set[, [-]key...])', safe=True)
1838 1838 def sort(repo, subset, x):
1839 1839 """Sort set by keys. The default sort order is ascending; specify a key
1840 1840 as ``-key`` to sort in descending order.
1841 1841
1842 1842 The keys can be:
1843 1843
1844 1844 - ``rev`` for the revision number,
1845 1845 - ``branch`` for the branch name,
1846 1846 - ``desc`` for the commit message (description),
1847 1847 - ``user`` for user name (``author`` can be used as an alias),
1848 1848 - ``date`` for the commit date
1849 1849 """
1850 1850 # i18n: "sort" is a keyword
1851 1851 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1852 1852 keys = "rev"
1853 1853 if len(l) == 2:
1854 1854 # i18n: "sort" is a keyword
1855 1855 keys = getstring(l[1], _("sort spec must be a string"))
1856 1856
1857 1857 s = l[0]
1858 1858 keys = keys.split()
1859 1859 revs = getset(repo, subset, s)
1860 1860 if keys == ["rev"]:
1861 1861 revs.sort()
1862 1862 return revs
1863 1863 elif keys == ["-rev"]:
1864 1864 revs.sort(reverse=True)
1865 1865 return revs
1866 1866 # sort() is guaranteed to be stable
1867 1867 ctxs = [repo[r] for r in revs]
1868 1868 for k in reversed(keys):
1869 1869 if k == 'rev':
1870 1870 ctxs.sort(key=lambda c: c.rev())
1871 1871 elif k == '-rev':
1872 1872 ctxs.sort(key=lambda c: c.rev(), reverse=True)
1873 1873 elif k == 'branch':
1874 1874 ctxs.sort(key=lambda c: c.branch())
1875 1875 elif k == '-branch':
1876 1876 ctxs.sort(key=lambda c: c.branch(), reverse=True)
1877 1877 elif k == 'desc':
1878 1878 ctxs.sort(key=lambda c: c.description())
1879 1879 elif k == '-desc':
1880 1880 ctxs.sort(key=lambda c: c.description(), reverse=True)
1881 1881 elif k in 'user author':
1882 1882 ctxs.sort(key=lambda c: c.user())
1883 1883 elif k in '-user -author':
1884 1884 ctxs.sort(key=lambda c: c.user(), reverse=True)
1885 1885 elif k == 'date':
1886 1886 ctxs.sort(key=lambda c: c.date()[0])
1887 1887 elif k == '-date':
1888 1888 ctxs.sort(key=lambda c: c.date()[0], reverse=True)
1889 1889 else:
1890 1890 raise error.ParseError(_("unknown sort key %r") % k)
1891 1891 return baseset([c.rev() for c in ctxs])
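# Illustrative usage (sketch): 'sort(branch(default), -date)' lists the
# default branch newest first; with several keys, e.g. 'sort(all(), "user -rev")',
# the leftmost key takes precedence, because the keys are applied right to
# left over a stable sort (see the reversed() loop above).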
1892 1892
1893 1893 @predicate('subrepo([pattern])')
1894 1894 def subrepo(repo, subset, x):
1895 1895 """Changesets that add, modify or remove the given subrepo. If no subrepo
1896 1896 pattern is named, any subrepo changes are returned.
1897 1897 """
1898 1898 # i18n: "subrepo" is a keyword
1899 1899 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1900 1900 pat = None
1901 1901 if len(args) != 0:
1902 1902 pat = getstring(args[0], _("subrepo requires a pattern"))
1903 1903
1904 1904 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1905 1905
1906 1906 def submatches(names):
1907 1907 k, p, m = util.stringmatcher(pat)
1908 1908 for name in names:
1909 1909 if m(name):
1910 1910 yield name
1911 1911
1912 1912 def matches(x):
1913 1913 c = repo[x]
1914 1914 s = repo.status(c.p1().node(), c.node(), match=m)
1915 1915
1916 1916 if pat is None:
1917 1917 return s.added or s.modified or s.removed
1918 1918
1919 1919 if s.added:
1920 1920 return any(submatches(c.substate.keys()))
1921 1921
1922 1922 if s.modified:
1923 1923 subs = set(c.p1().substate.keys())
1924 1924 subs.update(c.substate.keys())
1925 1925
1926 1926 for path in submatches(subs):
1927 1927 if c.p1().substate.get(path) != c.substate.get(path):
1928 1928 return True
1929 1929
1930 1930 if s.removed:
1931 1931 return any(submatches(c.p1().substate.keys()))
1932 1932
1933 1933 return False
1934 1934
1935 1935 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
1936 1936
1937 1937 def _substringmatcher(pattern):
1938 1938 kind, pattern, matcher = util.stringmatcher(pattern)
1939 1939 if kind == 'literal':
1940 1940 matcher = lambda s: pattern in s
1941 1941 return kind, pattern, matcher
1942 1942
1943 1943 @predicate('tag([name])', safe=True)
1944 1944 def tag(repo, subset, x):
1945 1945 """The specified tag by name, or all tagged revisions if no name is given.
1946 1946
1947 1947 If `name` starts with `re:`, the remainder of the name is treated as
1948 1948 a regular expression. To match a tag that actually starts with `re:`,
1949 1949 use the prefix `literal:`.
1950 1950 """
1951 1951 # i18n: "tag" is a keyword
1952 1952 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1953 1953 cl = repo.changelog
1954 1954 if args:
1955 1955 pattern = getstring(args[0],
1956 1956 # i18n: "tag" is a keyword
1957 1957 _('the argument to tag must be a string'))
1958 1958 kind, pattern, matcher = util.stringmatcher(pattern)
1959 1959 if kind == 'literal':
1960 1960 # avoid resolving all tags
1961 1961 tn = repo._tagscache.tags.get(pattern, None)
1962 1962 if tn is None:
1963 1963 raise error.RepoLookupError(_("tag '%s' does not exist")
1964 1964 % pattern)
1965 1965 s = set([repo[tn].rev()])
1966 1966 else:
1967 1967 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1968 1968 else:
1969 1969 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1970 1970 return subset & s
1971 1971
1972 1972 @predicate('tagged', safe=True)
1973 1973 def tagged(repo, subset, x):
1974 1974 return tag(repo, subset, x)
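# Note (sketch): 'tagged' simply forwards to tag(), so 'tagged()' selects the
# same revisions as 'tag()' with no argument.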
1975 1975
1976 1976 @predicate('unstable()', safe=True)
1977 1977 def unstable(repo, subset, x):
1978 1978 """Non-obsolete changesets with obsolete ancestors.
1979 1979 """
1980 1980 # i18n: "unstable" is a keyword
1981 1981 getargs(x, 0, 0, _("unstable takes no arguments"))
1982 1982 unstables = obsmod.getrevs(repo, 'unstable')
1983 1983 return subset & unstables
1984 1984
1985 1985
1986 1986 @predicate('user(string)', safe=True)
1987 1987 def user(repo, subset, x):
1988 1988 """User name contains string. The match is case-insensitive.
1989 1989
1990 1990 If `string` starts with `re:`, the remainder of the string is treated as
1991 1991 a regular expression. To match a user that actually contains `re:`, use
1992 1992 the prefix `literal:`.
1993 1993 """
1994 1994 return author(repo, subset, x)
1995 1995
1996 1996 # experimental
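# Sketch of intent: 'wdir()' yields the virtual working-directory revision
# (node.wdirrev) when it belongs to the subset (or when the subset is the
# whole repository), and an empty set otherwise; see the body below.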
1997 1997 @predicate('wdir', safe=True)
1998 1998 def wdir(repo, subset, x):
1999 1999 # i18n: "wdir" is a keyword
2000 2000 getargs(x, 0, 0, _("wdir takes no arguments"))
2001 2001 if node.wdirrev in subset or isinstance(subset, fullreposet):
2002 2002 return baseset([node.wdirrev])
2003 2003 return baseset()
2004 2004
2005 2005 # for internal use
2006 2006 @predicate('_list', safe=True)
2007 2007 def _list(repo, subset, x):
2008 2008 s = getstring(x, "internal error")
2009 2009 if not s:
2010 2010 return baseset()
2011 2011 # remove duplicates here. it's difficult for caller to deduplicate sets
2012 2012 # because different symbols can point to the same rev.
2013 2013 cl = repo.changelog
2014 2014 ls = []
2015 2015 seen = set()
2016 2016 for t in s.split('\0'):
2017 2017 try:
2018 2018 # fast path for integer revision
2019 2019 r = int(t)
2020 2020 if str(r) != t or r not in cl:
2021 2021 raise ValueError
2022 2022 revs = [r]
2023 2023 except ValueError:
2024 2024 revs = stringset(repo, subset, t)
2025 2025
2026 2026 for r in revs:
2027 2027 if r in seen:
2028 2028 continue
2029 2029 if (r in subset
2030 2030 or r == node.nullrev and isinstance(subset, fullreposet)):
2031 2031 ls.append(r)
2032 2032 seen.add(r)
2033 2033 return baseset(ls)
2034 2034
2035 2035 # for internal use
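# Sketch: _intlist (and _hexlist below) consume the '\0'-separated lists that
# formatspec() emits for the %ld and %ln specifiers, e.g.
# "_intlist('10\x0020')" selects revisions 10 and 20 that are in the subset.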
2036 2036 @predicate('_intlist', safe=True)
2037 2037 def _intlist(repo, subset, x):
2038 2038 s = getstring(x, "internal error")
2039 2039 if not s:
2040 2040 return baseset()
2041 2041 ls = [int(r) for r in s.split('\0')]
2042 2042 s = subset
2043 2043 return baseset([r for r in ls if r in s])
2044 2044
2045 2045 # for internal use
2046 2046 @predicate('_hexlist', safe=True)
2047 2047 def _hexlist(repo, subset, x):
2048 2048 s = getstring(x, "internal error")
2049 2049 if not s:
2050 2050 return baseset()
2051 2051 cl = repo.changelog
2052 2052 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2053 2053 s = subset
2054 2054 return baseset([r for r in ls if r in s])
2055 2055
2056 2056 methods = {
2057 2057 "range": rangeset,
2058 2058 "dagrange": dagrange,
2059 2059 "string": stringset,
2060 2060 "symbol": stringset,
2061 2061 "and": andset,
2062 2062 "or": orset,
2063 2063 "not": notset,
2064 2064 "difference": differenceset,
2065 2065 "list": listset,
2066 2066 "keyvalue": keyvaluepair,
2067 2067 "func": func,
2068 2068 "ancestor": ancestorspec,
2069 2069 "parent": parentspec,
2070 2070 "parentpost": p1,
2071 2071 }
2072 2072
2073 2073 def _matchonly(revs, bases):
2074 2074 """
2075 2075 >>> f = lambda *args: _matchonly(*map(parse, args))
2076 2076 >>> f('ancestors(A)', 'not ancestors(B)')
2077 2077 ('list', ('symbol', 'A'), ('symbol', 'B'))
2078 2078 """
2079 2079 if (revs is not None
2080 2080 and revs[0] == 'func'
2081 2081 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2082 2082 and bases is not None
2083 2083 and bases[0] == 'not'
2084 2084 and bases[1][0] == 'func'
2085 2085 and getstring(bases[1][1], _('not a symbol')) == 'ancestors'):
2086 2086 return ('list', revs[2], bases[1][2])
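# Worked example (sketch): after the operands themselves are optimized,
# '::A and not ::B' becomes 'ancestors(A) and not ancestors(B)', which the
# caller in _optimize() rewrites into the cheaper 'only(A, B)' form using the
# ('list', A, B) argument tuple returned above.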
2087 2087
2088 2088 def _optimize(x, small):
2089 2089 if x is None:
2090 2090 return 0, x
2091 2091
2092 2092 smallbonus = 1
2093 2093 if small:
2094 2094 smallbonus = .5
2095 2095
2096 2096 op = x[0]
2097 2097 if op == 'minus':
2098 2098 return _optimize(('and', x[1], ('not', x[2])), small)
2099 2099 elif op == 'only':
2100 2100 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2101 2101 return _optimize(t, small)
2102 2102 elif op == 'onlypost':
2103 2103 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2104 2104 elif op == 'dagrangepre':
2105 2105 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2106 2106 elif op == 'dagrangepost':
2107 2107 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2108 2108 elif op == 'rangeall':
2109 2109 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2110 2110 elif op == 'rangepre':
2111 2111 return _optimize(('range', ('string', '0'), x[1]), small)
2112 2112 elif op == 'rangepost':
2113 2113 return _optimize(('range', x[1], ('string', 'tip')), small)
2114 2114 elif op == 'negate':
2115 2115 s = getstring(x[1], _("can't negate that"))
2116 2116 return _optimize(('string', '-' + s), small)
2117 2117 elif op in 'string symbol negate':
2118 2118 return smallbonus, x # single revisions are small
2119 2119 elif op == 'and':
2120 2120 wa, ta = _optimize(x[1], True)
2121 2121 wb, tb = _optimize(x[2], True)
2122 2122 w = min(wa, wb)
2123 2123
2124 2124 # (::x and not ::y)/(not ::y and ::x) have a fast path
2125 2125 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2126 2126 if tm:
2127 2127 return w, ('func', ('symbol', 'only'), tm)
2128 2128
2129 2129 if tb is not None and tb[0] == 'not':
2130 2130 return wa, ('difference', ta, tb[1])
2131 2131
2132 2132 if wa > wb:
2133 2133 return w, (op, tb, ta)
2134 2134 return w, (op, ta, tb)
2135 2135 elif op == 'or':
2136 2136 # fast path for machine-generated expressions, which are likely to have
2137 2137 # lots of trivial revisions: 'a + b + c()' becomes '_list(a b) + c()'
2138 2138 ws, ts, ss = [], [], []
2139 2139 def flushss():
2140 2140 if not ss:
2141 2141 return
2142 2142 if len(ss) == 1:
2143 2143 w, t = ss[0]
2144 2144 else:
2145 2145 s = '\0'.join(t[1] for w, t in ss)
2146 2146 y = ('func', ('symbol', '_list'), ('string', s))
2147 2147 w, t = _optimize(y, False)
2148 2148 ws.append(w)
2149 2149 ts.append(t)
2150 2150 del ss[:]
2151 2151 for y in x[1:]:
2152 2152 w, t = _optimize(y, False)
2153 2153 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2154 2154 ss.append((w, t))
2155 2155 continue
2156 2156 flushss()
2157 2157 ws.append(w)
2158 2158 ts.append(t)
2159 2159 flushss()
2160 2160 if len(ts) == 1:
2161 2161 return ws[0], ts[0] # 'or' operation is fully optimized out
2162 2162 # we can't reorder trees by weight because it would change the order.
2163 2163 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2164 2164 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2165 2165 return max(ws), (op,) + tuple(ts)
2166 2166 elif op == 'not':
2167 2167 # Optimize not public() to _notpublic() because we have a fast version
2168 2168 if x[1] == ('func', ('symbol', 'public'), None):
2169 2169 newsym = ('func', ('symbol', '_notpublic'), None)
2170 2170 o = _optimize(newsym, not small)
2171 2171 return o[0], o[1]
2172 2172 else:
2173 2173 o = _optimize(x[1], not small)
2174 2174 return o[0], (op, o[1])
2175 2175 elif op == 'parentpost':
2176 2176 o = _optimize(x[1], small)
2177 2177 return o[0], (op, o[1])
2178 2178 elif op == 'group':
2179 2179 return _optimize(x[1], small)
2180 2180 elif op in 'dagrange range parent ancestorspec':
2181 2181 if op == 'parent':
2182 2182 # x^:y means (x^) : y, not x ^ (:y)
2183 2183 post = ('parentpost', x[1])
2184 2184 if x[2][0] == 'dagrangepre':
2185 2185 return _optimize(('dagrange', post, x[2][1]), small)
2186 2186 elif x[2][0] == 'rangepre':
2187 2187 return _optimize(('range', post, x[2][1]), small)
2188 2188
2189 2189 wa, ta = _optimize(x[1], small)
2190 2190 wb, tb = _optimize(x[2], small)
2191 2191 return wa + wb, (op, ta, tb)
2192 2192 elif op == 'list':
2193 2193 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2194 2194 return sum(ws), (op,) + ts
2195 2195 elif op == 'func':
2196 2196 f = getstring(x[1], _("not a symbol"))
2197 2197 wa, ta = _optimize(x[2], small)
2198 2198 if f in ("author branch closed date desc file grep keyword "
2199 2199 "outgoing user"):
2200 2200 w = 10 # slow
2201 2201 elif f in "modifies adds removes":
2202 2202 w = 30 # slower
2203 2203 elif f == "contains":
2204 2204 w = 100 # very slow
2205 2205 elif f == "ancestor":
2206 2206 w = 1 * smallbonus
2207 2207 elif f in "reverse limit first _intlist":
2208 2208 w = 0
2209 2209 elif f in "sort":
2210 2210 w = 10 # assume most sorts look at changelog
2211 2211 else:
2212 2212 w = 1
2213 2213 return w + wa, (op, x[1], ta)
2214 2214 return 1, x
2215 2215
2216 2216 def optimize(tree):
2217 2217 _weight, newtree = _optimize(tree, small=True)
2218 2218 return newtree
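# Pipeline sketch (illustrative): match() and _makematcher() below roughly do
#   tree = parse('draft() and not public()')
#   tree = expandaliases(ui, tree); tree = foldconcat(tree)
#   tree = optimize(tree)   # e.g. 'not public()' is rewritten to '_notpublic()'
# before getset() evaluates the optimized tree against a subset.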
2219 2219
2220 2220 # the set of valid characters for the initial letter of symbols in
2221 2221 # alias declarations and definitions
2222 2222 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2223 2223 if c.isalnum() or c in '._@$' or ord(c) > 127)
2224 2224
2225 2225 def _parsewith(spec, lookup=None, syminitletters=None):
2226 2226 """Generate a parse tree of given spec with given tokenizing options
2227 2227
2228 2228 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2229 2229 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2230 2230 >>> _parsewith('$1')
2231 2231 Traceback (most recent call last):
2232 2232 ...
2233 2233 ParseError: ("syntax error in revset '$1'", 0)
2234 2234 >>> _parsewith('foo bar')
2235 2235 Traceback (most recent call last):
2236 2236 ...
2237 2237 ParseError: ('invalid token', 4)
2238 2238 """
2239 2239 p = parser.parser(elements)
2240 2240 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2241 2241 syminitletters=syminitletters))
2242 2242 if pos != len(spec):
2243 2243 raise error.ParseError(_('invalid token'), pos)
2244 2244 return parser.simplifyinfixops(tree, ('list', 'or'))
2245 2245
2246 2246 class _aliasrules(parser.basealiasrules):
2247 2247 """Parsing and expansion rule set of revset aliases"""
2248 2248 _section = _('revset alias')
2249 2249
2250 2250 @staticmethod
2251 2251 def _parse(spec):
2252 2252 """Parse alias declaration/definition ``spec``
2253 2253
2254 2254 This allows symbol names to also use ``$`` as an initial letter
2255 2255 (for backward compatibility), and callers of this function should
2256 2256 examine whether ``$`` is also used for unexpected symbols.
2257 2257 """
2258 2258 return _parsewith(spec, syminitletters=_aliassyminitletters)
2259 2259
2260 2260 @staticmethod
2261 2261 def _trygetfunc(tree):
2262 2262 if tree[0] == 'func' and tree[1][0] == 'symbol':
2263 2263 return tree[1][1], getlist(tree[2])
2264 2264
2265 2265 def expandaliases(ui, tree, showwarning=None):
2266 2266 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2267 2267 tree = _aliasrules.expand(aliases, tree)
2268 2268 if showwarning:
2269 2269 # warn about problematic (but not referred) aliases
2270 2270 for name, alias in sorted(aliases.iteritems()):
2271 2271 if alias.error and not alias.warned:
2272 2272 showwarning(_('warning: %s\n') % (alias.error))
2273 2273 alias.warned = True
2274 2274 return tree
2275 2275
2276 2276 def foldconcat(tree):
2277 2277 """Fold elements to be concatenated by `##`
2278 2278 """
2279 2279 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2280 2280 return tree
2281 2281 if tree[0] == '_concat':
2282 2282 pending = [tree]
2283 2283 l = []
2284 2284 while pending:
2285 2285 e = pending.pop()
2286 2286 if e[0] == '_concat':
2287 2287 pending.extend(reversed(e[1:]))
2288 2288 elif e[0] in ('string', 'symbol'):
2289 2289 l.append(e[1])
2290 2290 else:
2291 2291 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2292 2292 raise error.ParseError(msg)
2293 2293 return ('string', ''.join(l))
2294 2294 else:
2295 2295 return tuple(foldconcat(t) for t in tree)
2296 2296
2297 2297 def parse(spec, lookup=None):
2298 2298 return _parsewith(spec, lookup=lookup)
2299 2299
2300 2300 def posttreebuilthook(tree, repo):
2301 2301 # hook for extensions to execute code on the optimized tree
2302 2302 pass
2303 2303
2304 2304 def match(ui, spec, repo=None):
2305 2305 if not spec:
2306 2306 raise error.ParseError(_("empty query"))
2307 2307 lookup = None
2308 2308 if repo:
2309 2309 lookup = repo.__contains__
2310 2310 tree = parse(spec, lookup)
2311 2311 return _makematcher(ui, tree, repo)
2312 2312
2313 2313 def matchany(ui, specs, repo=None):
2314 2314 """Create a matcher that will include any revisions matching one of the
2315 2315 given specs"""
2316 2316 if not specs:
2317 2317 def mfunc(repo, subset=None):
2318 2318 return baseset()
2319 2319 return mfunc
2320 2320 if not all(specs):
2321 2321 raise error.ParseError(_("empty query"))
2322 2322 lookup = None
2323 2323 if repo:
2324 2324 lookup = repo.__contains__
2325 2325 if len(specs) == 1:
2326 2326 tree = parse(specs[0], lookup)
2327 2327 else:
2328 2328 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2329 2329 return _makematcher(ui, tree, repo)
2330 2330
2331 2331 def _makematcher(ui, tree, repo):
2332 2332 if ui:
2333 2333 tree = expandaliases(ui, tree, showwarning=ui.warn)
2334 2334 tree = foldconcat(tree)
2335 2335 tree = optimize(tree)
2336 2336 posttreebuilthook(tree, repo)
2337 2337 def mfunc(repo, subset=None):
2338 2338 if subset is None:
2339 2339 subset = fullreposet(repo)
2340 2340 if util.safehasattr(subset, 'isascending'):
2341 2341 result = getset(repo, subset, tree)
2342 2342 else:
2343 2343 result = getset(repo, baseset(subset), tree)
2344 2344 return result
2345 2345 return mfunc
2346 2346
2347 2347 def formatspec(expr, *args):
2348 2348 '''
2349 2349 This is a convenience function for using revsets internally, and
2350 2350 escapes arguments appropriately. Aliases are intentionally ignored
2351 2351 so that intended expression behavior isn't accidentally subverted.
2352 2352
2353 2353 Supported arguments:
2354 2354
2355 2355 %r = revset expression, parenthesized
2356 2356 %d = int(arg), no quoting
2357 2357 %s = string(arg), escaped and single-quoted
2358 2358 %b = arg.branch(), escaped and single-quoted
2359 2359 %n = hex(arg), single-quoted
2360 2360 %% = a literal '%'
2361 2361
2362 2362 Prefixing the type with 'l' specifies a parenthesized list of that type.
2363 2363
2364 2364 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2365 2365 '(10 or 11):: and ((this()) or (that()))'
2366 2366 >>> formatspec('%d:: and not %d::', 10, 20)
2367 2367 '10:: and not 20::'
2368 2368 >>> formatspec('%ld or %ld', [], [1])
2369 2369 "_list('') or 1"
2370 2370 >>> formatspec('keyword(%s)', 'foo\\xe9')
2371 2371 "keyword('foo\\\\xe9')"
2372 2372 >>> b = lambda: 'default'
2373 2373 >>> b.branch = b
2374 2374 >>> formatspec('branch(%b)', b)
2375 2375 "branch('default')"
2376 2376 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2377 2377 "root(_list('a\\x00b\\x00c\\x00d'))"
2378 2378 '''
2379 2379
2380 2380 def quote(s):
2381 2381 return repr(str(s))
2382 2382
2383 2383 def argtype(c, arg):
2384 2384 if c == 'd':
2385 2385 return str(int(arg))
2386 2386 elif c == 's':
2387 2387 return quote(arg)
2388 2388 elif c == 'r':
2389 2389 parse(arg) # make sure syntax errors are confined
2390 2390 return '(%s)' % arg
2391 2391 elif c == 'n':
2392 2392 return quote(node.hex(arg))
2393 2393 elif c == 'b':
2394 2394 return quote(arg.branch())
2395 2395
2396 2396 def listexp(s, t):
2397 2397 l = len(s)
2398 2398 if l == 0:
2399 2399 return "_list('')"
2400 2400 elif l == 1:
2401 2401 return argtype(t, s[0])
2402 2402 elif t == 'd':
2403 2403 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2404 2404 elif t == 's':
2405 2405 return "_list('%s')" % "\0".join(s)
2406 2406 elif t == 'n':
2407 2407 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2408 2408 elif t == 'b':
2409 2409 return "_list('%s')" % "\0".join(a.branch() for a in s)
2410 2410
2411 2411 m = l // 2
2412 2412 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2413 2413
2414 2414 ret = ''
2415 2415 pos = 0
2416 2416 arg = 0
2417 2417 while pos < len(expr):
2418 2418 c = expr[pos]
2419 2419 if c == '%':
2420 2420 pos += 1
2421 2421 d = expr[pos]
2422 2422 if d == '%':
2423 2423 ret += d
2424 2424 elif d in 'dsnbr':
2425 2425 ret += argtype(d, args[arg])
2426 2426 arg += 1
2427 2427 elif d == 'l':
2428 2428 # a list of some type
2429 2429 pos += 1
2430 2430 d = expr[pos]
2431 2431 ret += listexp(list(args[arg]), d)
2432 2432 arg += 1
2433 2433 else:
2434 2434 raise error.Abort('unexpected revspec format character %s' % d)
2435 2435 else:
2436 2436 ret += c
2437 2437 pos += 1
2438 2438
2439 2439 return ret
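# Additional illustration (sketch): formatspec('%d::%d', 10, 20) yields
# '10::20', and formatspec('%ld', [10, 20]) yields "_intlist('10\x0020')",
# which is how callers safely splice computed revision lists into revsets.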
2440 2440
2441 2441 def prettyformat(tree):
2442 2442 return parser.prettyformat(tree, ('string', 'symbol'))
2443 2443
2444 2444 def depth(tree):
2445 2445 if isinstance(tree, tuple):
2446 2446 return max(map(depth, tree)) + 1
2447 2447 else:
2448 2448 return 0
2449 2449
2450 2450 def funcsused(tree):
2451 2451 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2452 2452 return set()
2453 2453 else:
2454 2454 funcs = set()
2455 2455 for s in tree[1:]:
2456 2456 funcs |= funcsused(s)
2457 2457 if tree[0] == 'func':
2458 2458 funcs.add(tree[1][1])
2459 2459 return funcs
2460 2460
2461 2461 def _formatsetrepr(r):
2462 2462 """Format an optional printable representation of a set
2463 2463
2464 2464 ======== =================================
2465 2465 type(r) example
2466 2466 ======== =================================
2467 2467 tuple ('<not %r>', other)
2468 2468 str '<branch closed>'
2469 2469 callable lambda: '<branch %r>' % sorted(b)
2470 2470 object other
2471 2471 ======== =================================
2472 2472 """
2473 2473 if r is None:
2474 2474 return ''
2475 2475 elif isinstance(r, tuple):
2476 2476 return r[0] % r[1:]
2477 2477 elif isinstance(r, str):
2478 2478 return r
2479 2479 elif callable(r):
2480 2480 return r()
2481 2481 else:
2482 2482 return repr(r)
2483 2483
2484 2484 class abstractsmartset(object):
2485 2485
2486 2486 def __nonzero__(self):
2487 2487 """True if the smartset is not empty"""
2488 2488 raise NotImplementedError()
2489 2489
2490 2490 def __contains__(self, rev):
2491 2491 """provide fast membership testing"""
2492 2492 raise NotImplementedError()
2493 2493
2494 2494 def __iter__(self):
2495 2495 """iterate the set in the order it is supposed to be iterated"""
2496 2496 raise NotImplementedError()
2497 2497
2498 2498 # Attributes containing a function to perform a fast iteration in a given
2499 2499 # direction. A smartset can have none, one, or both defined.
2500 2500 #
2501 2501 # Default value is None instead of a function returning None to avoid
2502 2502 # initializing an iterator just for testing if a fast method exists.
2503 2503 fastasc = None
2504 2504 fastdesc = None
2505 2505
2506 2506 def isascending(self):
2507 2507 """True if the set will iterate in ascending order"""
2508 2508 raise NotImplementedError()
2509 2509
2510 2510 def isdescending(self):
2511 2511 """True if the set will iterate in descending order"""
2512 2512 raise NotImplementedError()
2513 2513
2514 2514 @util.cachefunc
2515 2515 def min(self):
2516 2516 """return the minimum element in the set"""
2517 2517 if self.fastasc is not None:
2518 2518 for r in self.fastasc():
2519 2519 return r
2520 2520 raise ValueError('arg is an empty sequence')
2521 2521 return min(self)
2522 2522
2523 2523 @util.cachefunc
2524 2524 def max(self):
2525 2525 """return the maximum element in the set"""
2526 2526 if self.fastdesc is not None:
2527 2527 for r in self.fastdesc():
2528 2528 return r
2529 2529 raise ValueError('arg is an empty sequence')
2530 2530 return max(self)
2531 2531
2532 2532 def first(self):
2533 2533 """return the first element in the set (user iteration perspective)
2534 2534
2535 2535 Return None if the set is empty"""
2536 2536 raise NotImplementedError()
2537 2537
2538 2538 def last(self):
2539 2539 """return the last element in the set (user iteration perspective)
2540 2540
2541 2541 Return None if the set is empty"""
2542 2542 raise NotImplementedError()
2543 2543
2544 2544 def __len__(self):
2545 2545 """return the length of the smartset
2546 2546
2547 2547 This can be expensive on smartsets that could otherwise be lazy."""
2548 2548 raise NotImplementedError()
2549 2549
2550 2550 def reverse(self):
2551 2551 """reverse the expected iteration order"""
2552 2552 raise NotImplementedError()
2553 2553
2554 2554 def sort(self, reverse=True):
2555 2555 """get the set to iterate in an ascending or descending order"""
2556 2556 raise NotImplementedError()
2557 2557
2558 2558 def __and__(self, other):
2559 2559 """Returns a new object with the intersection of the two collections.
2560 2560
2561 2561 This is part of the mandatory API for smartset."""
2562 2562 if isinstance(other, fullreposet):
2563 2563 return self
2564 2564 return self.filter(other.__contains__, condrepr=other, cache=False)
2565 2565
2566 2566 def __add__(self, other):
2567 2567 """Returns a new object with the union of the two collections.
2568 2568
2569 2569 This is part of the mandatory API for smartset."""
2570 2570 return addset(self, other)
2571 2571
2572 2572 def __sub__(self, other):
2573 2573 """Returns a new object with the subtraction of the two collections.
2574 2574
2575 2575 This is part of the mandatory API for smartset."""
2576 2576 c = other.__contains__
2577 2577 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2578 2578 cache=False)
2579 2579
2580 2580 def filter(self, condition, condrepr=None, cache=True):
2581 2581 """Returns this smartset filtered by condition as a new smartset.
2582 2582
2583 2583 `condition` is a callable which takes a revision number and returns a
2584 2584 boolean. Optional `condrepr` provides a printable representation of
2585 2585 the given `condition`.
2586 2586
2587 2587 This is part of the mandatory API for smartset."""
2588 2588 # a builtin function cannot be cached, but it does not need to be
2589 2589 if cache and util.safehasattr(condition, 'func_code'):
2590 2590 condition = util.cachefunc(condition)
2591 2591 return filteredset(self, condition, condrepr)
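# Usage sketch for the operators defined above: given smartsets a and b,
# 'a & b' intersects them (returning a unchanged when b is the whole repo),
# 'a + b' builds a lazy addset union, and 'a - b' keeps the elements of a
# that are not in b; none of them forces a full evaluation up front.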
2592 2592
2593 2593 class baseset(abstractsmartset):
2594 2594 """Basic data structure that represents a revset and contains the basic
2595 2595 operations that it should be able to perform.
2596 2596
2597 2597 Every method in this class should be implemented by any smartset class.
2598 2598 """
2599 2599 def __init__(self, data=(), datarepr=None):
2600 2600 """
2601 2601 datarepr: a tuple of (format, obj, ...), a function or an object that
2602 2602 provides a printable representation of the given data.
2603 2603 """
2604 2604 self._ascending = None
2605 2605 if not isinstance(data, list):
2606 2606 if isinstance(data, set):
2607 2607 self._set = data
2608 2608 # sets have no order, so we pick one for stability
2609 2609 self._ascending = True
2610 2610 data = list(data)
2611 2611 self._list = data
2612 2612 self._datarepr = datarepr
2613 2613
2614 2614 @util.propertycache
2615 2615 def _set(self):
2616 2616 return set(self._list)
2617 2617
2618 2618 @util.propertycache
2619 2619 def _asclist(self):
2620 2620 asclist = self._list[:]
2621 2621 asclist.sort()
2622 2622 return asclist
2623 2623
2624 2624 def __iter__(self):
2625 2625 if self._ascending is None:
2626 2626 return iter(self._list)
2627 2627 elif self._ascending:
2628 2628 return iter(self._asclist)
2629 2629 else:
2630 2630 return reversed(self._asclist)
2631 2631
2632 2632 def fastasc(self):
2633 2633 return iter(self._asclist)
2634 2634
2635 2635 def fastdesc(self):
2636 2636 return reversed(self._asclist)
2637 2637
2638 2638 @util.propertycache
2639 2639 def __contains__(self):
2640 2640 return self._set.__contains__
2641 2641
2642 2642 def __nonzero__(self):
2643 2643 return bool(self._list)
2644 2644
2645 2645 def sort(self, reverse=False):
2646 2646 self._ascending = not bool(reverse)
2647 2647
2648 2648 def reverse(self):
2649 2649 if self._ascending is None:
2650 2650 self._list.reverse()
2651 2651 else:
2652 2652 self._ascending = not self._ascending
2653 2653
2654 2654 def __len__(self):
2655 2655 return len(self._list)
2656 2656
2657 2657 def isascending(self):
2658 2658 """Returns True if the collection is in ascending order, False if not.
2659 2659
2660 2660 This is part of the mandatory API for smartset."""
2661 2661 if len(self) <= 1:
2662 2662 return True
2663 2663 return self._ascending is not None and self._ascending
2664 2664
2665 2665 def isdescending(self):
2666 2666 """Returns True if the collection is in descending order, False if not.
2667 2667
2668 2668 This is part of the mandatory API for smartset."""
2669 2669 if len(self) <= 1:
2670 2670 return True
2671 2671 return self._ascending is not None and not self._ascending
2672 2672
2673 2673 def first(self):
2674 2674 if self:
2675 2675 if self._ascending is None:
2676 2676 return self._list[0]
2677 2677 elif self._ascending:
2678 2678 return self._asclist[0]
2679 2679 else:
2680 2680 return self._asclist[-1]
2681 2681 return None
2682 2682
2683 2683 def last(self):
2684 2684 if self:
2685 2685 if self._ascending is None:
2686 2686 return self._list[-1]
2687 2687 elif self._ascending:
2688 2688 return self._asclist[-1]
2689 2689 else:
2690 2690 return self._asclist[0]
2691 2691 return None
2692 2692
2693 2693 def __repr__(self):
2694 2694 d = {None: '', False: '-', True: '+'}[self._ascending]
2695 2695 s = _formatsetrepr(self._datarepr)
2696 2696 if not s:
2697 2697 l = self._list
2698 2698 # if _list has been built from a set, it might have a different
2699 2699 # order from one python implementation to another.
2700 2700 # We fall back to the sorted version for stable output.
2701 2701 if self._ascending is not None:
2702 2702 l = self._asclist
2703 2703 s = repr(l)
2704 2704 return '<%s%s %s>' % (type(self).__name__, d, s)
2705 2705
2706 2706 class filteredset(abstractsmartset):
2707 2707 """Duck type for baseset class which iterates lazily over the revisions in
2708 2708 the subset and contains a function which tests for membership in the
2709 2709 revset
2710 2710 """
2711 2711 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2712 2712 """
2713 2713 condition: a function that decides whether a revision in the subset
2714 2714 belongs to the revset or not.
2715 2715 condrepr: a tuple of (format, obj, ...), a function or an object that
2716 2716 provides a printable representation of the given condition.
2717 2717 """
2718 2718 self._subset = subset
2719 2719 self._condition = condition
2720 2720 self._condrepr = condrepr
2721 2721
2722 2722 def __contains__(self, x):
2723 2723 return x in self._subset and self._condition(x)
2724 2724
2725 2725 def __iter__(self):
2726 2726 return self._iterfilter(self._subset)
2727 2727
2728 2728 def _iterfilter(self, it):
2729 2729 cond = self._condition
2730 2730 for x in it:
2731 2731 if cond(x):
2732 2732 yield x
2733 2733
2734 2734 @property
2735 2735 def fastasc(self):
2736 2736 it = self._subset.fastasc
2737 2737 if it is None:
2738 2738 return None
2739 2739 return lambda: self._iterfilter(it())
2740 2740
2741 2741 @property
2742 2742 def fastdesc(self):
2743 2743 it = self._subset.fastdesc
2744 2744 if it is None:
2745 2745 return None
2746 2746 return lambda: self._iterfilter(it())
2747 2747
2748 2748 def __nonzero__(self):
2749 2749 fast = self.fastasc
2750 2750 if fast is None:
2751 2751 fast = self.fastdesc
2752 2752 if fast is not None:
2753 2753 it = fast()
2754 2754 else:
2755 2755 it = self
2756 2756
2757 2757 for r in it:
2758 2758 return True
2759 2759 return False
2760 2760
2761 2761 def __len__(self):
2762 2762 # Basic implementation to be changed in future patches.
2763 2763 # Until this gets improved, we use a generator expression
2764 2764 # here, since a list comprehension is free to call __len__ again,
2765 2765 # causing infinite recursion.
2766 2766 l = baseset(r for r in self)
2767 2767 return len(l)
2768 2768
2769 2769 def sort(self, reverse=False):
2770 2770 self._subset.sort(reverse=reverse)
2771 2771
2772 2772 def reverse(self):
2773 2773 self._subset.reverse()
2774 2774
2775 2775 def isascending(self):
2776 2776 return self._subset.isascending()
2777 2777
2778 2778 def isdescending(self):
2779 2779 return self._subset.isdescending()
2780 2780
2781 2781 def first(self):
2782 2782 for x in self:
2783 2783 return x
2784 2784 return None
2785 2785
2786 2786 def last(self):
2787 2787 it = None
2788 2788 if self.isascending():
2789 2789 it = self.fastdesc
2790 2790 elif self.isdescending():
2791 2791 it = self.fastasc
2792 2792 if it is not None:
2793 2793 for x in it():
2794 2794 return x
2795 2795 return None # empty case
2796 2796 else:
2797 2797 x = None
2798 2798 for x in self:
2799 2799 pass
2800 2800 return x
2801 2801
2802 2802 def __repr__(self):
2803 2803 xs = [repr(self._subset)]
2804 2804 s = _formatsetrepr(self._condrepr)
2805 2805 if s:
2806 2806 xs.append(s)
2807 2807 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
2808 2808
2809 2809 def _iterordered(ascending, iter1, iter2):
2810 2810 """produce an ordered iteration from two iterators with the same order
2811 2811
2812 2812 The ``ascending`` argument indicates the iteration direction.
2813 2813 """
2814 2814 choice = max
2815 2815 if ascending:
2816 2816 choice = min
2817 2817
2818 2818 val1 = None
2819 2819 val2 = None
2820 2820 try:
2821 2821 # Consume both iterators in an ordered way until one is empty
2822 2822 while True:
2823 2823 if val1 is None:
2824 2824 val1 = iter1.next()
2825 2825 if val2 is None:
2826 2826 val2 = iter2.next()
2827 2827 next = choice(val1, val2)
2828 2828 yield next
2829 2829 if val1 == next:
2830 2830 val1 = None
2831 2831 if val2 == next:
2832 2832 val2 = None
2833 2833 except StopIteration:
2834 2834 # Flush any remaining values and consume the other one
2835 2835 it = iter2
2836 2836 if val1 is not None:
2837 2837 yield val1
2838 2838 it = iter1
2839 2839 elif val2 is not None:
2840 2840 # might have been equality and both are empty
2841 2841 yield val2
2842 2842 for val in it:
2843 2843 yield val
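# Worked example (sketch): merging the ascending iterators over [0, 2, 5] and
# [1, 2, 3] yields 0, 1, 2, 3, 5 -- values common to both are emitted once,
# and whatever remains after one side is exhausted is flushed at the end.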
2844 2844
2845 2845 class addset(abstractsmartset):
2846 2846 """Represent the addition of two sets
2847 2847
2848 2848 Wrapper structure for lazily adding two structures without losing much
2849 2849 performance on the __contains__ method
2850 2850
2851 2851 If the ascending attribute is set, that means the two structures are
2852 2852 ordered in either an ascending or descending way. Therefore, we can add
2853 2853 them while maintaining the order by iterating over both at the same time.
2854 2854
2855 2855 >>> xs = baseset([0, 3, 2])
2856 2856 >>> ys = baseset([5, 2, 4])
2857 2857
2858 2858 >>> rs = addset(xs, ys)
2859 2859 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
2860 2860 (True, True, False, True, 0, 4)
2861 2861 >>> rs = addset(xs, baseset([]))
2862 2862 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
2863 2863 (True, True, False, 0, 2)
2864 2864 >>> rs = addset(baseset([]), baseset([]))
2865 2865 >>> bool(rs), 0 in rs, rs.first(), rs.last()
2866 2866 (False, False, None, None)
2867 2867
2868 2868 iterate unsorted:
2869 2869 >>> rs = addset(xs, ys)
2870 2870 >>> # (use generator because pypy could call len())
2871 2871 >>> list(x for x in rs) # without _genlist
2872 2872 [0, 3, 2, 5, 4]
2873 2873 >>> assert not rs._genlist
2874 2874 >>> len(rs)
2875 2875 5
2876 2876 >>> [x for x in rs] # with _genlist
2877 2877 [0, 3, 2, 5, 4]
2878 2878 >>> assert rs._genlist
2879 2879
2880 2880 iterate ascending:
2881 2881 >>> rs = addset(xs, ys, ascending=True)
2882 2882 >>> # (use generator because pypy could call len())
2883 2883 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
2884 2884 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2885 2885 >>> assert not rs._asclist
2886 2886 >>> len(rs)
2887 2887 5
2888 2888 >>> [x for x in rs], [x for x in rs.fastasc()]
2889 2889 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
2890 2890 >>> assert rs._asclist
2891 2891
2892 2892 iterate descending:
2893 2893 >>> rs = addset(xs, ys, ascending=False)
2894 2894 >>> # (use generator because pypy could call len())
2895 2895 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
2896 2896 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2897 2897 >>> assert not rs._asclist
2898 2898 >>> len(rs)
2899 2899 5
2900 2900 >>> [x for x in rs], [x for x in rs.fastdesc()]
2901 2901 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
2902 2902 >>> assert rs._asclist
2903 2903
2904 2904 iterate ascending without fastasc:
2905 2905 >>> rs = addset(xs, generatorset(ys), ascending=True)
2906 2906 >>> assert rs.fastasc is None
2907 2907 >>> [x for x in rs]
2908 2908 [0, 2, 3, 4, 5]
2909 2909
2910 2910 iterate descending without fastdesc:
2911 2911 >>> rs = addset(generatorset(xs), ys, ascending=False)
2912 2912 >>> assert rs.fastdesc is None
2913 2913 >>> [x for x in rs]
2914 2914 [5, 4, 3, 2, 0]
2915 2915 """
2916 2916 def __init__(self, revs1, revs2, ascending=None):
2917 2917 self._r1 = revs1
2918 2918 self._r2 = revs2
2919 2919 self._iter = None
2920 2920 self._ascending = ascending
2921 2921 self._genlist = None
2922 2922 self._asclist = None
2923 2923
2924 2924 def __len__(self):
2925 2925 return len(self._list)
2926 2926
2927 2927 def __nonzero__(self):
2928 2928 return bool(self._r1) or bool(self._r2)
2929 2929
2930 2930 @util.propertycache
2931 2931 def _list(self):
2932 2932 if not self._genlist:
2933 2933 self._genlist = baseset(iter(self))
2934 2934 return self._genlist
2935 2935
2936 2936 def __iter__(self):
2937 2937 """Iterate over both collections without repeating elements
2938 2938
2939 2939 If the ascending attribute is not set, iterate over the first one and
2940 2940 then over the second one, checking for membership in the first one, so we
2941 2941 don't yield any duplicates.
2942 2942
2943 2943 If the ascending attribute is set, iterate over both collections at the
2944 2944 same time, yielding only one value at a time in the given order.
2945 2945 """
2946 2946 if self._ascending is None:
2947 2947 if self._genlist:
2948 2948 return iter(self._genlist)
2949 2949 def arbitraryordergen():
2950 2950 for r in self._r1:
2951 2951 yield r
2952 2952 inr1 = self._r1.__contains__
2953 2953 for r in self._r2:
2954 2954 if not inr1(r):
2955 2955 yield r
2956 2956 return arbitraryordergen()
2957 2957 # try to use our own fast iterator if it exists
2958 2958 self._trysetasclist()
2959 2959 if self._ascending:
2960 2960 attr = 'fastasc'
2961 2961 else:
2962 2962 attr = 'fastdesc'
2963 2963 it = getattr(self, attr)
2964 2964 if it is not None:
2965 2965 return it()
2966 2966 # maybe only one of the two components supports fast iteration
2967 2967 # get iterator for _r1
2968 2968 iter1 = getattr(self._r1, attr)
2969 2969 if iter1 is None:
2970 2970 # let's avoid side effect (not sure it matters)
2971 2971 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
2972 2972 else:
2973 2973 iter1 = iter1()
2974 2974 # get iterator for _r2
2975 2975 iter2 = getattr(self._r2, attr)
2976 2976 if iter2 is None:
2977 2977 # let's avoid side effect (not sure it matters)
2978 2978 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
2979 2979 else:
2980 2980 iter2 = iter2()
2981 2981 return _iterordered(self._ascending, iter1, iter2)
2982 2982
2983 2983 def _trysetasclist(self):
2984 2984 """populate the _asclist attribute if possible and necessary"""
2985 2985 if self._genlist is not None and self._asclist is None:
2986 2986 self._asclist = sorted(self._genlist)
2987 2987
2988 2988 @property
2989 2989 def fastasc(self):
2990 2990 self._trysetasclist()
2991 2991 if self._asclist is not None:
2992 2992 return self._asclist.__iter__
2993 2993 iter1 = self._r1.fastasc
2994 2994 iter2 = self._r2.fastasc
2995 2995 if None in (iter1, iter2):
2996 2996 return None
2997 2997 return lambda: _iterordered(True, iter1(), iter2())
2998 2998
2999 2999 @property
3000 3000 def fastdesc(self):
3001 3001 self._trysetasclist()
3002 3002 if self._asclist is not None:
3003 3003 return self._asclist.__reversed__
3004 3004 iter1 = self._r1.fastdesc
3005 3005 iter2 = self._r2.fastdesc
3006 3006 if None in (iter1, iter2):
3007 3007 return None
3008 3008 return lambda: _iterordered(False, iter1(), iter2())
3009 3009
3010 3010 def __contains__(self, x):
3011 3011 return x in self._r1 or x in self._r2
3012 3012
3013 3013 def sort(self, reverse=False):
3014 3014 """Sort the added set
3015 3015
3016 3016 For this we use the cached list with all the generated values and if we
3017 3017 know they are ascending or descending we can sort them in a smart way.
3018 3018 """
3019 3019 self._ascending = not reverse
3020 3020
3021 3021 def isascending(self):
3022 3022 return self._ascending is not None and self._ascending
3023 3023
3024 3024 def isdescending(self):
3025 3025 return self._ascending is not None and not self._ascending
3026 3026
3027 3027 def reverse(self):
3028 3028 if self._ascending is None:
3029 3029 self._list.reverse()
3030 3030 else:
3031 3031 self._ascending = not self._ascending
3032 3032
3033 3033 def first(self):
3034 3034 for x in self:
3035 3035 return x
3036 3036 return None
3037 3037
3038 3038 def last(self):
3039 3039 self.reverse()
3040 3040 val = self.first()
3041 3041 self.reverse()
3042 3042 return val
3043 3043
3044 3044 def __repr__(self):
3045 3045 d = {None: '', False: '-', True: '+'}[self._ascending]
3046 3046 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3047 3047
3048 3048 class generatorset(abstractsmartset):
3049 3049 """Wrap a generator for lazy iteration
3050 3050
3051 3051 Wrapper structure for generators that provides lazy membership and can
3052 3052 be iterated more than once.
3053 3053 When asked for membership it generates values until either it finds the
3054 3054 requested one or has gone through all the elements in the generator
3055 3055 """
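# Usage sketch: wrapping an ascending generator, e.g.
# generatorset(iter([0, 1, 5]), iterasc=True), lets a membership test such as
# '3 in gs' stop consuming as soon as a value larger than 3 is produced
# (_asccontains below), while already-generated values stay cached for later
# iterations.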
3056 3056 def __init__(self, gen, iterasc=None):
3057 3057 """
3058 3058 gen: a generator producing the values for the generatorset.
3059 3059 """
3060 3060 self._gen = gen
3061 3061 self._asclist = None
3062 3062 self._cache = {}
3063 3063 self._genlist = []
3064 3064 self._finished = False
3065 3065 self._ascending = True
3066 3066 if iterasc is not None:
3067 3067 if iterasc:
3068 3068 self.fastasc = self._iterator
3069 3069 self.__contains__ = self._asccontains
3070 3070 else:
3071 3071 self.fastdesc = self._iterator
3072 3072 self.__contains__ = self._desccontains
3073 3073
3074 3074 def __nonzero__(self):
3075 3075 # Do not use 'for r in self' because it will enforce the iteration
3076 3076 # order (default ascending), possibly unrolling a whole descending
3077 3077 # iterator.
3078 3078 if self._genlist:
3079 3079 return True
3080 3080 for r in self._consumegen():
3081 3081 return True
3082 3082 return False
3083 3083
3084 3084 def __contains__(self, x):
3085 3085 if x in self._cache:
3086 3086 return self._cache[x]
3087 3087
3088 3088 # Use new values only, as existing values would be cached.
3089 3089 for l in self._consumegen():
3090 3090 if l == x:
3091 3091 return True
3092 3092
3093 3093 self._cache[x] = False
3094 3094 return False
3095 3095
3096 3096 def _asccontains(self, x):
3097 3097 """version of contains optimised for ascending generator"""
3098 3098 if x in self._cache:
3099 3099 return self._cache[x]
3100 3100
3101 3101 # Use new values only, as existing values would be cached.
3102 3102 for l in self._consumegen():
3103 3103 if l == x:
3104 3104 return True
3105 3105 if l > x:
3106 3106 break
3107 3107
3108 3108 self._cache[x] = False
3109 3109 return False
3110 3110
3111 3111 def _desccontains(self, x):
3112 3112 """version of contains optimised for descending generator"""
3113 3113 if x in self._cache:
3114 3114 return self._cache[x]
3115 3115
3116 3116 # Use new values only, as existing values would be cached.
3117 3117 for l in self._consumegen():
3118 3118 if l == x:
3119 3119 return True
3120 3120 if l < x:
3121 3121 break
3122 3122
3123 3123 self._cache[x] = False
3124 3124 return False
3125 3125
3126 3126 def __iter__(self):
3127 3127 if self._ascending:
3128 3128 it = self.fastasc
3129 3129 else:
3130 3130 it = self.fastdesc
3131 3131 if it is not None:
3132 3132 return it()
3133 3133 # we need to consume the iterator
3134 3134 for x in self._consumegen():
3135 3135 pass
3136 3136 # call the same code again; fastasc/fastdesc are now available
3137 3137 return iter(self)
3138 3138
3139 3139 def _iterator(self):
3140 3140 if self._finished:
3141 3141 return iter(self._genlist)
3142 3142
3143 3143 # We have to use this complex iteration strategy to allow multiple
3144 3144 # iterations at the same time. We need to be able to catch revisions
3145 3145 # pulled from _consumegen and added to genlist by another instance.
3146 3146 #
3147 3147 # Getting rid of this would provide about a 15% speed up on this
3148 3148 # iteration.
3149 3149 genlist = self._genlist
3150 3150 nextrev = self._consumegen().next
3151 3151 _len = len # cache global lookup
3152 3152 def gen():
3153 3153 i = 0
3154 3154 while True:
3155 3155 if i < _len(genlist):
3156 3156 yield genlist[i]
3157 3157 else:
3158 3158 yield nextrev()
3159 3159 i += 1
3160 3160 return gen()
3161 3161
3162 3162 def _consumegen(self):
3163 3163 cache = self._cache
3164 3164 genlist = self._genlist.append
3165 3165 for item in self._gen:
3166 3166 cache[item] = True
3167 3167 genlist(item)
3168 3168 yield item
3169 3169 if not self._finished:
3170 3170 self._finished = True
3171 3171 asc = self._genlist[:]
3172 3172 asc.sort()
3173 3173 self._asclist = asc
3174 3174 self.fastasc = asc.__iter__
3175 3175 self.fastdesc = asc.__reversed__
3176 3176
3177 3177 def __len__(self):
3178 3178 for x in self._consumegen():
3179 3179 pass
3180 3180 return len(self._genlist)
3181 3181
3182 3182 def sort(self, reverse=False):
3183 3183 self._ascending = not reverse
3184 3184
3185 3185 def reverse(self):
3186 3186 self._ascending = not self._ascending
3187 3187
3188 3188 def isascending(self):
3189 3189 return self._ascending
3190 3190
3191 3191 def isdescending(self):
3192 3192 return not self._ascending
3193 3193
3194 3194 def first(self):
3195 3195 if self._ascending:
3196 3196 it = self.fastasc
3197 3197 else:
3198 3198 it = self.fastdesc
3199 3199 if it is None:
3200 3200 # we need to consume all and try again
3201 3201 for x in self._consumegen():
3202 3202 pass
3203 3203 return self.first()
3204 3204 return next(it(), None)
3205 3205
3206 3206 def last(self):
3207 3207 if self._ascending:
3208 3208 it = self.fastdesc
3209 3209 else:
3210 3210 it = self.fastasc
3211 3211 if it is None:
3212 3212 # we need to consume all and try again
3213 3213 for x in self._consumegen():
3214 3214 pass
3215 3215 return self.last()
3216 3216 return next(it(), None)
3217 3217
3218 3218 def __repr__(self):
3219 3219 d = {False: '-', True: '+'}[self._ascending]
3220 3220 return '<%s%s>' % (type(self).__name__, d)
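# Illustrative sketch (not part of revset.py): roughly how a generatorset
# behaves. Membership tests consume the wrapped generator only as far as
# needed, and every value seen is cached so the set can be iterated again.
#
#   gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
#   5 in gs       # generates 0, 2, 5 and stops -> True
#   list(gs)      # finishes consuming; cached values are reused -> [0, 2, 5, 9]
#   gs.first()    # -> 0 (fastasc is available because iterasc=True)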
3221 3221
3222 3222 class spanset(abstractsmartset):
3223 3223 """Duck type for baseset class which represents a range of revisions and
3224 3224 can work lazily and without having all the range in memory
3225 3225
3226 3226 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3227 3227 notable points:
3228 3228 - when x < y it will be automatically descending,
3229 3229 - revision filtered with this repoview will be skipped.
3230 3230
3231 3231 """
3232 3232 def __init__(self, repo, start=0, end=None):
3233 3233 """
3234 3234 start: first revision included in the set
3235 3235 (defaults to 0)
3236 3236 end: first revision excluded (last + 1)
3237 3237 (defaults to len(repo))
3238 3238
3239 3239 Spanset will be descending if `end` < `start`.
3240 3240 """
3241 3241 if end is None:
3242 3242 end = len(repo)
3243 3243 self._ascending = start <= end
3244 3244 if not self._ascending:
3245 3245 start, end = end + 1, start + 1
3246 3246 self._start = start
3247 3247 self._end = end
3248 3248 self._hiddenrevs = repo.changelog.filteredrevs
3249 3249
3250 3250 def sort(self, reverse=False):
3251 3251 self._ascending = not reverse
3252 3252
3253 3253 def reverse(self):
3254 3254 self._ascending = not self._ascending
3255 3255
3256 3256 def _iterfilter(self, iterrange):
3257 3257 s = self._hiddenrevs
3258 3258 for r in iterrange:
3259 3259 if r not in s:
3260 3260 yield r
3261 3261
3262 3262 def __iter__(self):
3263 3263 if self._ascending:
3264 3264 return self.fastasc()
3265 3265 else:
3266 3266 return self.fastdesc()
3267 3267
3268 3268 def fastasc(self):
3269 3269 iterrange = xrange(self._start, self._end)
3270 3270 if self._hiddenrevs:
3271 3271 return self._iterfilter(iterrange)
3272 3272 return iter(iterrange)
3273 3273
3274 3274 def fastdesc(self):
3275 3275 iterrange = xrange(self._end - 1, self._start - 1, -1)
3276 3276 if self._hiddenrevs:
3277 3277 return self._iterfilter(iterrange)
3278 3278 return iter(iterrange)
3279 3279
3280 3280 def __contains__(self, rev):
3281 3281 hidden = self._hiddenrevs
3282 3282 return ((self._start <= rev < self._end)
3283 3283 and not (hidden and rev in hidden))
3284 3284
3285 3285 def __nonzero__(self):
3286 3286 for r in self:
3287 3287 return True
3288 3288 return False
3289 3289
3290 3290 def __len__(self):
3291 3291 if not self._hiddenrevs:
3292 3292 return abs(self._end - self._start)
3293 3293 else:
3294 3294 count = 0
3295 3295 start = self._start
3296 3296 end = self._end
3297 3297 for rev in self._hiddenrevs:
3298 3298 if (end < rev <= start) or (start <= rev < end):
3299 3299 count += 1
3300 3300 return abs(self._end - self._start) - count
3301 3301
3302 3302 def isascending(self):
3303 3303 return self._ascending
3304 3304
3305 3305 def isdescending(self):
3306 3306 return not self._ascending
3307 3307
3308 3308 def first(self):
3309 3309 if self._ascending:
3310 3310 it = self.fastasc
3311 3311 else:
3312 3312 it = self.fastdesc
3313 3313 for x in it():
3314 3314 return x
3315 3315 return None
3316 3316
3317 3317 def last(self):
3318 3318 if self._ascending:
3319 3319 it = self.fastdesc
3320 3320 else:
3321 3321 it = self.fastasc
3322 3322 for x in it():
3323 3323 return x
3324 3324 return None
3325 3325
3326 3326 def __repr__(self):
3327 3327 d = {False: '-', True: '+'}[self._ascending]
3328 3328 return '<%s%s %d:%d>' % (type(self).__name__, d,
3329 3329 self._start, self._end - 1)
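# Illustrative sketch (not part of revset.py): a spanset is essentially a lazy
# xrange over revision numbers that skips revisions hidden by the repoview.
#
#   spanset(repo)             # 0 .. len(repo) - 1, ascending
#   spanset(repo, 4, 0)       # start > end, so iteration is descending: 4, 3, 2, 1
#   3 in spanset(repo, 0, 5)  # True unless revision 3 is filtered in this view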
3330 3330
3331 3331 class fullreposet(spanset):
3332 3332 """a set containing all revisions in the repo
3333 3333
3334 3334 This class exists to host special optimization and magic to handle virtual
3335 3335 revisions such as "null".
3336 3336 """
3337 3337
3338 3338 def __init__(self, repo):
3339 3339 super(fullreposet, self).__init__(repo)
3340 3340
3341 3341 def __and__(self, other):
3342 3342 """As self contains the whole repo, all of the other set should also be
3343 3343 in self. Therefore `self & other = other`.
3344 3344
3345 3345 This boldly assumes the other contains valid revs only.
3346 3346 """
3347 3347 # other is not a smartset; make it so
3348 3348 if not util.safehasattr(other, 'isascending'):
3349 3349 # filter out hidden revisions
3350 3350 # (this boldly assumes all smartsets are pure)
3351 3351 #
3352 3352 # `other` was used with "&", so let's assume it is a set-like
3353 3353 # object.
3354 3354 other = baseset(other - self._hiddenrevs)
3355 3355
3356 3356 # XXX As fullreposet is also used as bootstrap, this is wrong.
3357 3357 #
3358 3358 # With a giveme312() revset returning [3,1,2], this makes
3359 3359 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3360 3360 # We cannot just drop it because other usages still need to sort it:
3361 3361 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3362 3362 #
3363 3363 # There are also some faulty revset implementations that rely on it
3364 3364 # (e.g. children as of its state in e8075329c5fb)
3365 3365 #
3366 3366 # When we fix the two points above we can move this into the if clause
3367 3367 other.sort(reverse=self.isdescending())
3368 3368 return other
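# Illustrative sketch (not part of revset.py): intersecting with the full repo
# keeps the other operand, only dropping hidden revisions and re-sorting it to
# match this set's direction.
#
#   fullreposet(repo) & baseset([3, 1, 2])   # -> the baseset, sorted: 1, 2, 3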
3369 3369
3370 3370 def prettyformatset(revs):
3371 3371 lines = []
3372 3372 rs = repr(revs)
3373 3373 p = 0
3374 3374 while p < len(rs):
3375 3375 q = rs.find('<', p + 1)
3376 3376 if q < 0:
3377 3377 q = len(rs)
3378 3378 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3379 3379 assert l >= 0
3380 3380 lines.append((l, rs[p:q].rstrip()))
3381 3381 p = q
3382 3382 return '\n'.join(' ' * l + s for l, s in lines)
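# Illustrative sketch (not part of revset.py): given a nested smartset repr
# such as "<addset+ <spanset+ 0:3>, <spanset+ 5:9>>" (hypothetical input),
# prettyformatset() indents one space per unmatched '<':
#
#   <addset+
#    <spanset+ 0:3>,
#    <spanset+ 5:9>>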
3383 3383
3384 3384 def loadpredicate(ui, extname, registrarobj):
3385 3385 """Load revset predicates from specified registrarobj
3386 3386 """
3387 3387 for name, func in registrarobj._table.iteritems():
3388 3388 symbols[name] = func
3389 3389 if func._safe:
3390 3390 safesymbols.add(name)
3391 3391
3392 3392 # load built-in predicates explicitly to setup safesymbols
3393 3393 loadpredicate(None, None, predicate)
3394 3394
3395 3395 # tell hggettext to extract docstrings from these functions:
3396 3396 i18nfunctions = symbols.values()
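
For context on the deprecation exercised by the test below: a minimal sketch, assuming the registrar-based API that loadpredicate() consumes, of how an extension can register a predicate without poking revset.symbols directly. The predicate name 'newstyle' and its body are hypothetical.

    from mercurial import registrar

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate('newstyle()', safe=True)
    def newstyle(repo, subset, x):
        """all revisions in the subset (no-op example)"""
        # returning the smartset itself, rather than list(subset), is what
        # avoids the "use list instead of smartset" devel warning
        return subset
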
@@ -1,168 +1,170 b''
1 1
2 2 $ cat << EOF > buggylocking.py
3 3 > """A small extension that tests our developer warnings
4 4 > """
5 5 >
6 6 > from mercurial import cmdutil, repair, revset
7 7 >
8 8 > cmdtable = {}
9 9 > command = cmdutil.command(cmdtable)
10 10 >
11 11 > @command('buggylocking', [], '')
12 12 > def buggylocking(ui, repo):
13 13 > tr = repo.transaction('buggy')
14 14 > # make sure we roll back the transaction as we don't want to rely on the __del__
15 15 > tr.release()
16 16 > lo = repo.lock()
17 17 > wl = repo.wlock()
18 18 > wl.release()
19 19 > lo.release()
20 20 >
21 21 > @command('properlocking', [], '')
22 22 > def properlocking(ui, repo):
23 23 > """check that reentrance is fine"""
24 24 > wl = repo.wlock()
25 25 > lo = repo.lock()
26 26 > tr = repo.transaction('proper')
27 27 > tr2 = repo.transaction('proper')
28 28 > lo2 = repo.lock()
29 29 > wl2 = repo.wlock()
30 30 > wl2.release()
31 31 > lo2.release()
32 32 > tr2.close()
33 33 > tr.close()
34 34 > lo.release()
35 35 > wl.release()
36 36 >
37 37 > @command('nowaitlocking', [], '')
38 38 > def nowaitlocking(ui, repo):
39 39 > lo = repo.lock()
40 40 > wl = repo.wlock(wait=False)
41 41 > wl.release()
42 42 > lo.release()
43 43 >
44 44 > @command('stripintr', [], '')
45 45 > def stripintr(ui, repo):
46 46 > lo = repo.lock()
47 47 > tr = repo.transaction('foobar')
48 48 > try:
49 49 > repair.strip(repo.ui, repo, [repo['.'].node()])
50 50 > finally:
51 51 > lo.release()
52 52 > @command('oldanddeprecated', [], '')
53 53 > def oldanddeprecated(ui, repo):
54 54 > """test deprecation warning API"""
55 55 > def foobar(ui):
56 56 > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337')
57 57 > foobar(ui)
58 58 >
59 59 > def oldstylerevset(repo, subset, x):
60 60 > return list(subset)
61 61 >
62 62 > revset.symbols['oldstyle'] = oldstylerevset
63 63 > EOF
64 64
65 65 $ cat << EOF >> $HGRCPATH
66 66 > [extensions]
67 67 > buggylocking=$TESTTMP/buggylocking.py
68 68 > mock=$TESTDIR/mockblackbox.py
69 69 > blackbox=
70 70 > [devel]
71 71 > all-warnings=1
72 72 > EOF
73 73
74 74 $ hg init lock-checker
75 75 $ cd lock-checker
76 76 $ hg buggylocking
77 77 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
78 78 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
79 79 $ cat << EOF >> $HGRCPATH
80 80 > [devel]
81 81 > all=0
82 82 > check-locks=1
83 83 > EOF
84 84 $ hg buggylocking
85 85 devel-warn: transaction with no lock at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
86 86 devel-warn: "wlock" acquired after "lock" at: $TESTTMP/buggylocking.py:* (buggylocking) (glob)
87 87 $ hg buggylocking --traceback
88 88 devel-warn: transaction with no lock at:
89 89 */hg:* in * (glob)
90 90 */mercurial/dispatch.py:* in run (glob)
91 91 */mercurial/dispatch.py:* in dispatch (glob)
92 92 */mercurial/dispatch.py:* in _runcatch (glob)
93 93 */mercurial/dispatch.py:* in _dispatch (glob)
94 94 */mercurial/dispatch.py:* in runcommand (glob)
95 95 */mercurial/dispatch.py:* in _runcommand (glob)
96 96 */mercurial/dispatch.py:* in checkargs (glob)
97 97 */mercurial/dispatch.py:* in <lambda> (glob)
98 98 */mercurial/util.py:* in check (glob)
99 99 $TESTTMP/buggylocking.py:* in buggylocking (glob)
100 100 devel-warn: "wlock" acquired after "lock" at:
101 101 */hg:* in * (glob)
102 102 */mercurial/dispatch.py:* in run (glob)
103 103 */mercurial/dispatch.py:* in dispatch (glob)
104 104 */mercurial/dispatch.py:* in _runcatch (glob)
105 105 */mercurial/dispatch.py:* in _dispatch (glob)
106 106 */mercurial/dispatch.py:* in runcommand (glob)
107 107 */mercurial/dispatch.py:* in _runcommand (glob)
108 108 */mercurial/dispatch.py:* in checkargs (glob)
109 109 */mercurial/dispatch.py:* in <lambda> (glob)
110 110 */mercurial/util.py:* in check (glob)
111 111 $TESTTMP/buggylocking.py:* in buggylocking (glob)
112 112 $ hg properlocking
113 113 $ hg nowaitlocking
114 114
115 115 $ echo a > a
116 116 $ hg add a
117 117 $ hg commit -m a
118 118 $ hg stripintr
119 119 saved backup bundle to $TESTTMP/lock-checker/.hg/strip-backup/*-backup.hg (glob)
120 120 abort: programming error: cannot strip from inside a transaction
121 121 (contact your extension maintainer)
122 122 [255]
123 123
124 124 $ hg log -r "oldstyle()" -T '{rev}\n'
125 devel-warn: revset "oldstyle" use list instead of smartset, (upgrade your code) at: */mercurial/revset.py:* (mfunc) (glob)
125 devel-warn: revset "oldstyle" use list instead of smartset
126 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
126 127 0
127 128 $ hg oldanddeprecated
128 129 devel-warn: foorbar is deprecated, go shopping
129 130 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
130 131
131 132 $ hg oldanddeprecated --traceback
132 133 devel-warn: foorbar is deprecated, go shopping
133 134 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
134 135 */hg:* in <module> (glob)
135 136 */mercurial/dispatch.py:* in run (glob)
136 137 */mercurial/dispatch.py:* in dispatch (glob)
137 138 */mercurial/dispatch.py:* in _runcatch (glob)
138 139 */mercurial/dispatch.py:* in _dispatch (glob)
139 140 */mercurial/dispatch.py:* in runcommand (glob)
140 141 */mercurial/dispatch.py:* in _runcommand (glob)
141 142 */mercurial/dispatch.py:* in checkargs (glob)
142 143 */mercurial/dispatch.py:* in <lambda> (glob)
143 144 */mercurial/util.py:* in check (glob)
144 145 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
145 146 $ hg blackbox -l 9
146 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" use list instead of smartset, (upgrade your code) at: */mercurial/revset.py:* (mfunc) (glob)
147 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: revset "oldstyle" use list instead of smartset
148 (compatibility will be dropped after Mercurial-3.9, update your code.) at: *mercurial/revset.py:* (mfunc) (glob)
147 149 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> log -r oldstyle() -T {rev}\n exited 0 after * seconds (glob)
148 150 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated
149 151 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
150 152 (compatibility will be dropped after Mercurial-42.1337, update your code.) at: $TESTTMP/buggylocking.py:* (oldanddeprecated) (glob)
151 153 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated exited 0 after * seconds (glob)
152 154 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback
153 155 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> devel-warn: foorbar is deprecated, go shopping
154 156 (compatibility will be dropped after Mercurial-42.1337, update your code.) at:
155 157 */hg:* in <module> (glob)
156 158 */mercurial/dispatch.py:* in run (glob)
157 159 */mercurial/dispatch.py:* in dispatch (glob)
158 160 */mercurial/dispatch.py:* in _runcatch (glob)
159 161 */mercurial/dispatch.py:* in _dispatch (glob)
160 162 */mercurial/dispatch.py:* in runcommand (glob)
161 163 */mercurial/dispatch.py:* in _runcommand (glob)
162 164 */mercurial/dispatch.py:* in checkargs (glob)
163 165 */mercurial/dispatch.py:* in <lambda> (glob)
164 166 */mercurial/util.py:* in check (glob)
165 167 $TESTTMP/buggylocking.py:* in oldanddeprecated (glob)
166 168 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> oldanddeprecated --traceback exited 0 after * seconds (glob)
167 169 1970/01/01 00:00:00 bob @cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b (5000)> blackbox -l 9
168 170 $ cd ..