##// END OF EJS Templates
revset: prevent using outgoing() and remote() in hgweb session (BC)...
Yuya Nishihara -
r30850:41e31a6f stable
parent child Browse files
Show More
@@ -1,3916 +1,3916
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12 import string
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 destutil,
17 17 encoding,
18 18 error,
19 19 hbisect,
20 20 match as matchmod,
21 21 node,
22 22 obsolete as obsmod,
23 23 parser,
24 24 pathutil,
25 25 phases,
26 26 pycompat,
27 27 registrar,
28 28 repoview,
29 29 util,
30 30 )
31 31
32 32 def _revancestors(repo, revs, followfirst):
33 33 """Like revlog.ancestors(), but supports followfirst."""
34 34 if followfirst:
35 35 cut = 1
36 36 else:
37 37 cut = None
38 38 cl = repo.changelog
39 39
40 40 def iterate():
41 41 revs.sort(reverse=True)
42 42 irevs = iter(revs)
43 43 h = []
44 44
45 45 inputrev = next(irevs, None)
46 46 if inputrev is not None:
47 47 heapq.heappush(h, -inputrev)
48 48
49 49 seen = set()
50 50 while h:
51 51 current = -heapq.heappop(h)
52 52 if current == inputrev:
53 53 inputrev = next(irevs, None)
54 54 if inputrev is not None:
55 55 heapq.heappush(h, -inputrev)
56 56 if current not in seen:
57 57 seen.add(current)
58 58 yield current
59 59 for parent in cl.parentrevs(current)[:cut]:
60 60 if parent != node.nullrev:
61 61 heapq.heappush(h, -parent)
62 62
63 63 return generatorset(iterate(), iterasc=False)
64 64
65 65 def _revdescendants(repo, revs, followfirst):
66 66 """Like revlog.descendants() but supports followfirst."""
67 67 if followfirst:
68 68 cut = 1
69 69 else:
70 70 cut = None
71 71
72 72 def iterate():
73 73 cl = repo.changelog
74 74 # XXX this should be 'parentset.min()' assuming 'parentset' is a
75 75 # smartset (and if it is not, it should.)
76 76 first = min(revs)
77 77 nullrev = node.nullrev
78 78 if first == nullrev:
79 79 # Are there nodes with a null first parent and a non-null
80 80 # second one? Maybe. Do we care? Probably not.
81 81 for i in cl:
82 82 yield i
83 83 else:
84 84 seen = set(revs)
85 85 for i in cl.revs(first + 1):
86 86 for x in cl.parentrevs(i)[:cut]:
87 87 if x != nullrev and x in seen:
88 88 seen.add(i)
89 89 yield i
90 90 break
91 91
92 92 return generatorset(iterate(), iterasc=True)
93 93
94 94 def _reachablerootspure(repo, minroot, roots, heads, includepath):
95 95 """return (heads(::<roots> and ::<heads>))
96 96
97 97 If includepath is True, return (<roots>::<heads>)."""
98 98 if not roots:
99 99 return []
100 100 parentrevs = repo.changelog.parentrevs
101 101 roots = set(roots)
102 102 visit = list(heads)
103 103 reachable = set()
104 104 seen = {}
105 105 # prefetch all the things! (because python is slow)
106 106 reached = reachable.add
107 107 dovisit = visit.append
108 108 nextvisit = visit.pop
109 109 # open-code the post-order traversal due to the tiny size of
110 110 # sys.getrecursionlimit()
111 111 while visit:
112 112 rev = nextvisit()
113 113 if rev in roots:
114 114 reached(rev)
115 115 if not includepath:
116 116 continue
117 117 parents = parentrevs(rev)
118 118 seen[rev] = parents
119 119 for parent in parents:
120 120 if parent >= minroot and parent not in seen:
121 121 dovisit(parent)
122 122 if not reachable:
123 123 return baseset()
124 124 if not includepath:
125 125 return reachable
126 126 for rev in sorted(seen):
127 127 for parent in seen[rev]:
128 128 if parent in reachable:
129 129 reached(rev)
130 130 return reachable
131 131
132 132 def reachableroots(repo, roots, heads, includepath=False):
133 133 """return (heads(::<roots> and ::<heads>))
134 134
135 135 If includepath is True, return (<roots>::<heads>)."""
136 136 if not roots:
137 137 return baseset()
138 138 minroot = roots.min()
139 139 roots = list(roots)
140 140 heads = list(heads)
141 141 try:
142 142 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
143 143 except AttributeError:
144 144 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
145 145 revs = baseset(revs)
146 146 revs.sort()
147 147 return revs
148 148
149 149 elements = {
150 150 # token-type: binding-strength, primary, prefix, infix, suffix
151 151 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
152 152 "##": (20, None, None, ("_concat", 20), None),
153 153 "~": (18, None, None, ("ancestor", 18), None),
154 154 "^": (18, None, None, ("parent", 18), "parentpost"),
155 155 "-": (5, None, ("negate", 19), ("minus", 5), None),
156 156 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
157 157 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
158 158 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
159 159 "not": (10, None, ("not", 10), None, None),
160 160 "!": (10, None, ("not", 10), None, None),
161 161 "and": (5, None, None, ("and", 5), None),
162 162 "&": (5, None, None, ("and", 5), None),
163 163 "%": (5, None, None, ("only", 5), "onlypost"),
164 164 "or": (4, None, None, ("or", 4), None),
165 165 "|": (4, None, None, ("or", 4), None),
166 166 "+": (4, None, None, ("or", 4), None),
167 167 "=": (3, None, None, ("keyvalue", 3), None),
168 168 ",": (2, None, None, ("list", 2), None),
169 169 ")": (0, None, None, None, None),
170 170 "symbol": (0, "symbol", None, None, None),
171 171 "string": (0, "string", None, None, None),
172 172 "end": (0, None, None, None, None),
173 173 }
174 174
175 175 keywords = set(['and', 'or', 'not'])
176 176
177 177 # default set of valid characters for the initial letter of symbols
178 178 _syminitletters = set(
179 179 string.ascii_letters +
180 180 string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256)))
181 181
182 182 # default set of valid characters for non-initial letters of symbols
183 183 _symletters = _syminitletters | set(pycompat.sysstr('-/'))
184 184
185 185 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
186 186 '''
187 187 Parse a revset statement into a stream of tokens
188 188
189 189 ``syminitletters`` is the set of valid characters for the initial
190 190 letter of symbols.
191 191
192 192 By default, character ``c`` is recognized as valid for initial
193 193 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
194 194
195 195 ``symletters`` is the set of valid characters for non-initial
196 196 letters of symbols.
197 197
198 198 By default, character ``c`` is recognized as valid for non-initial
199 199 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
200 200
201 201 Check that @ is a valid unquoted token character (issue3686):
202 202 >>> list(tokenize("@::"))
203 203 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
204 204
205 205 '''
206 206 if syminitletters is None:
207 207 syminitletters = _syminitletters
208 208 if symletters is None:
209 209 symletters = _symletters
210 210
211 211 if program and lookup:
212 212 # attempt to parse old-style ranges first to deal with
213 213 # things like old-tag which contain query metacharacters
214 214 parts = program.split(':', 1)
215 215 if all(lookup(sym) for sym in parts if sym):
216 216 if parts[0]:
217 217 yield ('symbol', parts[0], 0)
218 218 if len(parts) > 1:
219 219 s = len(parts[0])
220 220 yield (':', None, s)
221 221 if parts[1]:
222 222 yield ('symbol', parts[1], s + 1)
223 223 yield ('end', None, len(program))
224 224 return
225 225
226 226 pos, l = 0, len(program)
227 227 while pos < l:
228 228 c = program[pos]
229 229 if c.isspace(): # skip inter-token whitespace
230 230 pass
231 231 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
232 232 yield ('::', None, pos)
233 233 pos += 1 # skip ahead
234 234 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
235 235 yield ('..', None, pos)
236 236 pos += 1 # skip ahead
237 237 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
238 238 yield ('##', None, pos)
239 239 pos += 1 # skip ahead
240 240 elif c in "():=,-|&+!~^%": # handle simple operators
241 241 yield (c, None, pos)
242 242 elif (c in '"\'' or c == 'r' and
243 243 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
244 244 if c == 'r':
245 245 pos += 1
246 246 c = program[pos]
247 247 decode = lambda x: x
248 248 else:
249 249 decode = parser.unescapestr
250 250 pos += 1
251 251 s = pos
252 252 while pos < l: # find closing quote
253 253 d = program[pos]
254 254 if d == '\\': # skip over escaped characters
255 255 pos += 2
256 256 continue
257 257 if d == c:
258 258 yield ('string', decode(program[s:pos]), s)
259 259 break
260 260 pos += 1
261 261 else:
262 262 raise error.ParseError(_("unterminated string"), s)
263 263 # gather up a symbol/keyword
264 264 elif c in syminitletters:
265 265 s = pos
266 266 pos += 1
267 267 while pos < l: # find end of symbol
268 268 d = program[pos]
269 269 if d not in symletters:
270 270 break
271 271 if d == '.' and program[pos - 1] == '.': # special case for ..
272 272 pos -= 1
273 273 break
274 274 pos += 1
275 275 sym = program[s:pos]
276 276 if sym in keywords: # operator keywords
277 277 yield (sym, None, s)
278 278 elif '-' in sym:
279 279 # some jerk gave us foo-bar-baz, try to check if it's a symbol
280 280 if lookup and lookup(sym):
281 281 # looks like a real symbol
282 282 yield ('symbol', sym, s)
283 283 else:
284 284 # looks like an expression
285 285 parts = sym.split('-')
286 286 for p in parts[:-1]:
287 287 if p: # possible consecutive -
288 288 yield ('symbol', p, s)
289 289 s += len(p)
290 290 yield ('-', None, pos)
291 291 s += 1
292 292 if parts[-1]: # possible trailing -
293 293 yield ('symbol', parts[-1], s)
294 294 else:
295 295 yield ('symbol', sym, s)
296 296 pos -= 1
297 297 else:
298 298 raise error.ParseError(_("syntax error in revset '%s'") %
299 299 program, pos)
300 300 pos += 1
301 301 yield ('end', None, pos)
302 302
303 303 # helpers
304 304
305 305 _notset = object()
306 306
307 307 def getsymbol(x):
308 308 if x and x[0] == 'symbol':
309 309 return x[1]
310 310 raise error.ParseError(_('not a symbol'))
311 311
312 312 def getstring(x, err):
313 313 if x and (x[0] == 'string' or x[0] == 'symbol'):
314 314 return x[1]
315 315 raise error.ParseError(err)
316 316
317 317 def getinteger(x, err, default=_notset):
318 318 if not x and default is not _notset:
319 319 return default
320 320 try:
321 321 return int(getstring(x, err))
322 322 except ValueError:
323 323 raise error.ParseError(err)
324 324
325 325 def getlist(x):
326 326 if not x:
327 327 return []
328 328 if x[0] == 'list':
329 329 return list(x[1:])
330 330 return [x]
331 331
332 332 def getrange(x, err):
333 333 if not x:
334 334 raise error.ParseError(err)
335 335 op = x[0]
336 336 if op == 'range':
337 337 return x[1], x[2]
338 338 elif op == 'rangepre':
339 339 return None, x[1]
340 340 elif op == 'rangepost':
341 341 return x[1], None
342 342 elif op == 'rangeall':
343 343 return None, None
344 344 raise error.ParseError(err)
345 345
346 346 def getargs(x, min, max, err):
347 347 l = getlist(x)
348 348 if len(l) < min or (max >= 0 and len(l) > max):
349 349 raise error.ParseError(err)
350 350 return l
351 351
352 352 def getargsdict(x, funcname, keys):
353 353 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
354 354 keyvaluenode='keyvalue', keynode='symbol')
355 355
356 356 def getset(repo, subset, x):
357 357 if not x:
358 358 raise error.ParseError(_("missing argument"))
359 359 s = methods[x[0]](repo, subset, *x[1:])
360 360 if util.safehasattr(s, 'isascending'):
361 361 return s
362 362 # else case should not happen, because all non-func are internal,
363 363 # ignoring for now.
364 364 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
365 365 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
366 366 % x[1][1],
367 367 '3.9')
368 368 return baseset(s)
369 369
370 370 def _getrevsource(repo, r):
371 371 extra = repo[r].extra()
372 372 for label in ('source', 'transplant_source', 'rebase_source'):
373 373 if label in extra:
374 374 try:
375 375 return repo[extra[label]].rev()
376 376 except error.RepoLookupError:
377 377 pass
378 378 return None
379 379
380 380 # operator methods
381 381
382 382 def stringset(repo, subset, x):
383 383 x = repo[x].rev()
384 384 if (x in subset
385 385 or x == node.nullrev and isinstance(subset, fullreposet)):
386 386 return baseset([x])
387 387 return baseset()
388 388
389 389 def rangeset(repo, subset, x, y, order):
390 390 m = getset(repo, fullreposet(repo), x)
391 391 n = getset(repo, fullreposet(repo), y)
392 392
393 393 if not m or not n:
394 394 return baseset()
395 395 return _makerangeset(repo, subset, m.first(), n.last(), order)
396 396
397 397 def rangeall(repo, subset, x, order):
398 398 assert x is None
399 399 return _makerangeset(repo, subset, 0, len(repo) - 1, order)
400 400
401 401 def rangepre(repo, subset, y, order):
402 402 # ':y' can't be rewritten to '0:y' since '0' may be hidden
403 403 n = getset(repo, fullreposet(repo), y)
404 404 if not n:
405 405 return baseset()
406 406 return _makerangeset(repo, subset, 0, n.last(), order)
407 407
408 408 def rangepost(repo, subset, x, order):
409 409 m = getset(repo, fullreposet(repo), x)
410 410 if not m:
411 411 return baseset()
412 412 return _makerangeset(repo, subset, m.first(), len(repo) - 1, order)
413 413
414 414 def _makerangeset(repo, subset, m, n, order):
415 415 if m == n:
416 416 r = baseset([m])
417 417 elif n == node.wdirrev:
418 418 r = spanset(repo, m, len(repo)) + baseset([n])
419 419 elif m == node.wdirrev:
420 420 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
421 421 elif m < n:
422 422 r = spanset(repo, m, n + 1)
423 423 else:
424 424 r = spanset(repo, m, n - 1)
425 425
426 426 if order == defineorder:
427 427 return r & subset
428 428 else:
429 429 # carrying the sorting over when possible would be more efficient
430 430 return subset & r
431 431
432 432 def dagrange(repo, subset, x, y, order):
433 433 r = fullreposet(repo)
434 434 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
435 435 includepath=True)
436 436 return subset & xs
437 437
438 438 def andset(repo, subset, x, y, order):
439 439 return getset(repo, getset(repo, subset, x), y)
440 440
441 441 def differenceset(repo, subset, x, y, order):
442 442 return getset(repo, subset, x) - getset(repo, subset, y)
443 443
444 444 def _orsetlist(repo, subset, xs):
445 445 assert xs
446 446 if len(xs) == 1:
447 447 return getset(repo, subset, xs[0])
448 448 p = len(xs) // 2
449 449 a = _orsetlist(repo, subset, xs[:p])
450 450 b = _orsetlist(repo, subset, xs[p:])
451 451 return a + b
452 452
453 453 def orset(repo, subset, x, order):
454 454 xs = getlist(x)
455 455 if order == followorder:
456 456 # slow path to take the subset order
457 457 return subset & _orsetlist(repo, fullreposet(repo), xs)
458 458 else:
459 459 return _orsetlist(repo, subset, xs)
460 460
461 461 def notset(repo, subset, x, order):
462 462 return subset - getset(repo, subset, x)
463 463
464 464 def listset(repo, subset, *xs):
465 465 raise error.ParseError(_("can't use a list in this context"),
466 466 hint=_('see hg help "revsets.x or y"'))
467 467
468 468 def keyvaluepair(repo, subset, k, v):
469 469 raise error.ParseError(_("can't use a key-value pair in this context"))
470 470
471 471 def func(repo, subset, a, b, order):
472 472 f = getsymbol(a)
473 473 if f in symbols:
474 474 func = symbols[f]
475 475 if getattr(func, '_takeorder', False):
476 476 return func(repo, subset, b, order)
477 477 return func(repo, subset, b)
478 478
479 479 keep = lambda fn: getattr(fn, '__doc__', None) is not None
480 480
481 481 syms = [s for (s, fn) in symbols.items() if keep(fn)]
482 482 raise error.UnknownIdentifier(f, syms)
483 483
484 484 # functions
485 485
486 486 # symbols are callables like:
487 487 # fn(repo, subset, x)
488 488 # with:
489 489 # repo - current repository instance
490 490 # subset - of revisions to be examined
491 491 # x - argument in tree form
492 492 symbols = {}
493 493
494 494 # symbols which can't be used for a DoS attack for any given input
495 495 # (e.g. those which accept regexes as plain strings shouldn't be included)
496 496 # functions that just return a lot of changesets (like all) don't count here
497 497 safesymbols = set()
498 498
499 499 predicate = registrar.revsetpredicate()
500 500
501 501 @predicate('_destupdate')
502 502 def _destupdate(repo, subset, x):
503 503 # experimental revset for update destination
504 504 args = getargsdict(x, 'limit', 'clean check')
505 505 return subset & baseset([destutil.destupdate(repo, **args)[0]])
506 506
507 507 @predicate('_destmerge')
508 508 def _destmerge(repo, subset, x):
509 509 # experimental revset for merge destination
510 510 sourceset = None
511 511 if x is not None:
512 512 sourceset = getset(repo, fullreposet(repo), x)
513 513 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
514 514
515 515 @predicate('adds(pattern)', safe=True)
516 516 def adds(repo, subset, x):
517 517 """Changesets that add a file matching pattern.
518 518
519 519 The pattern without explicit kind like ``glob:`` is expected to be
520 520 relative to the current directory and match against a file or a
521 521 directory.
522 522 """
523 523 # i18n: "adds" is a keyword
524 524 pat = getstring(x, _("adds requires a pattern"))
525 525 return checkstatus(repo, subset, pat, 1)
526 526
527 527 @predicate('ancestor(*changeset)', safe=True)
528 528 def ancestor(repo, subset, x):
529 529 """A greatest common ancestor of the changesets.
530 530
531 531 Accepts 0 or more changesets.
532 532 Will return empty list when passed no args.
533 533 Greatest common ancestor of a single changeset is that changeset.
534 534 """
535 535 # i18n: "ancestor" is a keyword
536 536 l = getlist(x)
537 537 rl = fullreposet(repo)
538 538 anc = None
539 539
540 540 # (getset(repo, rl, i) for i in l) generates a list of lists
541 541 for revs in (getset(repo, rl, i) for i in l):
542 542 for r in revs:
543 543 if anc is None:
544 544 anc = repo[r]
545 545 else:
546 546 anc = anc.ancestor(repo[r])
547 547
548 548 if anc is not None and anc.rev() in subset:
549 549 return baseset([anc.rev()])
550 550 return baseset()
551 551
552 552 def _ancestors(repo, subset, x, followfirst=False):
553 553 heads = getset(repo, fullreposet(repo), x)
554 554 if not heads:
555 555 return baseset()
556 556 s = _revancestors(repo, heads, followfirst)
557 557 return subset & s
558 558
559 559 @predicate('ancestors(set)', safe=True)
560 560 def ancestors(repo, subset, x):
561 561 """Changesets that are ancestors of a changeset in set.
562 562 """
563 563 return _ancestors(repo, subset, x)
564 564
565 565 @predicate('_firstancestors', safe=True)
566 566 def _firstancestors(repo, subset, x):
567 567 # ``_firstancestors(set)``
568 568 # Like ``ancestors(set)`` but follows only the first parents.
569 569 return _ancestors(repo, subset, x, followfirst=True)
570 570
571 571 def ancestorspec(repo, subset, x, n, order):
572 572 """``set~n``
573 573 Changesets that are the Nth ancestor (first parents only) of a changeset
574 574 in set.
575 575 """
576 576 n = getinteger(n, _("~ expects a number"))
577 577 ps = set()
578 578 cl = repo.changelog
579 579 for r in getset(repo, fullreposet(repo), x):
580 580 for i in range(n):
581 581 r = cl.parentrevs(r)[0]
582 582 ps.add(r)
583 583 return subset & ps
584 584
585 585 @predicate('author(string)', safe=True)
586 586 def author(repo, subset, x):
587 587 """Alias for ``user(string)``.
588 588 """
589 589 # i18n: "author" is a keyword
590 590 n = getstring(x, _("author requires a string"))
591 591 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
592 592 return subset.filter(lambda x: matcher(repo[x].user()),
593 593 condrepr=('<user %r>', n))
594 594
595 595 @predicate('bisect(string)', safe=True)
596 596 def bisect(repo, subset, x):
597 597 """Changesets marked in the specified bisect status:
598 598
599 599 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
600 600 - ``goods``, ``bads`` : csets topologically good/bad
601 601 - ``range`` : csets taking part in the bisection
602 602 - ``pruned`` : csets that are goods, bads or skipped
603 603 - ``untested`` : csets whose fate is yet unknown
604 604 - ``ignored`` : csets ignored due to DAG topology
605 605 - ``current`` : the cset currently being bisected
606 606 """
607 607 # i18n: "bisect" is a keyword
608 608 status = getstring(x, _("bisect requires a string")).lower()
609 609 state = set(hbisect.get(repo, status))
610 610 return subset & state
611 611
612 612 # Backward-compatibility
613 613 # - no help entry so that we do not advertise it any more
614 614 @predicate('bisected', safe=True)
615 615 def bisected(repo, subset, x):
616 616 return bisect(repo, subset, x)
617 617
618 618 @predicate('bookmark([name])', safe=True)
619 619 def bookmark(repo, subset, x):
620 620 """The named bookmark or all bookmarks.
621 621
622 622 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
623 623 """
624 624 # i18n: "bookmark" is a keyword
625 625 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
626 626 if args:
627 627 bm = getstring(args[0],
628 628 # i18n: "bookmark" is a keyword
629 629 _('the argument to bookmark must be a string'))
630 630 kind, pattern, matcher = util.stringmatcher(bm)
631 631 bms = set()
632 632 if kind == 'literal':
633 633 bmrev = repo._bookmarks.get(pattern, None)
634 634 if not bmrev:
635 635 raise error.RepoLookupError(_("bookmark '%s' does not exist")
636 636 % pattern)
637 637 bms.add(repo[bmrev].rev())
638 638 else:
639 639 matchrevs = set()
640 640 for name, bmrev in repo._bookmarks.iteritems():
641 641 if matcher(name):
642 642 matchrevs.add(bmrev)
643 643 if not matchrevs:
644 644 raise error.RepoLookupError(_("no bookmarks exist"
645 645 " that match '%s'") % pattern)
646 646 for bmrev in matchrevs:
647 647 bms.add(repo[bmrev].rev())
648 648 else:
649 649 bms = set([repo[r].rev()
650 650 for r in repo._bookmarks.values()])
651 651 bms -= set([node.nullrev])
652 652 return subset & bms
653 653
654 654 @predicate('branch(string or set)', safe=True)
655 655 def branch(repo, subset, x):
656 656 """
657 657 All changesets belonging to the given branch or the branches of the given
658 658 changesets.
659 659
660 660 Pattern matching is supported for `string`. See
661 661 :hg:`help revisions.patterns`.
662 662 """
663 663 getbi = repo.revbranchcache().branchinfo
664 664
665 665 try:
666 666 b = getstring(x, '')
667 667 except error.ParseError:
668 668 # not a string, but another revspec, e.g. tip()
669 669 pass
670 670 else:
671 671 kind, pattern, matcher = util.stringmatcher(b)
672 672 if kind == 'literal':
673 673 # note: falls through to the revspec case if no branch with
674 674 # this name exists and pattern kind is not specified explicitly
675 675 if pattern in repo.branchmap():
676 676 return subset.filter(lambda r: matcher(getbi(r)[0]),
677 677 condrepr=('<branch %r>', b))
678 678 if b.startswith('literal:'):
679 679 raise error.RepoLookupError(_("branch '%s' does not exist")
680 680 % pattern)
681 681 else:
682 682 return subset.filter(lambda r: matcher(getbi(r)[0]),
683 683 condrepr=('<branch %r>', b))
684 684
685 685 s = getset(repo, fullreposet(repo), x)
686 686 b = set()
687 687 for r in s:
688 688 b.add(getbi(r)[0])
689 689 c = s.__contains__
690 690 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
691 691 condrepr=lambda: '<branch %r>' % sorted(b))
692 692
693 693 @predicate('bumped()', safe=True)
694 694 def bumped(repo, subset, x):
695 695 """Mutable changesets marked as successors of public changesets.
696 696
697 697 Only non-public and non-obsolete changesets can be `bumped`.
698 698 """
699 699 # i18n: "bumped" is a keyword
700 700 getargs(x, 0, 0, _("bumped takes no arguments"))
701 701 bumped = obsmod.getrevs(repo, 'bumped')
702 702 return subset & bumped
703 703
704 704 @predicate('bundle()', safe=True)
705 705 def bundle(repo, subset, x):
706 706 """Changesets in the bundle.
707 707
708 708 Bundle must be specified by the -R option."""
709 709
710 710 try:
711 711 bundlerevs = repo.changelog.bundlerevs
712 712 except AttributeError:
713 713 raise error.Abort(_("no bundle provided - specify with -R"))
714 714 return subset & bundlerevs
715 715
716 716 def checkstatus(repo, subset, pat, field):
717 717 hasset = matchmod.patkind(pat) == 'set'
718 718
719 719 mcache = [None]
720 720 def matches(x):
721 721 c = repo[x]
722 722 if not mcache[0] or hasset:
723 723 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
724 724 m = mcache[0]
725 725 fname = None
726 726 if not m.anypats() and len(m.files()) == 1:
727 727 fname = m.files()[0]
728 728 if fname is not None:
729 729 if fname not in c.files():
730 730 return False
731 731 else:
732 732 for f in c.files():
733 733 if m(f):
734 734 break
735 735 else:
736 736 return False
737 737 files = repo.status(c.p1().node(), c.node())[field]
738 738 if fname is not None:
739 739 if fname in files:
740 740 return True
741 741 else:
742 742 for f in files:
743 743 if m(f):
744 744 return True
745 745
746 746 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
747 747
748 748 def _children(repo, subset, parentset):
749 749 if not parentset:
750 750 return baseset()
751 751 cs = set()
752 752 pr = repo.changelog.parentrevs
753 753 minrev = parentset.min()
754 754 nullrev = node.nullrev
755 755 for r in subset:
756 756 if r <= minrev:
757 757 continue
758 758 p1, p2 = pr(r)
759 759 if p1 in parentset:
760 760 cs.add(r)
761 761 if p2 != nullrev and p2 in parentset:
762 762 cs.add(r)
763 763 return baseset(cs)
764 764
765 765 @predicate('children(set)', safe=True)
766 766 def children(repo, subset, x):
767 767 """Child changesets of changesets in set.
768 768 """
769 769 s = getset(repo, fullreposet(repo), x)
770 770 cs = _children(repo, subset, s)
771 771 return subset & cs
772 772
773 773 @predicate('closed()', safe=True)
774 774 def closed(repo, subset, x):
775 775 """Changeset is closed.
776 776 """
777 777 # i18n: "closed" is a keyword
778 778 getargs(x, 0, 0, _("closed takes no arguments"))
779 779 return subset.filter(lambda r: repo[r].closesbranch(),
780 780 condrepr='<branch closed>')
781 781
782 782 @predicate('contains(pattern)')
783 783 def contains(repo, subset, x):
784 784 """The revision's manifest contains a file matching pattern (but might not
785 785 modify it). See :hg:`help patterns` for information about file patterns.
786 786
787 787 The pattern without explicit kind like ``glob:`` is expected to be
788 788 relative to the current directory and match against a file exactly
789 789 for efficiency.
790 790 """
791 791 # i18n: "contains" is a keyword
792 792 pat = getstring(x, _("contains requires a pattern"))
793 793
794 794 def matches(x):
795 795 if not matchmod.patkind(pat):
796 796 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
797 797 if pats in repo[x]:
798 798 return True
799 799 else:
800 800 c = repo[x]
801 801 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
802 802 for f in c.manifest():
803 803 if m(f):
804 804 return True
805 805 return False
806 806
807 807 return subset.filter(matches, condrepr=('<contains %r>', pat))
808 808
809 809 @predicate('converted([id])', safe=True)
810 810 def converted(repo, subset, x):
811 811 """Changesets converted from the given identifier in the old repository if
812 812 present, or all converted changesets if no identifier is specified.
813 813 """
814 814
815 815 # There is exactly no chance of resolving the revision, so do a simple
816 816 # string compare and hope for the best
817 817
818 818 rev = None
819 819 # i18n: "converted" is a keyword
820 820 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
821 821 if l:
822 822 # i18n: "converted" is a keyword
823 823 rev = getstring(l[0], _('converted requires a revision'))
824 824
825 825 def _matchvalue(r):
826 826 source = repo[r].extra().get('convert_revision', None)
827 827 return source is not None and (rev is None or source.startswith(rev))
828 828
829 829 return subset.filter(lambda r: _matchvalue(r),
830 830 condrepr=('<converted %r>', rev))
831 831
832 832 @predicate('date(interval)', safe=True)
833 833 def date(repo, subset, x):
834 834 """Changesets within the interval, see :hg:`help dates`.
835 835 """
836 836 # i18n: "date" is a keyword
837 837 ds = getstring(x, _("date requires a string"))
838 838 dm = util.matchdate(ds)
839 839 return subset.filter(lambda x: dm(repo[x].date()[0]),
840 840 condrepr=('<date %r>', ds))
841 841
842 842 @predicate('desc(string)', safe=True)
843 843 def desc(repo, subset, x):
844 844 """Search commit message for string. The match is case-insensitive.
845 845
846 846 Pattern matching is supported for `string`. See
847 847 :hg:`help revisions.patterns`.
848 848 """
849 849 # i18n: "desc" is a keyword
850 850 ds = getstring(x, _("desc requires a string"))
851 851
852 852 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
853 853
854 854 return subset.filter(lambda r: matcher(repo[r].description()),
855 855 condrepr=('<desc %r>', ds))
856 856
857 857 def _descendants(repo, subset, x, followfirst=False):
858 858 roots = getset(repo, fullreposet(repo), x)
859 859 if not roots:
860 860 return baseset()
861 861 s = _revdescendants(repo, roots, followfirst)
862 862
863 863 # Both sets need to be ascending in order to lazily return the union
864 864 # in the correct order.
865 865 base = subset & roots
866 866 desc = subset & s
867 867 result = base + desc
868 868 if subset.isascending():
869 869 result.sort()
870 870 elif subset.isdescending():
871 871 result.sort(reverse=True)
872 872 else:
873 873 result = subset & result
874 874 return result
875 875
876 876 @predicate('descendants(set)', safe=True)
877 877 def descendants(repo, subset, x):
878 878 """Changesets which are descendants of changesets in set.
879 879 """
880 880 return _descendants(repo, subset, x)
881 881
882 882 @predicate('_firstdescendants', safe=True)
883 883 def _firstdescendants(repo, subset, x):
884 884 # ``_firstdescendants(set)``
885 885 # Like ``descendants(set)`` but follows only the first parents.
886 886 return _descendants(repo, subset, x, followfirst=True)
887 887
@predicate('destination([set])', safe=True)
def destination(repo, subset, x):
    """Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # 'r' is a (transitive) destination of the original sources as
            # long as the chain below it ends in the arg set
            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # step one link back along the graft/transplant/rebase chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__,
                         condrepr=lambda: '<destination %r>' % sorted(dests))
932 932
@predicate('divergent()', safe=True)
def divergent(repo, subset, x):
    """
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # restrict the subset to the obsolescence-tracked "divergent" revisions
    return subset & obsmod.getrevs(repo, 'divergent')
942 942
@predicate('extinct()', safe=True)
def extinct(repo, subset, x):
    """Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # restrict the subset to the obsolescence-tracked "extinct" revisions
    return subset & obsmod.getrevs(repo, 'extinct')
951 951
@predicate('extra(label, [value])', safe=True)
def extra(repo, subset, x):
    """Changesets with the given label in the extra metadata, with the given
    optional value.

    Pattern matching is supported for `value`. See
    :hg:`help revisions.patterns`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))

    value = None
    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # a revision matches when the label exists and, if a value pattern
        # was given, the stored value matches it
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(_matchvalue,
                         condrepr=('<extra[%r] %r>', label, value))
981 981
@predicate('filelog(pattern)', safe=True)
def filelog(repo, subset, x):
    """Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it to exactly one repo-relative file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against the working-directory manifest
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        fl = repo.file(f)
        # known: filenode -> changelog rev already resolved for this file
        known = {}
        # scanpos: changelog position reached by the fallback scan below;
        # None while a scan is in progress
        scanpos = 0
        for fr in list(fl):
            fn = fl.node(fr)
            if fn in known:
                s.add(known[fn])
                continue

            lr = fl.linkrev(fr)
            if lr in cl:
                s.add(lr)
            elif scanpos is not None:
                # lowest matching changeset is filtered, scan further
                # ahead in changelog
                start = max(lr, scanpos) + 1
                scanpos = None
                for r in cl.revs(start):
                    # minimize parsing of non-matching entries
                    if f in cl.revision(r) and f in cl.readfiles(r):
                        try:
                            # try to use manifest delta fastpath
                            n = repo[r].filenode(f)
                            if n not in known:
                                if n == fn:
                                    s.add(r)
                                    scanpos = r
                                    break
                                else:
                                    # remember for later filelog entries
                                    known[n] = r
                        except error.ManifestLookupError:
                            # deletion in changelog
                            continue

    return subset & s
1047 1047
@predicate('first(set, [n])', safe=True)
def first(repo, subset, x):
    """An alias for limit().
    """
    # delegate entirely to limit(), which does the argument parsing
    return limit(repo, subset, x)
1053 1053
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation of ``follow()`` and ``_followfirst()``.

    Without arguments, returns the ancestors of the working directory's
    first parent.  With a pattern, returns the ancestors of the matched
    files' filectxs starting from ``startrev`` (default: '.').
    """
    l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
                           "and an optional revset") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        rev = None
        if len(l) >= 2:
            # the optional second argument must resolve to a single revision
            revs = getset(repo, fullreposet(repo), l[1])
            if len(revs) != 1:
                raise error.RepoLookupError(
                        _("%s expected one starting revision") % name)
            rev = revs.last()
            c = repo[rev]
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[rev], default='path')

        files = c.manifest().walk(matcher)

        s = set()
        for fname in files:
            fctx = c[fname]
            # file-level ancestors, mapped back to changelog revisions
            s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1083 1083
@predicate('follow([pattern[, startrev]])', safe=True)
def follow(repo, subset, x):
    """
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern in the revision given by startrev are followed, including copies.
    """
    # thin wrapper around the shared _follow() implementation
    return _follow(repo, subset, x, 'follow')
1092 1092
@predicate('_followfirst', safe=True)
def _followfirst(repo, subset, x):
    # ``followfirst([pattern[, startrev]])``
    # Like ``follow([pattern[, startrev]])`` but follows only the first parent
    # of every revisions or files revisions.
    # Internal predicate (leading underscore): not listed in user help.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1099 1099
@predicate('followlines(file, fromline:toline[, startrev=.])', safe=True)
def followlines(repo, subset, x):
    """Changesets modifying `file` in line range ('fromline', 'toline').

    Line range corresponds to 'file' content at 'startrev' and should hence be
    consistent with file size. If startrev is not specified, working directory's
    parent is used.
    """
    from . import context # avoid circular import issues

    args = getargsdict(x, 'followlines', 'file *lines startrev')
    if len(args['lines']) != 1:
        raise error.ParseError(_("followlines requires a line range"))

    rev = '.'
    if 'startrev' in args:
        # startrev must resolve to exactly one revision
        revs = getset(repo, fullreposet(repo), args['startrev'])
        if len(revs) != 1:
            raise error.ParseError(
                _("followlines expects exactly one revision"))
        rev = revs.last()

    pat = getstring(args['file'], _("followlines requires a pattern"))
    if not matchmod.patkind(pat):
        # plain path: canonicalize relative to the repo root
        fname = pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        # pattern: it must match exactly one file in 'rev'
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev])
        files = [f for f in repo[rev] if m(f)]
        if len(files) != 1:
            raise error.ParseError(_("followlines expects exactly one file"))
        fname = files[0]

    lr = getrange(args['lines'][0], _("followlines expects a line range"))
    fromline, toline = [getinteger(a, _("line range bounds must be integers"))
                        for a in lr]
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # convert the user-facing 1-based line number to a 0-based index
    fromline -= 1

    fctx = repo[rev].filectx(fname)
    revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline))
    return subset & generatorset(revs, iterasc=False)
1144 1144
@predicate('all()', safe=True)
def getall(repo, subset, x):
    """All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with a spanset drops the "null" revision if present
    return subset & spanset(repo)
1152 1152
@predicate('grep(regex)')
def grep(repo, subset, x):
    """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        # search changed files, user name and description of the changeset
        c = repo[r]
        fields = c.files() + [c.user(), c.description()]
        return any(gr.search(e) for e in fields)

    return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1173 1173
@predicate('_matchfiles', safe=True)
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        s = getstring(arg, "_matchfiles requires string arguments")
        # each argument is a two-character prefix followed by its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'revision')
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                raise error.ParseError('_matchfiles expected at most one '
                                       'default mode')
            default = value
        else:
            raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # This directly read the changelog data as creating changectx for all
    # revisions is quite expensive.
    getfiles = repo.changelog.readfiles
    wdirrev = node.wdirrev
    def matches(x):
        if x == wdirrev:
            # the working directory has no changelog entry; use its context
            files = repo[x].files()
        else:
            files = getfiles(x)
        for f in files:
            if m(f):
                return True
        return False

    return subset.filter(matches,
                         condrepr=('<matchfiles patterns=%r, include=%r '
                                   'exclude=%r, default=%r, rev=%r>',
                                   pats, inc, exc, default, rev))
1237 1237
@predicate('file(pattern)', safe=True)
def hasfile(repo, subset, x):
    """Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # forward to _matchfiles with a single 'p:' (pattern) argument
    arg = ('string', 'p:' + pat)
    return _matchfiles(repo, subset, arg)
1250 1250
@predicate('head()', safe=True)
def head(repo, subset, x):
    """Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the heads of every named branch, as revision numbers
    for branchheads in repo.branchmap().itervalues():
        hs.update(cl.rev(h) for h in branchheads)
    return subset & baseset(hs)
1262 1262
@predicate('heads(set)', safe=True)
def heads(repo, subset, x):
    """Members of set with no children in set.
    """
    # a member is a head unless it is a parent of another member
    members = getset(repo, subset, x)
    return members - parents(repo, subset, x)
1270 1270
@predicate('hidden()', safe=True)
def hidden(repo, subset, x):
    """Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # hidden revisions are those filtered out of the 'visible' view
    return subset & repoview.filterrevs(repo, 'visible')
1279 1279
@predicate('keyword(string)', safe=True)
def keyword(repo, subset, x):
    """Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.

    For a regular expression or case sensitive search of these fields, use
    ``grep(regex)``.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring search over the same fields as grep()
        ctx = repo[r]
        candidates = ctx.files() + [ctx.user(), ctx.description()]
        return any(kw in encoding.lower(t) for t in candidates)

    return subset.filter(matches, condrepr=('<keyword %r>', kw))
1297 1297
@predicate('limit(set[, n[, offset]])', safe=True)
def limit(repo, subset, x):
    """First n members of set, defaulting to 1, starting from offset.
    """
    args = getargsdict(x, 'limit', 'set n offset')
    if 'set' not in args:
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit requires one to three arguments"))
    # i18n: "limit" is a keyword
    lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
    # i18n: "limit" is a keyword
    ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
    if ofs < 0:
        raise error.ParseError(_("negative offset"))
    os = getset(repo, fullreposet(repo), args['set'])
    result = []
    it = iter(os)
    # use a throwaway counter name; the original loops reused 'x', silently
    # shadowing this function's parsed-AST argument
    for _n in xrange(ofs):
        y = next(it, None)
        if y is None:
            break
    for _n in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
                                     lim, ofs, subset, os))
1327 1327
@predicate('last(set, [n])', safe=True)
def last(repo, subset, x):
    """Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        # i18n: "last" is a keyword
        lim = getinteger(l[1], _("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # use a throwaway counter name; the original loop reused 'x', silently
    # shadowing this function's parsed-AST argument
    for _n in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1349 1349
@predicate('max(set)', safe=True)
def maxrev(repo, subset, x):
    """Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        return baseset(datarepr=('<max %r, %r>', subset, os))
    if m in subset:
        return baseset([m], datarepr=('<max %r, %r>', subset, os))
    return baseset(datarepr=('<max %r, %r>', subset, os))
1364 1364
@predicate('merge()', safe=True)
def merge(repo, subset, x):
    """Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge has a real (non-null) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge, condrepr='<merge>')
1374 1374
@predicate('branchpoint()', safe=True)
def branchpoint(repo, subset, x):
    """Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[r - baserev] holds the number of children of revision r;
    # revisions below baserev cannot be in subset, so they are not tracked
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1,
                         condrepr='<branchpoint>')
1394 1394
@predicate('min(set)', safe=True)
def minrev(repo, subset, x):
    """Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        return baseset(datarepr=('<min %r, %r>', subset, os))
    if m in subset:
        return baseset([m], datarepr=('<min %r, %r>', subset, os))
    return baseset(datarepr=('<min %r, %r>', subset, os))
1409 1409
@predicate('modifies(pattern)', safe=True)
def modifies(repo, subset, x):
    """Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == "modified"
    return checkstatus(repo, subset, pat, 0)
1421 1421
@predicate('named(namespace)')
def named(repo, subset, x):
    """The changesets in a given namespace.

    Pattern matching is supported for `namespace`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact name: it must exist, otherwise abort
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    # the null revision is never part of a namespace's result
    names -= set([node.nullrev])
    return subset & names
1458 1458
@predicate('id(string)', safe=True)
def node_(repo, subset, x):
    """Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hex hash: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: resolve only if it is unambiguous
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1482 1482
@predicate('obsolete()', safe=True)
def obsolete(repo, subset, x):
    """Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict the subset to the obsolescence-tracked "obsolete" revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1490 1490
@predicate('only(set, [set])', safe=True)
def only(repo, subset, x):
    """Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # exclude every repo head that is neither in nor descended from
        # 'include' (idiomatic "not in" instead of "not rev in")
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & results
1516 1516
@predicate('origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions.  Omitting the optional set is the
    same as passing all().  If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        """Walk the graft/transplant/rebase chain back to its first source.

        Returns None when 'rev' has no recorded source at all.
        """
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # build the set lazily and drop the None marker for source-less revisions
    # (avoids materializing an intermediate list and a throwaway set)
    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o
1548 1548
# safe=False: this predicate contacts a remote repository, which must not be
# triggerable from an hgweb session (BC change, see commit message)
@predicate('outgoing([path])', safe=False)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the remote discovery chatter
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1575 1575
@predicate('p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1594 1594
@predicate('p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # not a merge: there is no second parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps
1617 1617
def parentpost(repo, subset, x, order):
    # handler for the postfix '^' operator; 'order' is accepted for
    # interface compatibility but first-parent lookup is order-independent
    return p1(repo, subset, x)
1620 1620
@predicate('parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # bind methods locally: this loop can run over the whole repo
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; query its
                # context for the parents instead
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1640 1640
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache exposes precomputed per-phase rev sets
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1654 1654
@predicate('draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    # delegate the actual selection to the shared phase helper
    return _phase(repo, subset, phases.draft)
1662 1662
@predicate('secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    # delegate the actual selection to the shared phase helper
    return _phase(repo, subset, phases.secret)
1670 1670
def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # 'n' arrives as a parsed token tuple; its payload must be 0, 1 or 2
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            # ^2 only applies to merges; non-merges contribute nothing
            parents = cl.parentrevs(r)
            if parents[1] != node.nullrev:
                ps.add(parents[1])
    return subset & ps
1695 1695
@predicate('present(set)', safe=True)
def present(repo, subset, x):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and yield nothing instead of aborting
        return baseset()
1709 1709
# for internal use
@predicate('_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union all non-public phase sets ([1:] skips 'public')
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, condrepr=('<phase %r>', target),
                             cache=False)
1728 1728
@predicate('public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # note: unlike draft()/secret(), this path does not use _phase()'s
    # precomputed sets; it checks each revision's phase individually
    phase = repo._phasecache.phase
    target = phases.public

    def ispublic(r):
        return phase(repo, r) == target

    return subset.filter(ispublic, condrepr=('<phase %r>', target),
                         cache=False)
1739 1739
# safe=False: this predicate contacts a remote repository, which must not be
# triggerable from an hgweb session (BC change, see commit message)
@predicate('remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means "the branch the working directory is on"
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the remote to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1774 1774
@predicate('removes(pattern)', safe=True)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects removed files in checkstatus()
    return checkstatus(repo, subset, pattern, 2)
1786 1786
@predicate('rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never in the changelog
    if num != node.nullrev and num not in repo.changelog:
        return baseset()
    return subset & baseset([num])
1802 1802
@predicate('matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # reference revisions whose field values candidates must reproduce
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' and 'user' are synonyms; normalize to 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if it agrees with ANY reference revision
        # on ALL selected fields
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1914 1914
@predicate('reverse(set)', safe=True, takeorder=True)
def reverse(repo, subset, x, order):
    """Reverse order of set.
    """
    result = getset(repo, subset, x)
    # only reorder when this expression is allowed to define the ordering
    if order == defineorder:
        result.reverse()
    return result
1923 1923
@predicate('roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no (non-null) parent inside the set itself
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot, condrepr='<roots>')
1936 1936
# sort key name -> function extracting the comparison key from a changectx;
# consumed by sort() below
_sortkeyfuncs = {
    'rev': lambda c: c.rev(),
    'branch': lambda c: c.branch(),
    'desc': lambda c: c.description(),
    'user': lambda c: c.user(),
    'author': lambda c: c.user(),  # 'author' is an alias for 'user'
    'date': lambda c: c.date()[0],  # compare on the timestamp component
    }
1945 1945
def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
    if 'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_('sort requires one or two arguments'))

    keys = "rev"
    if 'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args['keys'], _("sort spec must be a string"))

    keyflags = []
    for spec in keys.split():
        descending = spec.startswith('-')
        name = spec[1:] if descending else spec
        if name != 'topo' and name not in _sortkeyfuncs:
            raise error.ParseError(_("unknown sort key %r") % spec)
        keyflags.append((name, descending))

    if len(keyflags) > 1 and any(name == 'topo' for name, _d in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(_('topo sort order cannot be combined '
                                 'with other sort keys'))

    opts = {}
    if 'topo.firstbranch' in args:
        if not any(name == 'topo' for name, _d in keyflags):
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(_('topo.firstbranch can only be used '
                                     'when using the topo sort key'))
        opts['topo.firstbranch'] = args['topo.firstbranch']

    return args['set'], keyflags, opts
1982 1982
@predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s)

    # nothing to sort, or this expression may not impose an order
    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == "rev":
        # pure revision-number sort has a fast path on the smartset
        revs.sort(reverse=keyflags[0][1])
        return revs
    if keyflags[0][0] == "topo":
        firstbranch = ()
        if 'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts['topo.firstbranch'])
        revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
                       istopo=True)
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # general case: rely on sort() stability and apply the least
    # significant key first
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
    return baseset([c.rev() for c in ctxs])
2025 2025
def _toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to be
    # emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in the
    # group ('revs') that is not itself contained in the group. The main idea
    # of this algorithm is to delay as much as possible the emission of any
    # revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], set([rev])))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given) or
            # if we emitted a whole disconnected set of the graph (reached a
            # root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
2223 2223
@predicate('subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    pat = None
    if args:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes can indicate subrepo modifications
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        k, p, matchfn = util.stringmatcher(pat)
        return (name for name in names if matchfn(name))

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2267 2267
def _substringmatcher(pattern, casesensitive=True):
    """Like util.stringmatcher(), but literal patterns match as substrings."""
    kind, pattern, matcher = util.stringmatcher(pattern,
                                                casesensitive=casesensitive)
    if kind == 'literal':
        # replace the equality matcher with substring containment
        if casesensitive:
            matcher = lambda s: pattern in s
        else:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
    return kind, pattern, matcher
2278 2278
@predicate('tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision, excluding the implicit 'tip' tag
        tagrevs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & tagrevs
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        tagrevs = set([repo[tn].rev()])
    else:
        tagrevs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & tagrevs
2306 2306
@predicate('tagged', safe=True)
def tagged(repo, subset, x):
    # alias: delegate straight to tag()
    return tag(repo, subset, x)
2310 2310
@predicate('unstable()', safe=True)
def unstable(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2319 2319
2320 2320
@predicate('user(string)', safe=True)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    # 'user' is a synonym of 'author'; share the implementation
    return author(repo, subset, x)
2329 2329
@predicate('wdir', safe=True)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if node.wdirrev not in subset and not isinstance(subset, fullreposet):
        return baseset()
    return baseset([node.wdirrev])
2338 2338
def _orderedlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    seen = set()
    for symbol in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(symbol)
            if str(r) != symbol or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # fall back to full symbol resolution
            revs = stringset(repo, subset, symbol)

        for r in revs:
            if r in seen:
                continue
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                result.append(r)
                seen.add(r)
    return baseset(result)
2366 2366
# for internal use
@predicate('_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order != followorder:
        return _orderedlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedlist(repo, fullreposet(repo), x)
2375 2375
def _orderedintlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # '\0'-separated list of integer revisions, kept in the given order
    wanted = [int(r) for r in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2383 2383
# for internal use
@predicate('_intlist', safe=True, takeorder=True)
def _intlist(repo, subset, x, order):
    if order != followorder:
        return _orderedintlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedintlist(repo, fullreposet(repo), x)
2392 2392
def _orderedhexlist(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    # '\0'-separated list of hex nodes, kept in the given order
    wanted = [cl.rev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2401 2401
# for internal use
@predicate('_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order != followorder:
        return _orderedhexlist(repo, subset, x)
    # slow path to take the subset order
    return subset & _orderedhexlist(repo, fullreposet(repo), x)
2410 2410
# parse-tree operator name -> evaluation function; consulted when an
# analyzed/optimized tree is evaluated against a subset
methods = {
    "range": rangeset,
    "rangeall": rangeall,
    "rangepre": rangepre,
    "rangepost": rangepost,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "difference": differenceset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": parentpost,
}
2430 2430
# Constants for ordering requirement, used in _analyze():
#
# If 'define', any nested functions and operations can change the ordering of
# the entries in the set. If 'follow', any nested functions and operations
# should take the ordering specified by the first operand to the '&' operator.
#
# For instance,
#
#   X & (Y | Z)
#   ^   ^^^^^^^
#   |   follow
#   define
#
# will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
# of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
#
# 'any' means the order doesn't matter. For instance,
#
#   X & !Y
#       ^
#       any
#
# 'y()' can either enforce its ordering requirement or take the ordering
# specified by 'x()' because 'not()' doesn't care about the order.
#
# Transition of ordering requirement:
#
# 1. starts with 'define'
# 2. shifts to 'follow' by 'x & y'
# 3. changes back to 'define' on function call 'f(x)' or function-like
#    operation 'x (f) y' because 'f' may have its own ordering requirement
#    for 'x' and 'y' (e.g. 'first(x)')
#
anyorder = 'any' # don't care about the order
defineorder = 'define' # should define the order
followorder = 'follow' # must follow the current order

# transition table for 'x & y', from the current expression 'x' to 'y'
_tofolloworder = {
    anyorder: anyorder,
    defineorder: followorder,
    followorder: followorder,
}
2474 2474
def _matchonly(revs, bases):
    """
    >>> f = lambda *args: _matchonly(*map(parse, args))
    >>> f('ancestors(A)', 'not ancestors(B)')
    ('list', ('symbol', 'A'), ('symbol', 'B'))
    """
    # recognize the 'ancestors(X) and not ancestors(Y)' shape and return
    # the argument pair; anything else yields None
    if revs is None or bases is None:
        return None
    if (revs[0] == 'func'
        and getsymbol(revs[1]) == 'ancestors'
        and bases[0] == 'not'
        and bases[1][0] == 'func'
        and getsymbol(bases[1][1]) == 'ancestors'):
        return ('list', revs[2], bases[1][2])
    return None
2489 2489
2490 2490 def _fixops(x):
2491 2491 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2492 2492 handled well by our simple top-down parser"""
2493 2493 if not isinstance(x, tuple):
2494 2494 return x
2495 2495
2496 2496 op = x[0]
2497 2497 if op == 'parent':
2498 2498 # x^:y means (x^) : y, not x ^ (:y)
2499 2499 # x^: means (x^) :, not x ^ (:)
2500 2500 post = ('parentpost', x[1])
2501 2501 if x[2][0] == 'dagrangepre':
2502 2502 return _fixops(('dagrange', post, x[2][1]))
2503 2503 elif x[2][0] == 'rangepre':
2504 2504 return _fixops(('range', post, x[2][1]))
2505 2505 elif x[2][0] == 'rangeall':
2506 2506 return _fixops(('rangepost', post))
2507 2507 elif op == 'or':
2508 2508 # make number of arguments deterministic:
2509 2509 # x + y + z -> (or x y z) -> (or (list x y z))
2510 2510 return (op, _fixops(('list',) + x[1:]))
2511 2511
2512 2512 return (op,) + tuple(_fixops(y) for y in x[1:])
2513 2513
def _analyze(x, order):
    """Recursively transform a raw parsed tree into an evaluatable tree,
    mapping pseudo operations to real ones and annotating structural nodes
    with their ordering requirement (see the ordering constants above).
    """
    if x is None:
        return x

    op = x[0]
    if op == 'minus':
        # 'x - y' is rewritten as 'x and not y'
        return _analyze(('and', x[1], ('not', x[2])), order)
    elif op == 'only':
        t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
        return _analyze(t, order)
    elif op == 'onlypost':
        return _analyze(('func', ('symbol', 'only'), x[1]), order)
    elif op == 'dagrangepre':
        # '::x' is rewritten as 'ancestors(x)'
        return _analyze(('func', ('symbol', 'ancestors'), x[1]), order)
    elif op == 'dagrangepost':
        # 'x::' is rewritten as 'descendants(x)'
        return _analyze(('func', ('symbol', 'descendants'), x[1]), order)
    elif op == 'negate':
        s = getstring(x[1], _("can't negate that"))
        return _analyze(('string', '-' + s), order)
    elif op in ('string', 'symbol'):
        return x
    elif op == 'and':
        ta = _analyze(x[1], order)
        # the right operand must follow the ordering of the left one
        tb = _analyze(x[2], _tofolloworder[order])
        return (op, ta, tb, order)
    elif op == 'or':
        return (op, _analyze(x[1], order), order)
    elif op == 'not':
        # 'not' doesn't care about the ordering of its operand
        return (op, _analyze(x[1], anyorder), order)
    elif op == 'rangeall':
        return (op, None, order)
    elif op in ('rangepre', 'rangepost', 'parentpost'):
        return (op, _analyze(x[1], defineorder), order)
    elif op == 'group':
        return _analyze(x[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        ta = _analyze(x[1], defineorder)
        tb = _analyze(x[2], defineorder)
        return (op, ta, tb, order)
    elif op == 'list':
        return (op,) + tuple(_analyze(y, order) for y in x[1:])
    elif op == 'keyvalue':
        return (op, x[1], _analyze(x[2], order))
    elif op == 'func':
        f = getsymbol(x[1])
        d = defineorder
        if f == 'present':
            # 'present(set)' is known to return the argument set with no
            # modification, so forward the current order to its argument
            d = order
        return (op, x[1], _analyze(x[2], d), order)
    raise ValueError('invalid operator %r' % op)
2566 2566
def analyze(x, order=defineorder):
    """Transform raw parsed tree to evaluatable tree which can be fed to
    optimize() or getset()

    All pseudo operations should be mapped to real operations or functions
    defined in methods or symbols table respectively.

    'order' specifies how the current expression 'x' is ordered (see the
    constants defined above.)
    """
    # all the real work happens in the recursive helper
    return _analyze(x, order)
2578 2578
def _optimize(x, small):
    """Optimize an evaluatable tree, recursively.

    Return a (weight, tree) pair where 'weight' is a rough estimate of the
    evaluation cost (used to evaluate the cheaper operand of 'and' first)
    and 'tree' is the optimized tree.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op in ('string', 'symbol'):
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = _optimize(x[1], True)
        wb, tb = _optimize(x[2], True)
        order = x[3]
        w = min(wa, wb)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        tm = _matchonly(ta, tb) or _matchonly(tb, ta)
        if tm:
            return w, ('func', ('symbol', 'only'), tm, order)

        if tb is not None and tb[0] == 'not':
            # 'x and not y' has a dedicated 'difference' operation
            return wa, ('difference', ta, tb[1], order)

        # put the cheaper operand first so it is evaluated first
        if wa > wb:
            return w, (op, tb, ta, order)
        return w, (op, ta, tb, order)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        order = x[2]
        ws, ts, ss = [], [], []
        def flushss():
            # collapse the pending run of trivial string/symbol operands
            # into a single _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s), order)
                w, t = _optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in getlist(x[1]):
            w, t = _optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op, ('list',) + tuple(ts), order)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1][:3] == ('func', ('symbol', 'public'), None):
            order = x[1][3]
            newsym = ('func', ('symbol', '_notpublic'), None, order)
            o = _optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = _optimize(x[1], not small)
            order = x[2]
            return o[0], (op, o[1], order)
    elif op == 'rangeall':
        return smallbonus, x
    elif op in ('rangepre', 'rangepost', 'parentpost'):
        o = _optimize(x[1], small)
        order = x[2]
        return o[0], (op, o[1], order)
    elif op in ('dagrange', 'range', 'parent', 'ancestor'):
        wa, ta = _optimize(x[1], small)
        wb, tb = _optimize(x[2], small)
        order = x[3]
        return wa + wb, (op, ta, tb, order)
    elif op == 'list':
        ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
        return sum(ws), (op,) + ts
    elif op == 'keyvalue':
        w, t = _optimize(x[2], small)
        return w, (op, x[1], t)
    elif op == 'func':
        f = getsymbol(x[1])
        wa, ta = _optimize(x[2], small)
        # hard-coded cost estimates for known predicates
        if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep',
                 'keyword', 'outgoing', 'user', 'destination'):
            w = 10 # slow
        elif f in ('modifies', 'adds', 'removes'):
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'):
            w = 0
        elif f == "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        order = x[3]
        return w + wa, (op, x[1], ta, order)
    raise ValueError('invalid operator %r' % op)
2688 2688
def optimize(tree):
    """Optimize evaluatable tree

    All pseudo operations should be transformed beforehand.
    """
    # the weight computed alongside the tree is an internal detail
    return _optimize(tree, small=True)[1]
2696 2696
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is additionally accepted for backward compatibility; see
# _aliasrules._parse below)
_aliassyminitletters = _syminitletters | set(pycompat.sysstr('$'))
2700 2700
def _parsewith(spec, lookup=None, syminitletters=None):
    """Generate a parse tree of given spec with given tokenizing options

    >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
    ('func', ('symbol', 'foo'), ('symbol', '$1'))
    >>> _parsewith('$1')
    Traceback (most recent call last):
      ...
    ParseError: ("syntax error in revset '$1'", 0)
    >>> _parsewith('foo bar')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 4)
    """
    tokens = tokenize(spec, lookup=lookup, syminitletters=syminitletters)
    tree, pos = parser.parser(elements).parse(tokens)
    if pos != len(spec):
        # the parser stopped before consuming the whole input
        raise error.ParseError(_('invalid token'), pos)
    simplified = parser.simplifyinfixops(tree, ('list', 'or'))
    return _fixops(simplified)
2721 2721
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of revset aliases"""
    _section = _('revset alias')

    @staticmethod
    def _parse(spec):
        """Parse alias declaration/definition ``spec``

        This allows symbol names to use also ``$`` as an initial letter
        (for backward compatibility), and callers of this function should
        examine whether ``$`` is used also for unexpected symbols or not.
        """
        return _parsewith(spec, syminitletters=_aliassyminitletters)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if ``tree`` is a call of a plain symbol;
        otherwise fall through and return None implicitly.
        """
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
2740 2740
def expandaliases(ui, tree):
    """Expand aliases from the [revsetalias] config section into tree"""
    aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
    expanded = _aliasrules.expand(aliases, tree)
    # warn about problematic (but not referred) aliases
    for name, alias in sorted(aliases.iteritems()):
        if not alias.error or alias.warned:
            continue
        ui.warn(_('warning: %s\n') % (alias.error))
        alias.warned = True
    return expanded
2750 2750
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation: recurse into the children
        return tuple(foldconcat(t) for t in tree)
    # flatten (possibly nested) _concat nodes left-to-right into one string
    pieces = []
    stack = [tree]
    while stack:
        elem = stack.pop()
        if elem[0] == '_concat':
            # push children so the leftmost is popped first
            stack.extend(reversed(elem[1:]))
        elif elem[0] in ('string', 'symbol'):
            pieces.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (elem[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2771 2771
def parse(spec, lookup=None):
    """Parse a revset specification into a raw parse tree

    'lookup' is an optional callable forwarded to the tokenizer.
    """
    return _parsewith(spec, lookup=lookup)
2774 2774
def posttreebuilthook(tree, repo):
    """Hook point invoked with the fully optimized tree and the repo.

    The default implementation does nothing; extensions may wrap this
    function to observe or act on the tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2778 2778
def match(ui, spec, repo=None, order=defineorder):
    """Create a matcher for a single revision spec

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # a single spec is just the one-element case of matchany()
    return matchany(ui, [spec], repo=repo, order=order)
2786 2786
def matchany(ui, specs, repo=None, order=defineorder):
    """Create a matcher that will include any revisions matching one of the
    given specs

    If order=followorder, a matcher takes the ordering specified by the input
    set.
    """
    # no specs at all: matcher that always yields an empty set
    if not specs:
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))

    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # several specs are combined with a single top-level 'or'
        tree = ('or', ('list',) + tuple(trees))

    if ui:
        tree = expandaliases(ui, tree)
    tree = foldconcat(tree)
    tree = analyze(tree, order)
    tree = optimize(tree)
    posttreebuilthook(tree, repo)
    return makematcher(tree)
2815 2815
def makematcher(tree):
    """Create a matcher from an evaluatable tree"""
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() requires a smartset; wrap plain collections in a baseset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2827 2827
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def _quote(s):
        # escaped, single-quoted string literal
        return repr(str(s))

    def _fmtone(code, arg):
        # render a single value according to its format character
        if code == 'd':
            return str(int(arg))
        elif code == 's':
            return _quote(arg)
        elif code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif code == 'n':
            return _quote(node.hex(arg))
        elif code == 'b':
            return _quote(arg.branch())

    def _fmtlist(seq, code):
        # render a list of values; trivial types collapse into one
        # _list()/_intlist()/_hexlist() call, others use a balanced 'or' tree
        count = len(seq)
        if count == 0:
            return "_list('')"
        elif count == 1:
            return _fmtone(code, seq[0])
        elif code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in seq)
        elif code == 's':
            return "_list('%s')" % "\0".join(seq)
        elif code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in seq)
        elif code == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in seq)

        half = count // 2
        return '(%s or %s)' % (_fmtlist(seq[:half], code),
                               _fmtlist(seq[half:], code))

    out = []
    pos = 0
    argidx = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                out.append(d)
            elif d in 'dsnbr':
                out.append(_fmtone(d, args[argidx]))
                argidx += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                out.append(_fmtlist(list(args[argidx]), d))
                argidx += 1
            else:
                raise error.Abort(_('unexpected revspec format character %s')
                                  % d)
        else:
            out.append(c)
        pos += 1

    return ''.join(out)
2922 2922
def prettyformat(tree):
    """Pretty-print a parsed revset tree via parser.prettyformat()"""
    return parser.prettyformat(tree, ('string', 'symbol'))
2925 2925
def depth(tree):
    """Return the nesting depth of a parsed tree (0 for a leaf)"""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(t) for t in tree)
2931 2931
def funcsused(tree):
    """Return the set of function names used anywhere in a parsed tree"""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves and non-tuples contain no function calls
        return set()
    funcs = set()
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    for subtree in tree[1:]:
        funcs |= funcsused(subtree)
    return funcs
2942 2942
2943 2943 def _formatsetrepr(r):
2944 2944 """Format an optional printable representation of a set
2945 2945
2946 2946 ======== =================================
2947 2947 type(r) example
2948 2948 ======== =================================
2949 2949 tuple ('<not %r>', other)
2950 2950 str '<branch closed>'
2951 2951 callable lambda: '<branch %r>' % sorted(b)
2952 2952 object other
2953 2953 ======== =================================
2954 2954 """
2955 2955 if r is None:
2956 2956 return ''
2957 2957 elif isinstance(r, tuple):
2958 2958 return r[0] % r[1:]
2959 2959 elif isinstance(r, str):
2960 2960 return r
2961 2961 elif callable(r):
2962 2962 return r()
2963 2963 else:
2964 2964 return repr(r)
2965 2965
class abstractsmartset(object):
    """Base class of the smartset API: an ordered set of revision numbers.

    Concrete subclasses must implement the abstract methods below; this
    base class provides min()/max(), the set operators and filter() on
    top of them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def istopo(self):
        """True if the set will iterate in topographical order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is None:
            v = min(self)
        else:
            for v in self.fastasc():
                break
            else:
                raise ValueError('arg is an empty sequence')
        # cache the computed value so later calls are O(1)
        self.min = lambda: v
        return v

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is None:
            v = max(self)
        else:
            for v in self.fastdesc():
                break
            else:
                raise ValueError('arg is an empty sequence')
        # cache the computed value like min() does; previously the slow
        # path recomputed max(self) on every call
        self.max = lambda: v
        return v

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with everything is a no-op
            return self
        return self.filter(other.__contains__, condrepr=other, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
                           cache=False)

    def filter(self, condition, condrepr=None, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean. Optional `condrepr` provides a printable representation of
        the given `condition`.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition, condrepr)
3084 3084
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=(), datarepr=None, istopo=False):
        """
        datarepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given data.
        """
        # _ascending: None means iterate in insertion order; True/False
        # mean iterate over the sorted list (ascending/descending)
        self._ascending = None
        self._istopo = istopo
        if not isinstance(data, list):
            if isinstance(data, set):
                # reuse the caller's set to back membership testing
                self._set = data
                # set has no order we pick one for stability purpose
                self._ascending = True
            data = list(data)
        self._list = data
        self._datarepr = datarepr

    @util.propertycache
    def _set(self):
        # lazily-built set backing __contains__
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily-built ascending copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound __contains__ of the underlying set
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)
        # sorting discards any topographical ordering
        self._istopo = False

    def reverse(self):
        if self._ascending is None:
            # no sort direction chosen yet: reverse the list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending
        self._istopo = False

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def istopo(self):
        """Is the collection is in topographical order or not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._istopo

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # '', '-' or '+' marks the current iteration direction
        d = {None: '', False: '-', True: '+'}[self._ascending]
        s = _formatsetrepr(self._datarepr)
        if not s:
            l = self._list
            # if _list has been built from a set, it might have a different
            # order from one python implementation to another.
            # We fallback to the sorted version for a stable output.
            if self._ascending is not None:
                l = self._asclist
            s = repr(l)
        return '<%s%s %s>' % (type(self).__name__, d, s)
3208 3208
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True, condrepr=None):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        condrepr: a tuple of (format, obj, ...), a function or an object that
        provides a printable representation of the given condition.
        """
        self._subset = subset
        self._condition = condition
        self._condrepr = condrepr

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # hoist the attribute lookup out of the loop
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only available if the subset provides it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        fast = None
        # prefer an iterator matching the declared order, then any fast
        # iterator, then fall back to plain iteration over self
        candidates = [self.fastasc if self.isascending() else None,
                      self.fastdesc if self.isdescending() else None,
                      self.fastasc,
                      self.fastdesc]
        for candidate in candidates:
            if candidate is not None:
                fast = candidate
                break

        if fast is not None:
            it = fast()
        else:
            it = self

        # non-empty iff at least one revision passes the condition
        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        # until this gets improved, we use generator expression
        # here, since list comprehensions are free to call __len__ again
        # causing infinite recursion
        l = baseset(r for r in self)
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def istopo(self):
        return self._subset.istopo()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            # the last ascending element is the first in descending order
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iteration: scan through and keep the last
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        xs = [repr(self._subset)]
        s = _formatsetrepr(self._condrepr)
        if s:
            xs.append(s)
        return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3321 3321
3322 3322 def _iterordered(ascending, iter1, iter2):
3323 3323 """produce an ordered iteration from two iterators with the same order
3324 3324
3325 3325 The ascending is used to indicated the iteration direction.
3326 3326 """
3327 3327 choice = max
3328 3328 if ascending:
3329 3329 choice = min
3330 3330
3331 3331 val1 = None
3332 3332 val2 = None
3333 3333 try:
3334 3334 # Consume both iterators in an ordered way until one is empty
3335 3335 while True:
3336 3336 if val1 is None:
3337 3337 val1 = next(iter1)
3338 3338 if val2 is None:
3339 3339 val2 = next(iter2)
3340 3340 n = choice(val1, val2)
3341 3341 yield n
3342 3342 if val1 == n:
3343 3343 val1 = None
3344 3344 if val2 == n:
3345 3345 val2 = None
3346 3346 except StopIteration:
3347 3347 # Flush any remaining values and consume the other one
3348 3348 it = iter2
3349 3349 if val1 is not None:
3350 3350 yield val1
3351 3351 it = iter1
3352 3352 elif val2 is not None:
3353 3353 # might have been equality and both are empty
3354 3354 yield val2
3355 3355 for val in it:
3356 3356 yield val
3357 3357
3358 3358 class addset(abstractsmartset):
3359 3359 """Represent the addition of two sets
3360 3360
3361 3361 Wrapper structure for lazily adding two structures without losing much
3362 3362 performance on the __contains__ method
3363 3363
3364 3364 If the ascending attribute is set, that means the two structures are
3365 3365 ordered in either an ascending or descending way. Therefore, we can add
3366 3366 them maintaining the order by iterating over both at the same time
3367 3367
3368 3368 >>> xs = baseset([0, 3, 2])
3369 3369 >>> ys = baseset([5, 2, 4])
3370 3370
3371 3371 >>> rs = addset(xs, ys)
3372 3372 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3373 3373 (True, True, False, True, 0, 4)
3374 3374 >>> rs = addset(xs, baseset([]))
3375 3375 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3376 3376 (True, True, False, 0, 2)
3377 3377 >>> rs = addset(baseset([]), baseset([]))
3378 3378 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3379 3379 (False, False, None, None)
3380 3380
3381 3381 iterate unsorted:
3382 3382 >>> rs = addset(xs, ys)
3383 3383 >>> # (use generator because pypy could call len())
3384 3384 >>> list(x for x in rs) # without _genlist
3385 3385 [0, 3, 2, 5, 4]
3386 3386 >>> assert not rs._genlist
3387 3387 >>> len(rs)
3388 3388 5
3389 3389 >>> [x for x in rs] # with _genlist
3390 3390 [0, 3, 2, 5, 4]
3391 3391 >>> assert rs._genlist
3392 3392
3393 3393 iterate ascending:
3394 3394 >>> rs = addset(xs, ys, ascending=True)
3395 3395 >>> # (use generator because pypy could call len())
3396 3396 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3397 3397 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3398 3398 >>> assert not rs._asclist
3399 3399 >>> len(rs)
3400 3400 5
3401 3401 >>> [x for x in rs], [x for x in rs.fastasc()]
3402 3402 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3403 3403 >>> assert rs._asclist
3404 3404
3405 3405 iterate descending:
3406 3406 >>> rs = addset(xs, ys, ascending=False)
3407 3407 >>> # (use generator because pypy could call len())
3408 3408 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3409 3409 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3410 3410 >>> assert not rs._asclist
3411 3411 >>> len(rs)
3412 3412 5
3413 3413 >>> [x for x in rs], [x for x in rs.fastdesc()]
3414 3414 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3415 3415 >>> assert rs._asclist
3416 3416
3417 3417 iterate ascending without fastasc:
3418 3418 >>> rs = addset(xs, generatorset(ys), ascending=True)
3419 3419 >>> assert rs.fastasc is None
3420 3420 >>> [x for x in rs]
3421 3421 [0, 2, 3, 4, 5]
3422 3422
3423 3423 iterate descending without fastdesc:
3424 3424 >>> rs = addset(generatorset(xs), ys, ascending=False)
3425 3425 >>> assert rs.fastdesc is None
3426 3426 >>> [x for x in rs]
3427 3427 [5, 4, 3, 2, 0]
3428 3428 """
    def __init__(self, revs1, revs2, ascending=None):
        # the two underlying sets being unioned
        self._r1 = revs1
        self._r2 = revs2
        # iterator state slot
        self._iter = None
        # None: arbitrary order; True/False: both inputs ordered asc/desc
        self._ascending = ascending
        # lazily-built caches: the materialized union and its sorted form
        self._genlist = None
        self._asclist = None
3436 3436
    def __len__(self):
        # requires materializing the union via _list; potentially expensive
        return len(self._list)
3439 3439
    def __nonzero__(self):
        # non-empty if either side is non-empty; avoids materializing
        return bool(self._r1) or bool(self._r2)
3442 3442
    @util.propertycache
    def _list(self):
        # materialize (and memoize) the union in iteration order
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist
3448 3448
    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                # the union was already materialized; reuse it
                return iter(self._genlist)
            def arbitraryordergen():
                # all of r1, then the part of r2 not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        # merge the two ordered streams, dropping duplicates
        return _iterordered(self._ascending, iter1, iter2)
3495 3495
3496 3496 def _trysetasclist(self):
3497 3497 """populate the _asclist attribute if possible and necessary"""
3498 3498 if self._genlist is not None and self._asclist is None:
3499 3499 self._asclist = sorted(self._genlist)
3500 3500
3501 3501 @property
3502 3502 def fastasc(self):
3503 3503 self._trysetasclist()
3504 3504 if self._asclist is not None:
3505 3505 return self._asclist.__iter__
3506 3506 iter1 = self._r1.fastasc
3507 3507 iter2 = self._r2.fastasc
3508 3508 if None in (iter1, iter2):
3509 3509 return None
3510 3510 return lambda: _iterordered(True, iter1(), iter2())
3511 3511
3512 3512 @property
3513 3513 def fastdesc(self):
3514 3514 self._trysetasclist()
3515 3515 if self._asclist is not None:
3516 3516 return self._asclist.__reversed__
3517 3517 iter1 = self._r1.fastdesc
3518 3518 iter2 = self._r2.fastdesc
3519 3519 if None in (iter1, iter2):
3520 3520 return None
3521 3521 return lambda: _iterordered(False, iter1(), iter2())
3522 3522
3523 3523 def __contains__(self, x):
3524 3524 return x in self._r1 or x in self._r2
3525 3525
3526 3526 def sort(self, reverse=False):
3527 3527 """Sort the added set
3528 3528
3529 3529 For this we use the cached list with all the generated values and if we
3530 3530 know they are ascending or descending we can sort them in a smart way.
3531 3531 """
3532 3532 self._ascending = not reverse
3533 3533
3534 3534 def isascending(self):
3535 3535 return self._ascending is not None and self._ascending
3536 3536
3537 3537 def isdescending(self):
3538 3538 return self._ascending is not None and not self._ascending
3539 3539
3540 3540 def istopo(self):
3541 3541 # not worth the trouble asserting if the two sets combined are still
3542 3542 # in topographical order. Use the sort() predicate to explicitly sort
3543 3543 # again instead.
3544 3544 return False
3545 3545
3546 3546 def reverse(self):
3547 3547 if self._ascending is None:
3548 3548 self._list.reverse()
3549 3549 else:
3550 3550 self._ascending = not self._ascending
3551 3551
3552 3552 def first(self):
3553 3553 for x in self:
3554 3554 return x
3555 3555 return None
3556 3556
3557 3557 def last(self):
3558 3558 self.reverse()
3559 3559 val = self.first()
3560 3560 self.reverse()
3561 3561 return val
3562 3562
3563 3563 def __repr__(self):
3564 3564 d = {None: '', False: '-', True: '+'}[self._ascending]
3565 3565 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3566 3566
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None if the order of `gen` is unknown, True if it yields
                 revisions ascending, False if descending.
        """
        self._gen = gen
        # sorted copy of the consumed values, built once `gen` is exhausted
        self._asclist = None
        # membership cache: value -> bool
        self._cache = {}
        # values consumed from `gen` so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains
        # NOTE(review): the `in` operator looks __contains__ up on the type
        # for new-style classes, so the instance assignments above only take
        # effect for direct .__contains__ calls -- TODO confirm intent.

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        """Membership test for a generator of unknown order."""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen() has now installed both
        # fastasc and fastdesc, so this cannot recurse again
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        # py2 bound generator method; equivalent to calling next() on it
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    # already consumed (possibly by another iterator)
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """Pull values from the wrapped generator, caching as we go."""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: build the sorted cache and install
            # cheap fast iterators over it
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # exhaust the generator to learn the total size
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installed
            # fastasc/fastdesc, so the retry cannot loop.
            # BUGFIX: this previously recursed into first(), returning the
            # wrong end of the set.
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3746 3746
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        # normalize so that _start <= _end always holds; the direction is
        # carried by _ascending instead
        if not self._ascending:
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # set of revisions hidden by the current repoview filter
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def istopo(self):
        # not worth the trouble asserting if the two sets combined are still
        # in topographical order. Use the sort() predicate to explicitly sort
        # again instead.
        return False

    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the repoview filter
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        # py2 xrange keeps this lazy regardless of the span size
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # iterate (honoring the hidden-rev filter) to see if anything is left
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions falling inside the span
            # NOTE(review): __init__ normalizes so start <= end, which makes
            # the first clause below unreachable; harmless but dead code.
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # first element of the opposite direction is our last
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3861 3861
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # `other` is not a smartset; make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # keep the result iterating in this set's current direction
        other.sort(reverse=self.isdescending())
        return other
3889 3889
def prettyformatset(revs):
    """Return a multi-line, indented rendering of repr(revs).

    Every '<' in the repr starts a new line, indented by its nesting depth
    (number of unclosed '<' so far), which makes nested smartset reprs
    readable.
    """
    out = []
    text = repr(revs)
    total = len(text)
    pos = 0
    while pos < total:
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        # depth = unclosed '<' before this segment
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        out.append((depth, text[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + seg for depth, seg in out)
3903 3903
def loadpredicate(ui, extname, registrarobj):
    """Load revset predicates from specified registrarobj

    Each predicate is registered in the global symbol table; predicates
    whose ``_safe`` attribute is true are also recorded in ``safesymbols``.
    ``ui`` and ``extname`` are unused here.
    """
    table = registrarobj._table
    for name in table:
        func = table[name]
        symbols[name] = func
        if func._safe:
            safesymbols.add(name)
3911 3911
# load built-in predicates explicitly to setup safesymbols
# (ui and extname arguments are unused by loadpredicate, hence None)
loadpredicate(None, None, predicate)

# tell hggettext to extract docstrings from these functions:
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now