##// END OF EJS Templates
revset: remove existence check from min() and max()...
Durham Goode -
r26305:ade5c488 default
parent child Browse files
Show More
@@ -1,3838 +1,3846 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    When followfirst is true, only the first parent of each changeset is
    followed. Returns a descending generatorset of ancestor revisions
    (including the input revs themselves).
    """
    # cut limits parentrevs() to the first parent when following first parents
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Emit revisions in descending order using a max-heap (heapq is a
        # min-heap, so revisions are stored negated).
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # Feed further input revs into the heap lazily: since the input
            # is sorted descending, the next input is only needed once the
            # walk reaches it.
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    When followfirst is true, a revision only counts as a descendant if it
    is reachable through first-parent links. Returns an ascending
    generatorset; the input revs themselves are NOT yielded unless reached
    as descendants of another input rev.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant iff one of its
            # (first cut) parents is already in `seen`
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-python fallback for changelog.reachableroots. `minroot` bounds the
    walk: parents below it are never visited.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # heads(...) form: no need to walk past a found root
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # <roots>::<heads> form: second pass (ascending rev order) pulls in every
    # visited rev that has a reachable parent, i.e. lies on a root->head path
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
127 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    lowest = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    try:
        # prefer the C implementation on the changelog when available
        found = repo.changelog.reachableroots(lowest, headlist, rootlist,
                                              includepath)
    except AttributeError:
        found = _reachablerootspure(repo, lowest, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
144 144
# Parser table for the revset grammar, consumed by the generic parser in
# parser.py. Larger binding strength binds tighter.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
172 172
# words that are operators, not symbols, when seen bare in a revset
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # single pass, character-at-a-time scanner; `pos` is the cursor and is
    # advanced once more at the bottom of every loop iteration
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # r-prefixed (raw) strings are not unescaped
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
300 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # a two-element args tuple carries (message, position)
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308
309 309 # helpers
310 310
def getstring(x, err):
    """Return the string value of node `x`, raising ParseError(err) if the
    node is not string-valued. Symbols count as strings here."""
    if not x or x[0] not in ('string', 'symbol'):
        raise error.ParseError(err)
    return x[1]
315 315
def getlist(x):
    """Flatten a (possibly nested) 'list' parse node into a python list."""
    if not x:
        return []
    if x[0] != 'list':
        # a single non-list node is a one-element list
        return [x]
    return getlist(x[1]) + [x[2]]
322 322
def getargs(x, min, max, err):
    """Return the argument nodes of `x`, enforcing min/max arity.

    NOTE: the `min`/`max` parameters shadow the builtins; kept for
    interface compatibility. A negative `max` disables the upper bound.
    """
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if max >= 0 and nargs > max:
        raise error.ParseError(err)
    return args
328 328
def getargsdict(x, funcname, keys):
    # Build a {name: argnode} dict for a function call, accepting both
    # positional arguments and 'key=value' pairs; `keys` is a
    # whitespace-separated list of accepted argument names.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
332 332
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
337 337
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    # a symbol node is ('symbol', name); the payload is the name
    name = tree[1]
    return name
344 344
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # a valid func node is ('func', <valid symbol>, ...)
    if tree[0] != 'func':
        return False
    return len(tree) > 1 and isvalidsymbol(tree[1])
349 349
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    namenode = tree[1]
    return getsymbol(namenode)
356 356
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a func node without a third element takes no arguments
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
366 366
def getset(repo, subset, x):
    # Evaluate parse-tree node `x` against `subset` via the `methods` table
    # and always return a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset; return as-is
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap plain lists returned by legacy predicates
    return baseset(s)
381 381
def _getrevsource(repo, r):
    # Return the revision that `r` was copied from by a graft, transplant
    # or rebase operation (as recorded in its extras), or None.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        source = extra.get(label)
        if source is None:
            continue
        try:
            return repo[source].rev()
        except error.RepoLookupError:
            continue
    return None
391 391
392 392 # operator methods
393 393
def stringset(repo, subset, x):
    # Resolve a bare revision identifier (number, hash, tag, ...) against
    # subset. nullrev is special-cased: it is accepted when matching
    # against the full repo.
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
400 400
def rangeset(repo, subset, x, y):
    # 'x:y' - revisions from the first rev of x to the last rev of y,
    # inclusive, in either direction. The working directory pseudo-rev
    # (wdirrev) sits past the end of the changelog and is handled specially.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ascending range ending at the working directory
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
425 425
def dagrange(repo, subset, x, y):
    # 'x::y' - changesets that are both descendants of x and ancestors of y
    # (i.e. lie on a path from x to y, endpoints included)
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
433 433
def andset(repo, subset, x, y):
    # 'x and y': evaluate y within the result of evaluating x
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
436 436
def orset(repo, subset, *xs):
    # 'a or b or ...': evaluate operands and add the results, splitting
    # recursively at the midpoint to keep the addition tree balanced
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
445 445
def notset(repo, subset, x):
    # 'not x': everything in subset that x does not match
    excluded = getset(repo, subset, x)
    return subset - excluded
448 448
def listset(repo, subset, a, b):
    # a bare 'a,b' list outside of a function call is a user error
    raise error.ParseError(_("can't use a list in this context"))
451 451
def keyvaluepair(repo, subset, k, v):
    # a bare 'key=value' outside of a function call is a user error
    raise error.ParseError(_("can't use a key-value pair in this context"))
454 454
def func(repo, subset, a, b):
    # Dispatch a parsed 'name(args)' call to its predicate in `symbols`.
    if a[0] == 'symbol':
        name = a[1]
        if name in symbols:
            return symbols[name](repo, subset, b)

    # unknown function: suggest only documented (public) predicates
    documented = lambda fn: getattr(fn, '__doc__', None) is not None
    syms = [s for (s, fn) in symbols.items() if documented(fn)]
    raise error.UnknownIdentifier(a[1], syms)
463 463
464 464 # functions
465 465
def _mergedefaultdest(repo, subset, x):
    # ``_mergedefaultdest()``

    # default destination for merge.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # NOTE(review): the local `node` assigned below shadows the imported
    # `node` module; harmless here since the module is not used in this
    # function, but worth renaming eventually.
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    if repo._activebookmark:
        # active bookmark: merge with the other head of the same bookmark
        bmheads = repo.bookmarkheads(repo._activebookmark)
        curhead = repo[repo._activebookmark].node()
        if len(bmheads) == 2:
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise util.Abort(_("multiple matching bookmarks to merge - "
                               "please merge with an explicit rev or bookmark"),
                             hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise util.Abort(_("no matching bookmark to merge - "
                               "please merge with an explicit rev or bookmark"),
                             hint=_("run 'hg heads' to see all heads"))
    else:
        # no active bookmark: merge with the other (non-bookmarked) head of
        # the current branch
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            # nothing unambiguous to merge with: explain why and abort
            if len(bheads) > 1:
                raise util.Abort(_("heads are bookmarked - "
                                   "please merge with an explicit rev"),
                                 hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise util.Abort(msg, hint=hint)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # pick the head that is not the working directory parent
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]
    return subset & baseset([repo[node].rev()])
526 526
def _updatedefaultdest(repo, subset, x):
    # ``_updatedefaultdest()``

    # default destination for update.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    # NOTE(review): the local `node` shadows the imported `node` module;
    # harmless here but worth renaming eventually.
    getargs(x, 0, 0, _("_updatedefaultdest takes no arguments"))
    # Here is where we should consider bookmarks, divergent bookmarks,
    # foreground changesets (successors), and tip of current branch;
    # but currently we are only checking the branch tips.
    node = None
    wc = repo[None]
    p1 = wc.p1()
    try:
        node = repo.branchtip(wc.branch())
    except error.RepoLookupError:
        if wc.branch() == 'default': # no default branch!
            node = repo.lookup('tip') # update to tip
        else:
            raise util.Abort(_("branch %s not found") % wc.branch())

    if p1.obsolete() and not p1.children():
        # allow updating to successors
        successors = obsmod.successorssets(repo, p1.node())

        # behavior of certain cases is as follows,
        #
        # divergent changesets: update to highest rev, similar to what
        #     is currently done when there are more than one head
        #     (i.e. 'tip')
        #
        # replaced changesets: same as divergent except we know there
        # is no conflict
        #
        # pruned changeset: no update is done; though, we could
        #     consider updating to the first non-obsolete parent,
        #     similar to what is current done for 'hg prune'

        if successors:
            # flatten the list here handles both divergent (len > 1)
            # and the usual case (len = 1)
            successors = [n for sub in successors for n in sub]

            # get the max revision for the given successors set,
            # i.e. the 'tip' of a set
            node = repo.revs('max(%ln)', successors).first()
    return subset & baseset([repo[node].rev()])
575 575
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field 1 holds the list of added files
    return checkstatus(repo, subset, pattern, 1)
587 587
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold the pairwise gca over every revision of every argument
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
612 612
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation for ancestors() and _firstancestors()
    startpoints = getset(repo, fullreposet(repo), x)
    if not startpoints:
        return baseset()
    ancestorset = _revancestors(repo, startpoints, followfirst)
    return subset & ancestorset
619 619
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin public wrapper over _ancestors (all-parents walk)
    return _ancestors(repo, subset, x)
625 625
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
630 630
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # step back n times along the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
647 647
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)

    def matchauthor(r):
        # compare case-insensitively against the committer field
        return matcher(encoding.lower(repo[r].user()))

    return subset.filter(matchauthor)
656 656
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
673 673
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
678 678
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: collect every bookmark the matcher accepts
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
716 716
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex/other pattern kinds: match against each rev's branch name.
            # (The `else` pairs with the kind check: an unknown *literal* name
            # must fall through and be retried as a revspec below; previously
            # both paths returned the same filter, making the branchmap()
            # membership test dead code.)
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: select the branches of the given changesets, plus the
    # changesets themselves
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
749 749
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
760 760
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo changelogs expose bundlerevs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
772 772
def checkstatus(repo, subset, pat, field):
    # Filter subset to changesets whose status touches a file matching
    # `pat`; `field` indexes the repo.status() result (used with 0 for
    # modified and 1 for added by the callers in this module).
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cell to cache the matcher
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the changectx, so rebuild per rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal filename can be compared directly
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
804 804
def _children(repo, narrow, parentset):
    # Return the members of `narrow` that have a parent in `parentset`.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            # children always have a higher rev than their parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
820 820
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    kids = _children(repo, subset, parents)
    return subset & kids
828 828
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
836 836
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test (fast path)
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
863 863
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(_matchvalue)
885 885
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)

    def indate(r):
        # date()[0] is the unix timestamp of the changeset
        return matchfn(repo[r].date()[0])

    return subset.filter(indate)
894 894
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    def containsneedle(r):
        return needle in encoding.lower(repo[r].description())

    return subset.filter(containsneedle)
907 907
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation for descendants() and _firstdescendants():
    # the result includes the roots themselves plus their descendants
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again to restore subset's order
        result = subset & result
    return result
926 926
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # follow both parents (followfirst=False is the default)
    return _descendants(repo, subset, x, followfirst=False)
932 932
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
937 937
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    # resolve the optional source argument; no argument means "any source"
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        # _getrevsource returns the recorded source rev of r, or None
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
981 981
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsstore keeps a precomputed 'divergent' rev set
    return subset & obsmod.getrevs(repo, 'divergent')
990 990
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsstore keeps a precomputed 'extinct' rev set
    return subset & obsmod.getrevs(repo, 'extinct')
999 999
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        # _stringmatcher handles the re:/literal: prefixes
        kind, value, matcher = _stringmatcher(value)

    def matches(r):
        extradict = repo[r].extra()
        if label not in extradict:
            return False
        # no value argument means "label present" is enough
        return value is None or matcher(extradict[label])

    return subset.filter(matches)
1029 1029
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # changelog revs to return
    cl = repo.changelog

    # a plain pattern selects exactly one file; anything else is matched
    # against the working directory contents
    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {}  # final value for: filerev -> changerev
        lowestchild = {}  # lowest known filerev child of a filerev
        delayed = []  # filerev with filtered linkrev, for post-processing
        lowesthead = None  # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev  # remember the (filtered) linkrev for comparison below

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # built lazily: maps filerev -> a head containing it
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr:  # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1144 1144
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # simple delegation; argument parsing happens in limit()
    return limit(repo, subset, x)
1150 1150
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow() and _followfirst().
    args = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    wctx = repo['.']
    if args:
        x = getstring(args[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        revs = set()
        for fname in wctx:
            if not matcher(fname):
                continue
            fctx = wctx[fname]
            revs.update(fc.rev() for fc in fctx.ancestors(followfirst))
            # include the revision responsible for the most recent version
            revs.add(fctx.introrev())
    else:
        # no pattern: ancestors of the working directory's first parent
        revs = _revancestors(repo, baseset([wctx.rev()]), followfirst)

    return subset & revs
1170 1170
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # both-parents variant; see _followfirst for the first-parent one
    return _follow(repo, subset, x, 'follow')
1178 1178
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1184 1184
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops "null" if the subset contained it
    return subset & spanset(repo)
1192 1192
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # search changed file names, the user and the description
        return any(pattern.search(field)
                   for field in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1213 1213
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    args = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in args:
        # i18n: "_matchfiles" is a keyword
        spec = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = spec[:2], spec[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            # empty means working directory; leave rev as None
            if value != '':
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    matcher = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                             exclude=exc, ctx=repo[rev], default=default)

    def matches(r):
        return any(matcher(f) for f in repo[r].files())

    return subset.filter(matches)
1270 1270
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1283 1283
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    for branch, nodes in repo.branchmap().iteritems():
        headrevs.update(cl.rev(n) for n in nodes)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1299 1299
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # a head is any member that is not a parent of another member
    return members - parents(repo, subset, x)
1307 1307
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the 'visible' filter's rev set is exactly the hidden revisions
    return subset & repoview.filterrevs(repo, 'visible')
1316 1316
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        candidates = ctx.files() + [ctx.user(), ctx.description()]
        return any(needle in encoding.lower(text) for text in candidates)

    return subset.filter(matches)
1331 1331
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    args = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(args[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    candidates = getset(repo, fullreposet(repo), args[0])
    picked = []
    it = iter(candidates)
    # examine at most 'lim' members of the candidate set, keeping those
    # that also appear in the input subset
    for _unused in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1357 1357
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    candidates = getset(repo, fullreposet(repo), args[0])
    # walk the candidate set from the end backwards
    candidates.reverse()
    picked = []
    it = iter(candidates)
    for _unused in xrange(lim):
        rev = next(it, None)
        if rev is None:
            break
        if rev in subset:
            picked.append(rev)
    return baseset(picked)
1384 1384
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.max()
        if m in subset:
            return baseset([m])
    except ValueError:
        # os.max() throws a ValueError when the collection is empty.
        # Same as python's max().
        pass
    # max not in subset, or set was empty
    return baseset()
1395 1399
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1404 1408
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many children rev r has
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1423 1427
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    try:
        m = os.min()
        if m in subset:
            return baseset([m])
    except ValueError:
        # os.min() throws a ValueError when the collection is empty.
        # Same as python's min().
        pass
    # min not in subset, or set was empty
    return baseset()
1434 1442
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects modified files
    return checkstatus(repo, subset, pat, 0)
1446 1454
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # regex: collect every namespace whose name matches
        for name, nsobj in repo.names.iteritems():
            if matcher(name):
                namespaces.add(nsobj)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for nsobj in namespaces:
        for name in nsobj.listnames(repo):
            if name not in nsobj.deprecated:
                names.update(repo[n].rev() for n in nsobj.nodes(repo, name))

    names.discard(node.nullrev)
    return subset & names
1484 1492
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    hexspec = getstring(args[0], _("id requires a string"))
    if len(hexspec) == 40:
        # full 40-character hash: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(hexspec))
        except (LookupError, TypeError):
            rn = None
    else:
        # prefix: must match a node unambiguously
        rn = None
        pm = repo.changelog._partialmatch(hexspec)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1508 1516
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # the obsstore keeps a precomputed 'obsolete' rev set
    return subset & obsmod.getrevs(repo, 'obsolete')
1516 1524
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # no second set: exclude every other head of the repository
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1542 1550
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain all the way back to its origin
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    srcs = set(_firstsrc(r) for r in dests)
    srcs.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & srcs
1573 1581
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    args = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = args and getstring(args[0],
                              _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery status output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    missingrevs = set(cl.rev(n) for n in outgoing.missing)
    return subset & missingrevs
1600 1608
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    firstparents = set(cl.parentrevs(r)[0]
                       for r in getset(repo, fullreposet(repo), x))
    firstparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & firstparents
1619 1627
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # not a merge in progress: no second parent
            return baseset()

    cl = repo.changelog
    secondparents = set(cl.parentrevs(r)[1]
                        for r in getset(repo, fullreposet(repo), x))
    secondparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & secondparents
1642 1650
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        prevs = set(p.rev() for p in repo[x].parents())
    else:
        prevs = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry
                prevs.update(p.rev() for p in repo[r].parents())
            else:
                prevs.update(cl.parentrevs(r))
    prevs.discard(node.nullrev)
    return subset & prevs
1661 1669
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: use the precomputed per-phase rev sets
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        revs = baseset(revs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1674 1682
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1682 1690
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1690 1698
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    selected = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            selected.add(r)
        elif n == 1:
            selected.add(cl.parentrevs(r)[0])
        elif n == 2:
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                selected.add(prevs[1])
    return subset & selected
1715 1723
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    # swallow lookup failures so unknown revisions yield an empty set
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()
1729 1737
# for internal use
def _notpublic(repo, subset, x):
    # every changeset that is not in the public phase
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    else:
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target, cache=False)
1746 1754
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    getphase = repo._phasecache.phase
    def ispublic(r):
        return getphase(repo, r) == phases.public
    return subset.filter(ispublic, cache=False)
1756 1764
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    # first optional argument: the identifier to look up remotely
    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' is a synonym for the current local branch name
        q = repo['.'].branch()

    # second optional argument: the remote repository path ('default'
    # when omitted or empty)
    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # ask the peer to resolve the identifier, then map the resulting
    # node back to a local revision if we have it
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1791 1799
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field index 2 selects removed files in checkstatus()
    return checkstatus(repo, subset, pat, 2)
1803 1811
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # unknown revisions (other than the virtual null revision) simply
    # produce an empty set rather than an error
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1819 1827
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # the reference revisions whose field values candidates must match
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if, for some reference revision, every
        # selected field compares equal
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1931 1939
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    result = getset(repo, subset, x)
    # in-place reversal on the smartset, then hand it back
    result.reverse()
    return result
1939 1947
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevsof = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real parent (nullrev excluded) inside the set itself
        return not any(p >= 0 and p in candidates for p in parentrevsof(r))
    return subset & candidates.filter(isroot)
1952 1960
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # bytewise complement of a string: sorting the inverted keys
        # ascending is equivalent to sorting the originals descending
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        # fast paths: plain revision-number ordering needs no key vectors
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a comparison-key vector per revision and do a
    # decorate-sort-undecorate pass
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing revision number acts as a deterministic tie-breaker and
        # is what gets extracted again after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
2015 2023
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state changes always show up as .hgsubstate modifications
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names that match the requested pattern
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # did this changeset touch .hgsubstate relative to its first parent?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the subrepo state on both sides of the changeset
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2058 2066
2059 2067 def _stringmatcher(pattern):
2060 2068 """
2061 2069 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2062 2070 returns the matcher name, pattern, and matcher function.
2063 2071 missing or unknown prefixes are treated as literal matches.
2064 2072
2065 2073 helper for tests:
2066 2074 >>> def test(pattern, *tests):
2067 2075 ... kind, pattern, matcher = _stringmatcher(pattern)
2068 2076 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2069 2077
2070 2078 exact matching (no prefix):
2071 2079 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
2072 2080 ('literal', 'abcdefg', [False, False, True])
2073 2081
2074 2082 regex matching ('re:' prefix)
2075 2083 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
2076 2084 ('re', 'a.+b', [False, False, True])
2077 2085
2078 2086 force exact matches ('literal:' prefix)
2079 2087 >>> test('literal:re:foobar', 'foobar', 're:foobar')
2080 2088 ('literal', 're:foobar', [False, True])
2081 2089
2082 2090 unknown prefixes are ignored and treated as literals
2083 2091 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
2084 2092 ('literal', 'foo:bar', [False, False, True])
2085 2093 """
2086 2094 if pattern.startswith('re:'):
2087 2095 pattern = pattern[3:]
2088 2096 try:
2089 2097 regex = re.compile(pattern)
2090 2098 except re.error as e:
2091 2099 raise error.ParseError(_('invalid regular expression: %s')
2092 2100 % e)
2093 2101 return 're', pattern, regex.search
2094 2102 elif pattern.startswith('literal:'):
2095 2103 pattern = pattern[8:]
2096 2104 return 'literal', pattern, pattern.__eq__
2097 2105
def _substringmatcher(pattern):
    # like _stringmatcher(), but a literal pattern matches as a substring
    # rather than requiring full equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
2103 2111
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name given: every tagged revision, minus the floating 'tip' tag
        revs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
        return subset & revs
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        # avoid resolving all tags
        tn = repo._tagscache.tags.get(pattern, None)
        if tn is None:
            raise error.RepoLookupError(_("tag '%s' does not exist")
                                        % pattern)
        revs = set([repo[tn].rev()])
    else:
        revs = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & revs
2132 2140
def tagged(repo, subset, x):
    # backwards-compatible alias for the tag() predicate
    return tag(repo, subset, x)
2135 2143
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect with the precomputed 'unstable' obsolescence-related set
    return subset & obsmod.getrevs(repo, 'unstable')
2144 2152
2145 2153
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" and "author" share one implementation
    return author(repo, subset, x)
2155 2163
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is only reported when the
    # subset can actually contain it
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2163 2171
# for internal use
def _list(repo, subset, x):
    # evaluate a '\0'-separated list of revision specs (produced by the
    # optimizer's 'or' fast path), preserving input order
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # fall back to full symbol resolution
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is only accepted when the subset is the full repo
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2192 2200
# for internal use
def _intlist(repo, subset, x):
    # parse a '\0'-separated list of integer revisions, keeping only those
    # present in subset (in the order given)
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(piece) for piece in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2201 2209
# for internal use
def _hexlist(repo, subset, x):
    # parse a '\0'-separated list of binary node hashes, keeping only the
    # corresponding revisions present in subset (in the order given)
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2211 2219
# mapping of revset predicate name -> implementation function; parsed
# 'func' nodes are dispatched through this table
symbols = {
    "_mergedefaultdest": _mergedefaultdest,
    "_updatedefaultdest": _updatedefaultdest,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}

# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])

# mapping of parse-tree node type -> evaluation function used when a
# parsed revset tree is evaluated
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2378 2386
def optimize(x, small):
    """Rewrite parse tree ``x`` into an optimized tree, returning a
    ``(weight, tree)`` pair.

    ``weight`` is a rough evaluation-cost estimate used below to put the
    cheaper operand of an ``and`` first; ``small`` grants a bonus to
    expressions expected to yield few revisions.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # sugar operators are rewritten into their canonical equivalents
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper side first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                # accumulate consecutive trivial operands for _list()
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # hard-coded cost estimates for the known predicates
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2511 2519
2512 2520 _aliasarg = ('func', ('symbol', '_aliasarg'))
2513 2521 def _getaliasarg(tree):
2514 2522 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2515 2523 return X, None otherwise.
2516 2524 """
2517 2525 if (len(tree) == 3 and tree[:2] == _aliasarg
2518 2526 and tree[2][0] == 'string'):
2519 2527 return tree[2][1]
2520 2528 return None
2521 2529
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None:
        if not known or arg not in known:
            raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child node
    for subtree in tree:
        _checkaliasarg(subtree, known)
2532 2540
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is accepted here for backward-compatible alias arguments; bytes
# above 127 keep non-ASCII symbol names tokenizable)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2537 2545
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # reuse the regular revset tokenizer with a widened set of characters
    # allowed to start a symbol (_aliassyminitletters)
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2547 2555
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must have been consumed by the parser
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # every declared argument must itself be a plain symbol
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2618 2626
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wrap the plain alias tokenizer: every symbol matching a declared
        # argument name is re-emitted as the token stream of
        # "_aliasarg('ARGNAME')" so the parser marks it as a placeholder
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # note: quoted strings starting with '$' are allowed,
                    # only bare symbols are rejected here
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # reject trailing garbage after a syntactically complete expression
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2683 2691
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse the left-hand side; on failure, record the problem in
        # self.error instead of raising, so a bad alias only warns
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        # parse the right-hand side; same best-effort error handling
        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2708 2716
def _getalias(aliases, tree):
    """If tree looks like an unexpanded alias, return it. Return None
    otherwise.
    """
    if not (isinstance(tree, tuple) and tree):
        return None
    kind = tree[0]
    if kind == 'symbol' and len(tree) == 2:
        # bare symbol: must match a declared argument-less alias exactly
        candidate = aliases.get(tree[1])
        if (candidate and candidate.args is None
            and candidate.tree == tree):
            return candidate
    if kind == 'func' and len(tree) > 1:
        # function call: the callee symbol must match an alias with arguments
        callee = tree[1]
        if callee[0] == 'symbol' and len(callee) == 2:
            candidate = aliases.get(callee[1])
            if (candidate and candidate.args is not None
                and candidate.tree == tree[:2]):
                return candidate
    return None
2726 2734
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not (tree and isinstance(tree, tuple)):
        return tree
    argname = _getaliasarg(tree)
    if argname is not None:
        # this node is an argument placeholder: substitute its value
        return args[argname]
    return tuple(_expandargs(subtree, args) for subtree in tree)
2737 2745
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is a stack of aliases currently being expanded, used to
    detect cycles; 'cache' memoizes the argument-free expansion of each
    alias by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # arguments are expanded with a fresh stack: they are not part
            # of the current alias' own expansion chain
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node: recurse into children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2771 2779
def findaliases(ui, tree, showwarning=None):
    """Expand all [revsetalias] configuration entries in ``tree``."""
    _checkaliasarg(tree)
    aliases = {}
    for name, value in ui.configitems('revsetalias'):
        alias = revsetalias(name, value)
        aliases[alias.name] = alias
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for aliasname, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2786 2794
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: just recurse into children
        return tuple(foldconcat(subtree) for subtree in tree)
    # depth-first flattening of (possibly nested) _concat nodes
    chunks = []
    stack = [tree]
    while stack:
        item = stack.pop()
        kind = item[0]
        if kind == '_concat':
            stack.extend(reversed(item[1:]))
        elif kind in ('string', 'symbol'):
            chunks.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(chunks))
2807 2815
def parse(spec, lookup=None):
    """Parse revset ``spec`` into a simplified parse tree."""
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2814 2822
def posttreebuilthook(tree, repo):
    """Hook point invoked with the optimized parse tree.

    The core implementation does nothing; extensions can wrap or replace
    this function to run code on the tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2818 2826
def match(ui, spec, repo=None):
    """Create a matcher for the single revision spec ``spec``."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # symbol lookup is only available when a repository is supplied
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2827 2835
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher always yields an empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    # symbol lookup is only available when a repository is supplied
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2845 2853
def _makematcher(ui, tree, repo):
    """Turn a revset parse tree into a matcher function.

    The returned ``mfunc(repo, subset=None)`` evaluates the tree against
    ``subset`` (defaulting to the full repository) and returns the matching
    revisions.
    """
    if ui:
        # expand user-defined aliases before any other transformation
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # getset() wants a smartset; wrap plain collections in a baseset
        # (isascending is used as a duck-typing marker for smartsets)
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2861 2869
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote via repr() so embedded quotes/backslashes are escaped
        return repr(str(s))

    def argtype(c, arg):
        # render one argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list either as a _list()/_intlist()/_hexlist() call for
        # known leaf types, or as a balanced tree of 'or' expressions
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # accumulate output chunks in a list and join once at the end, instead
    # of repeated str += which is quadratic in the worst case
    ret = []
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret.append(d)
            elif d in 'dsnbr':
                ret.append(argtype(d, args[arg]))
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret.append(listexp(list(args[arg]), d))
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret.append(c)
        pos += 1

    return ''.join(ret)
2955 2963
def prettyformat(tree):
    """Render a revset parse tree as an indented multi-line string."""
    leafnodes = ('string', 'symbol')
    return parser.prettyformat(tree, leafnodes)
2958 2966
def depth(tree):
    """Return the nesting depth of a parse tree (leaves are depth 0)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2964 2972
def funcsused(tree):
    """Return the set of function names called anywhere in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves contribute nothing
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        funcs.add(tree[1][1])
    return funcs
2975 2983
class abstractsmartset(object):
    """Abstract base class for the smartset family.

    Subclasses implement the iteration and ordering primitives; set
    arithmetic (__and__, __add__, __sub__) and filter() are provided here
    in terms of those primitives.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of the ascending iterator is the minimum;
            # an empty iterator mirrors builtin min()'s ValueError
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of the descending iterator is the maximum;
            # an empty iterator mirrors builtin max()'s ValueError
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # presumably fullreposet contains every rev, so intersecting
            # with it is a no-op — see fullreposet for confirmation
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
3082 3090
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # _ascending: None = keep insertion order, True/False = iterate the
        # sorted list ascending/descending (see __iter__)
        if not isinstance(data, list):
            if isinstance(data, set):
                # reuse the caller's set for membership testing
                self._set = data
            data = list(data)
        self._list = data
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily built set for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built ascending copy of the data
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound method of the underlying set so membership tests
        # skip a level of indirection
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unordered: physically reverse the insertion order
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # prefix encodes iteration order: '' unordered, '+' asc, '-' desc
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3179 3187
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # filter an arbitrary iterable through the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only available if the wrapped subset has it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast iteration is only available if the wrapped subset has it
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # non-empty iff at least one subset element passes the condition
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # when the order is known, take the first element of the opposite
        # fast iterator; otherwise exhaust the set and keep the last value
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3264 3272
def _iterordered(ascending, iter1, iter2):
    """produce an ordered iteration from two iterators with the same order

    The ascending is used to indicate the iteration direction.
    """
    # pick the comparator matching the iteration direction
    choice = max
    if ascending:
        choice = min

    val1 = None
    val2 = None
    try:
        # Consume both iterators in an ordered way until one is empty.
        # Use the builtin next() (as elsewhere in this module) rather than
        # the py2-only .next() method, and avoid shadowing the builtin
        # with a local named 'next'.
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            winner = choice(val1, val2)
            yield winner
            if val1 == winner:
                val1 = None
            if val2 == winner:
                val2 = None
    except StopIteration:
        # Flush any remaining values and consume the other one
        it = iter2
        if val1 is not None:
            yield val1
            it = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in it:
            yield val
3300 3308
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter appears unused within this class — confirm
        # before removing
        self._iter = None
        self._ascending = ascending
        # lazily populated caches: materialized union / sorted union
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the union (see _list)
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        # merge the components' fast iterators; unavailable if either lacks one
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        # merge the components' fast iterators; unavailable if either lacks one
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # last element is the first element of the reversed iteration
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        # prefix encodes iteration order: '' unordered, '+' asc, '-' desc
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3500 3508
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generation order is unknown; True/False when
                 the generator is known to yield ascending/descending
                 values, enabling faster iteration and membership checks.
        """
        self._gen = gen
        self._asclist = None    # sorted copy of _genlist, set once exhausted
        self._cache = {}        # membership cache: value -> True/False
        self._genlist = []      # values produced so far, in generator order
        self._finished = False  # True once _gen has been exhausted
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): assigning __contains__ on the instance only
                # affects explicit s.__contains__(x) calls on new-style
                # classes; the `in` operator looks special methods up on the
                # type -- confirm this shortcut fires where intended.
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: once past x, it cannot appear anymore
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: once below x, it cannot appear anymore
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen set fastasc/fastdesc on finish
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                # prefer already-materialized values; fall back to pulling
                # a fresh one from the shared consumer
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            # once exhausted, iteration either way is just a walk over the
            # sorted list
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # force full consumption; afterwards _genlist holds every value
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            # bug fix: retry last(), not first() -- first() walks the
            # iteration order from the opposite end and would return the
            # wrong element once the generator is exhausted
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3674 3682
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end always holds; the direction
            # is carried by _ascending instead
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # filtered (hidden) revisions are skipped by iteration/membership
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # only flips the iteration direction; stored bounds are unchanged
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        # in-range and not hidden
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # non-empty iff at least one non-hidden revision survives
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions that fall inside the span
            count = 0
            start = self._start
            end = self._end
            # NOTE(review): after __init__ normalization start <= end always
            # holds, so the first disjunct below can never be true; it looks
            # like a leftover from a pre-normalization version -- confirm
            # and drop.
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        # the loop returns the first surviving (non-hidden) revision, if any
        for x in it():
            return x
        return None

    def last(self):
        # same as first() with the direction flipped
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3783 3791
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3822 3830
def prettyformatset(revs):
    """Return a multi-line, indented rendering of repr(revs).

    Every '<' in the repr starts a new line; each segment is indented one
    space per unclosed '<' that precedes it, so nested smartsets read as a
    tree.
    """
    text = repr(revs)
    total = len(text)
    pieces = []
    pos = 0
    while pos < total:
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        # nesting depth = unmatched '<' seen before this segment
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        pieces.append(' ' * depth + text[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(pieces)
3836 3844
# tell hggettext to extract docstrings from these functions:
# (presumably `symbols` maps revset predicate names to their implementation
# functions, registered earlier in this module -- defined outside this view)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now