##// END OF EJS Templates
reachableroots: default to the C implementation...
Laurent Charignon -
r26006:1ffd97cb default
parent child Browse files
Show More
@@ -1,3707 +1,3719
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # With followfirst, only the first parent of each revision is walked.
    cut = 1 if followfirst else None
    cl = repo.changelog

    def gen():
        # Walk from highest to lowest revision using a max-heap,
        # emulated by pushing negated revision numbers onto heapq.
        revs.sort(reverse=True)
        inputs = iter(revs)
        pending = []

        nextinput = next(inputs, None)
        if nextinput is not None:
            heapq.heappush(pending, -nextinput)

        emitted = set()
        while pending:
            rev = -heapq.heappop(pending)
            if rev == nextinput:
                # Consumed a seed revision; queue the next seed so the
                # merged stream stays in descending order.
                nextinput = next(inputs, None)
                if nextinput is not None:
                    heapq.heappush(pending, -nextinput)
            if rev not in emitted:
                emitted.add(rev)
                yield rev
                for p in cl.parentrevs(rev)[:cut]:
                    if p != node.nullrev:
                        heapq.heappush(pending, -p)

    return generatorset(gen(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # With followfirst, a child counts only via its first parent.
    cut = 1 if followfirst else None

    def gen():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        lowest = min(revs)
        nullrev = node.nullrev
        if lowest == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for rev in cl:
                yield rev
        else:
            reachable = set(revs)
            # Scan upward: anything with a (first) parent already reached
            # is itself a descendant.
            for rev in cl.revs(lowest + 1):
                for prev in cl.parentrevs(rev)[:cut]:
                    if prev != nullrev and prev in reachable:
                        reachable.add(rev)
                        yield rev
                        break

    return generatorset(gen(), iterasc=True)
89 89
def reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback for the changelog's C ``reachableroots``;
    ``minroot`` bounds the walk so parents below it are never visited.
    """
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # heads-only mode: no need to continue below a root
                continue
        parents = parentrevs(rev)
        # 'seen' doubles as a parentrevs cache for the second pass below
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # Second pass in ascending order: pull in every visited revision that
    # has a reachable parent, completing the <roots>::<heads> paths.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
130 126
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    heads = list(heads)
    # Probe for the C implementation with getattr rather than wrapping the
    # call in try/except AttributeError: the try/except form also swallowed
    # AttributeErrors raised *inside* the C implementation and silently fell
    # back to the slow pure-Python code, hiding real bugs.
    creachableroots = getattr(repo.changelog, 'reachableroots', None)
    if creachableroots is not None:
        return creachableroots(minroot, heads, roots, includepath)
    return reachablerootspure(repo, minroot, roots, heads, includepath)
142
# Parse table driving the Pratt-style revset expression parser.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
158 170
# words tokenized as operators rather than symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (bytes above 127 are allowed so multi-byte encodings pass through)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
168 180
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    Yields (token-type, value-or-None, position) triples, ending with
    an ('end', None, pos) sentinel.
    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # single-pass scan; pos always points at the current character and
    # is advanced once more at the bottom of the loop
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one; the bottom of the loop advances pos again
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
286 298
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # args may be (message,) or (message, position)
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
294 306
295 307 # helpers
296 308
def getstring(x, err):
    """Extract the string value of a 'string' or 'symbol' node, or raise
    ParseError(err)."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
301 313
def getlist(x):
    """Flatten a right-nested 'list' parse node into a Python list."""
    if not x:
        return []
    if x[0] != 'list':
        # a single non-list node is a one-element list
        return [x]
    return getlist(x[1]) + [x[2]]
308 320
def getargs(x, min, max, err):
    """Return the flattened argument list of x, raising ParseError(err)
    unless its length lies within [min, max] (max < 0 means unbounded)."""
    l = getlist(x)
    if len(l) < min:
        raise error.ParseError(err)
    if 0 <= max < len(l):
        raise error.ParseError(err)
    return l
314 326
def getargsdict(x, funcname, keys):
    # Map positional/keyword revset arguments onto the names in 'keys'
    # (a space-separated string) via the generic parser helper.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
318 330
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
323 335
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
330 342
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
335 347
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    namenode = tree[1]
    return getsymbol(namenode)
342 354
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a two-element func node has no argument subtree at all
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
352 364
def getset(repo, subset, x):
    # Evaluate parsed tree 'x' within 'subset' by dispatching on the node
    # type through the 'methods' table; the result is always a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset, pass it through unchanged
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap plain lists produced by legacy predicates
    return baseset(s)
367 379
368 380 def _getrevsource(repo, r):
369 381 extra = repo[r].extra()
370 382 for label in ('source', 'transplant_source', 'rebase_source'):
371 383 if label in extra:
372 384 try:
373 385 return repo[extra[label]].rev()
374 386 except error.RepoLookupError:
375 387 pass
376 388 return None
377 389
378 390 # operator methods
379 391
def stringset(repo, subset, x):
    # Resolve a bare revision identifier to a single-rev baseset.
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        # 'null' is treated as a member of every fullreposet even though
        # iteration over the set does not yield it
        return baseset([x])
    return baseset()
386 398
def rangeset(repo, subset, x, y):
    # 'x:y' — resolve both endpoints against the whole repo, then span
    # from the first rev of m to the last rev of n, descending if m > n.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # working-directory pseudo-rev sits past the end of the changelog,
        # so it is appended explicitly rather than included in the span
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
411 423
def dagrange(repo, subset, x, y):
    # 'x::y' — every revision on a path from x down to y, computed by
    # reachableroots with path inclusion turned on.
    full = fullreposet(repo)
    roots = getset(repo, full, x)
    heads = getset(repo, full, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
419 431
def andset(repo, subset, x, y):
    # intersection: evaluate 'y' within the result of evaluating 'x'
    return getset(repo, getset(repo, subset, x), y)
422 434
def orset(repo, subset, *xs):
    """Union of the operand revsets, combined divide-and-conquer so the
    recursion depth stays logarithmic in the operand count."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
431 443
def notset(repo, subset, x):
    # complement of 'x' within 'subset'
    return subset - getset(repo, subset, x)
434 446
def listset(repo, subset, a, b):
    # bare 'list' nodes are only valid inside function-call arguments
    raise error.ParseError(_("can't use a list in this context"))
437 449
def keyvaluepair(repo, subset, k, v):
    # 'key=value' nodes are only valid inside supporting function calls
    raise error.ParseError(_("can't use a key-value pair in this context"))
440 452
def func(repo, subset, a, b):
    # Dispatch a function-call node to its implementation in 'symbols';
    # unknown names raise UnknownIdentifier with documented suggestions.
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # only suggest predicates that have a docstring (i.e. are public)
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
449 461
450 462 # functions
451 463
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple holds added files
    return checkstatus(repo, subset, pat, 1)
463 475
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
488 500
def _ancestors(repo, subset, x, followfirst=False):
    # Resolve heads against the whole repo so ancestors outside 'subset'
    # still anchor the walk; intersect with 'subset' at the end.
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
495 507
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # note: includes the revisions in 'set' themselves
    return _ancestors(repo, subset, x)
501 513
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
506 518
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload is at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
523 535
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    # compare case-insensitively against each changeset's user field
    return subset.filter(lambda r: matcher(encoding.lower(repo[r].user())))
532 544
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get interprets the status keyword against repo state
    state = set(hbisect.get(repo, status))
    return subset & state
549 561
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias kept so existing revsets keep working
    return bisect(repo, subset, x)
554 566
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: direct lookup, error if missing
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: collect every bookmark whose name matches
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # drop the null revision in case a bookmark points at it
    bms -= set([node.nullrev])
    return subset & bms
592 604
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # NOTE(review): this branch applies the same filter as the
            # literal case above; the split exists only for the
            # fall-through behavior when the literal name is unknown.
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # treat the argument as a revset: select every cset on any branch
    # that contains one of those revisions
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
625 637
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
636 648
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo changelogs expose 'bundlerevs'
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
648 660
def checkstatus(repo, subset, pat, field):
    # Filter 'subset' to changesets where a file matching 'pat' appears in
    # entry 'field' of the repo.status() tuple (callers in this file pass
    # 1 for added files).
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the changectx, so rebuild per rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # shortcut: a single literal filename can be tested directly
            fname = m.files()[0]
        if fname is not None:
            # cheap pre-check against the files touched by the changeset
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
680 692
def _children(repo, narrow, parentset):
    # Return members of 'narrow' that have at least one parent in
    # 'parentset'.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            # children always have a higher revision than their parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
696 708
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    # resolve parents against the whole repo, then narrow to 'subset'
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
704 716
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda rev: repo[rev].closesbranch())
712 724
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
739 751
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match so a short identifier still selects the cset
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
761 773
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # date()[0] is the timestamp component of the (timestamp, tz) pair
    return subset.filter(lambda r: matchfn(repo[r].date()[0]))
770 782
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    # substring search against the lower-cased description
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
783 795
def _descendants(repo, subset, x, followfirst=False):
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again to restore subset's order
        result = subset & result
    return result
802 814
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # note: includes the revisions in 'set' themselves
    return _descendants(repo, subset, x)
808 820
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
813 825
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            lineage.append(r)

            if src in sources or src in dests:
                dests.update(lineage)
                break

            # keep walking the recorded source chain
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
857 869
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
866 878
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
875 887
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # value is None means "label present, any value"
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
905 917
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it to a single repo-relative file name
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # patterned: match against the working-directory file list
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    # First pass per file: record every linkrev that is visible in the
    # current repoview; remember filtered ones in 'delayed' for the
    # second pass below.
    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev  # remember the original (filtered) linkrev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the head cache: filerev -> head changeset
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1020 1032
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # pure alias: limit() does all argument parsing and validation
    return limit(repo, subset, x)
1026 1038
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation behind follow() and _followfirst(); 'name' is
    # only used in error messages.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if not l:
        # no filename: ancestors of the working directory parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)
    else:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x not in c:
            return baseset()
        cx = c[x]
        s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
        # include the revision responsible for the most recent version
        s.add(cx.introrev())

    return subset & s
1043 1055
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegates to the helper shared with _followfirst()
    return _follow(repo, subset, x, name='follow')
1051 1063
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, name='_followfirst', followfirst=True)
1057 1069
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    allrevs = spanset(repo)
    return subset & allrevs # drop "null" if any
1065 1077
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        c = repo[x]
        # search file names, user name, and the full description
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches)
1086 1098
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # dispatch each argument on its two-character prefix
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # rev is None here when no 'r:' was given or it was empty, which makes
    # repo[rev] the working-directory context
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches when any of its touched files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1143 1155
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # hand off to _matchfiles() with a single plain-pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1156 1168
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    hs = set()
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            hs.add(torev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1172 1184
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is a member that is not a parent of any member
    return getset(repo, subset, x) - parents(repo, subset, x)
1180 1192
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # everything the 'visible' filter would hide
    return subset & repoview.filterrevs(repo, 'visible')
1189 1201
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        # lowercase each candidate text so the match is case-insensitive
        texts = c.files() + [c.user(), c.description()]
        return any(kw in encoding.lower(t) for t in texts)

    return subset.filter(matches)
1204 1216
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
        except (TypeError, ValueError):
            # i18n: "limit" is a keyword
            raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # take at most 'lim' members, keeping only those present in subset
    for _n in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1230 1242
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    lim = 1
    if len(l) == 2:
        try:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
        except (TypeError, ValueError):
            # i18n: "last" is a keyword
            raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # walk the set backwards so the last members come first
    os.reverse()
    result = []
    it = iter(os)
    for _n in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1257 1269
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    m = os.max()
    if m in subset:
        return baseset([m])
    return baseset()
1268 1280
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1277 1289
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    if not subset:
        return baseset()
    cl = repo.changelog
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[rev - baserev] counts the known children of 'rev'
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1296 1308
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    m = os.min()
    if m in subset:
        return baseset([m])
    return baseset()
1307 1319
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects modified files
    return checkstatus(repo, subset, pattern, 0)
1319 1331
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern match against all registered namespace names
        namespaces.update(ns for name, ns in repo.names.iteritems()
                          if matcher(name))
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    revs = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name in namespace.deprecated:
                continue
            revs.update(repo[n].rev() for n in namespace.nodes(repo, name))

    revs.discard(node.nullrev)
    return subset & revs
1357 1369
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-digit hex: direct changelog lookup, ignoring bad input
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # shorter prefix: unambiguous partial match only
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1381 1393
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict the incoming subset to the obsstore's obsolete set
    return subset & obsmod.getrevs(repo, 'obsolete')
1389 1401
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 2:
        # explicit second set: exclude its ancestors
        exclude = getset(repo, fullreposet(repo), args[1])
    else:
        if not include:
            return baseset()
        # implicit: exclude every head that is not above the first set
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1415 1427
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the source chain back to its first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1446 1458
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default paths
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contacts the remote repository (network I/O)
    other = hg.peer(repo, {}, dest)
    # buffer ui output so discovery chatter is not shown to the user
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1473 1485
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: single first parent (may be nullrev)
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1492 1504
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # working directory: second parent only exists for an uncommitted merge
        wparents = repo[x].parents()
        try:
            p = wparents[1].rev()
        except IndexError:
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1515 1527
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory's parents are not in the changelog
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1534 1546
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if not repo._phasecache._phasesets:
        # no cached per-phase sets: test each revision individually
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
    s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
    s = baseset(s)
    s.sort() # set are non ordered, so we enforce ascending
    return subset & s
1547 1559
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1555 1567
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1563 1575
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only revisions that actually have a second parent
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1588 1600
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and degrade to an empty result
        return baseset()
1602 1614
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if not phasesets:
        # no cached per-phase sets: test each revision individually
        phase = repo._phasecache.phase
        target = phases.public
        return subset.filter(lambda r: phase(repo, r) != target, cache=False)
    # union every non-public phase set (index 0 is public)
    s = set()
    for u in phasesets[1:]:
        s.update(u)
    s = baseset(s - repo.changelog.filteredrevs)
    s.sort()
    return subset & s
1619 1631
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    return subset.filter(ispublic, cache=False)
1629 1641
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contacts the remote repository (network I/O)
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1664 1676
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1676 1688
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never "in" the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1692 1704
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            # Stop at the first mismatching field.  Fields were sorted by
            # increasing matching cost above; without the early exit every
            # field (including the very costly 'diff') was evaluated for
            # every candidate, defeating that ordering entirely.
            if all(f(x) == target[n] for n, f in enumerate(getfieldfuncs)):
                return True
        return False

    return subset.filter(matches)
1804 1816
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # Materialize the argument set, then flip its iteration order in place.
    rs = getset(repo, subset, x)
    rs.reverse()
    return rs
1812 1824
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs

    def isroot(r):
        # A root has no non-null parent inside s.
        return not any(p >= 0 and p in s for p in parents(r))

    return subset & s.filter(isroot)
1825 1837
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # Map every byte to its 255-complement so ascending comparison of
        # the inverted string yields descending order of the original
        # (assumes Python 2 byte strings).
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # Fast paths: sorting by revision number needs no changectx access.
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # General case: build one comparison tuple per revision (one element
    # per key), sort lexicographically, then read the revs back out.
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE(review): substring membership — fragments like 'use'
                # would also match; presumably only 'user'/'author' reach
                # here. Confirm against the revset grammar.
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # Trailing rev breaks ties deterministically; the final result is
        # read back from this last element.
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1888 1900
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changes touching .hgsubstate can affect subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # NOTE: closes over 'pat', which is bound only when an argument was
        # given; matches() below returns before calling this otherwise.
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the first parent and this revision
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate appeared: every current subrepo counts as added
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare parent and child entries for each matching subrepo
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate disappeared: every parent subrepo counts as removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1931 1943
1932 1944 def _stringmatcher(pattern):
1933 1945 """
1934 1946 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1935 1947 returns the matcher name, pattern, and matcher function.
1936 1948 missing or unknown prefixes are treated as literal matches.
1937 1949
1938 1950 helper for tests:
1939 1951 >>> def test(pattern, *tests):
1940 1952 ... kind, pattern, matcher = _stringmatcher(pattern)
1941 1953 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1942 1954
1943 1955 exact matching (no prefix):
1944 1956 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1945 1957 ('literal', 'abcdefg', [False, False, True])
1946 1958
1947 1959 regex matching ('re:' prefix)
1948 1960 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1949 1961 ('re', 'a.+b', [False, False, True])
1950 1962
1951 1963 force exact matches ('literal:' prefix)
1952 1964 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1953 1965 ('literal', 're:foobar', [False, True])
1954 1966
1955 1967 unknown prefixes are ignored and treated as literals
1956 1968 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1957 1969 ('literal', 'foo:bar', [False, False, True])
1958 1970 """
1959 1971 if pattern.startswith('re:'):
1960 1972 pattern = pattern[3:]
1961 1973 try:
1962 1974 regex = re.compile(pattern)
1963 1975 except re.error as e:
1964 1976 raise error.ParseError(_('invalid regular expression: %s')
1965 1977 % e)
1966 1978 return 're', pattern, regex.search
1967 1979 elif pattern.startswith('literal:'):
1968 1980 pattern = pattern[8:]
1969 1981 return 'literal', pattern, pattern.__eq__
1970 1982
def _substringmatcher(pattern):
    # Like _stringmatcher(), except a 'literal' pattern matches as a
    # substring instead of requiring full equality.
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
1976 1988
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name given: every tag except the implicit 'tip'
        s = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set(cl.rev(n) for t, n in repo.tagslist() if matcher(t))
    return subset & s
2005 2017
def tagged(repo, subset, x):
    # backwards-compatible alias for the tag() predicate
    return tag(repo, subset, x)
2008 2020
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2017 2029
2018 2030
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # ``user`` is simply another name for ``author``
    return author(repo, subset, x)
2028 2040
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    inset = node.wdirrev in subset or isinstance(subset, fullreposet)
    return baseset([node.wdirrev] if inset else [])
2036 2048
# for internal use
def _list(repo, subset, x):
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # Deduplicate here: different symbols may resolve to the same rev,
    # which the caller cannot easily detect.
    cl = repo.changelog
    seen = set()
    ls = []
    for t in s.split('\0'):
        try:
            # fast path: the token is a plain integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            r = repo[t].rev()
        if r in seen:
            continue
        seen.add(r)
        wanted = (r in subset
                  or (r == node.nullrev and isinstance(subset, fullreposet)))
        if wanted:
            ls.append(r)
    return baseset(ls)
2062 2074
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions, keeping only
    # those present in subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    revs = [int(r) for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
2071 2083
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of binary-hex nodes, keeping only the
    # corresponding revisions present in subset
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    revs = [torev(node.bin(r)) for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
2081 2093
# map from predicate name (as written in revset expressions) to the
# function implementing it; names starting with '_' are internal-only
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2156 2168
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: must be kept a subset of the keys of `symbols` above
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2230 2242
# map from parse-tree node type to the function evaluating that node kind
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2246 2258
def optimize(x, small):
    """Optimize a parsed revset expression tree.

    Returns a ``(weight, tree)`` pair: ``weight`` estimates evaluation cost
    (used to put the cheaper operand of an 'and' first) and ``tree`` is the
    rewritten expression.  ``small`` hints that the expression is expected
    to yield few revisions, lowering the weight of single-revision nodes.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is evaluated as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        # '::x' -> 'ancestors(x)'
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'x::' -> 'descendants(x)'
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE(review): substring membership — any op that is a substring of
        # this literal would also match; presumably only the three exact
        # words occur here.
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into one '_list' call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        #   ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; they add no cost of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        # NOTE(review): substring membership again — see note above.
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # hand-tuned cost estimates per predicate
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2379 2391
2380 2392 _aliasarg = ('func', ('symbol', '_aliasarg'))
2381 2393 def _getaliasarg(tree):
2382 2394 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2383 2395 return X, None otherwise.
2384 2396 """
2385 2397 if (len(tree) == 3 and tree[:2] == _aliasarg
2386 2398 and tree[2][0] == 'string'):
2387 2399 return tree[2][1]
2388 2400 return None
2389 2401
2390 2402 def _checkaliasarg(tree, known=None):
2391 2403 """Check tree contains no _aliasarg construct or only ones which
2392 2404 value is in known. Used to avoid alias placeholders injection.
2393 2405 """
2394 2406 if isinstance(tree, tuple):
2395 2407 arg = _getaliasarg(tree)
2396 2408 if arg is not None and (not known or arg not in known):
2397 2409 raise error.UnknownIdentifier('_aliasarg', [])
2398 2410 for t in tree:
2399 2411 _checkaliasarg(t, known)
2400 2412
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (built once at import time; byte-wise chr/xrange assumes Python 2
# strings, and ord(c) > 127 admits any non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2405 2417
def _tokenizealias(program, lookup=None):
    """Tokenize an alias declaration or definition.

    Identical to the regular revset tokenizer except that ``$`` is also
    accepted as an initial symbol letter (for backward compatibility);
    callers are expected to reject any unexpected use of ``$`` themselves.
    """
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2415 2427
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            # trailing garbage after a parsable prefix
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # each argument must be a plain symbol: no nesting, no literals
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2486 2498
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # Wrap the alias tokenizer so that every symbol naming a declared
        # argument is rewritten into an _aliasarg('NAME') placeholder call.
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-symbols other than declared arguments are invalid
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        # parsing stopped before consuming the whole definition
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2551 2563
class revsetalias(object):
    """A user-defined revset alias from a 'revsetalias' config entry."""
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # name: alias name; tree: parsed declaration; args: argument names
        # (None for symbol-style aliases); error: parse failure text or None
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # replacement: the definition's parse tree, with argument
            # references turned into _aliasarg placeholders
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2576 2588
2577 2589 def _getalias(aliases, tree):
2578 2590 """If tree looks like an unexpanded alias, return it. Return None
2579 2591 otherwise.
2580 2592 """
2581 2593 if isinstance(tree, tuple) and tree:
2582 2594 if tree[0] == 'symbol' and len(tree) == 2:
2583 2595 name = tree[1]
2584 2596 alias = aliases.get(name)
2585 2597 if alias and alias.args is None and alias.tree == tree:
2586 2598 return alias
2587 2599 if tree[0] == 'func' and len(tree) > 1:
2588 2600 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2589 2601 name = tree[1][1]
2590 2602 alias = aliases.get(name)
2591 2603 if alias and alias.args is not None and alias.tree == tree[:2]:
2592 2604 return alias
2593 2605 return None
2594 2606
def _expandargs(tree, args):
    """Recursively substitute each _aliasarg placeholder in tree with the
    value registered under the same name in args.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2605 2617
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used to
    detect cycles); 'cache' memoizes fully-expanded alias bodies by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            # the alias refers (directly or indirectly) to itself
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute the recursively-expanded
            # actual arguments for the _aliasarg placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node: recurse into children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2639 2651
def findaliases(ui, tree, showwarning=None):
    """Expand all user-configured [revsetalias] aliases in tree."""
    _checkaliasarg(tree)
    byname = {}
    for key, value in ui.configitems('revsetalias'):
        alias = revsetalias(key, value)
        byname[alias.name] = alias
    expanded = _expandaliases(byname, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(byname.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2654 2666
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: fold the children instead
        return tuple(foldconcat(t) for t in tree)
    # walk the '_concat' chain depth-first, collecting leaf texts in order
    stack = [tree]
    parts = []
    while stack:
        elem = stack.pop()
        if elem[0] == '_concat':
            stack.extend(reversed(elem[1:]))
        elif elem[0] in ('string', 'symbol'):
            parts.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (elem[0])
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2675 2687
def parse(spec, lookup=None):
    """Parse spec into a simplified parse tree.

    Raises error.ParseError when the spec has trailing garbage.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2682 2694
def posttreebuilthook(tree, repo):
    """Extension hook point called once the parse tree is final.

    Invoked by _makematcher after alias expansion, '##' folding and
    optimization; the default implementation does nothing.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2686 2698
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec"""
    if not spec:
        raise error.ParseError(_("empty query"))
    # a repo allows tokenize to recognize existing symbols
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2695 2707
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no spec at all: a matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        combined = parse(specs[0], lookup)
    else:
        # several specs: union them under a single 'or' node
        combined = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, combined, repo)
2713 2725
def _makematcher(ui, tree, repo):
    """Turn a parse tree into a callable matcher(repo, subset)."""
    # expand aliases, fold '##', optimize, then notify extensions
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # wrap plain collections into a smartset
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2729 2741
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote and escape via repr of the str() form
        return repr(str(s))

    def argtype(c, arg):
        # render one scalar argument according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list; known element types use the _list/_intlist/_hexlist
        # helpers, anything else is split in halves joined with 'or'
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    output = []
    pos = 0
    argindex = 0
    total = len(expr)
    while pos < total:
        c = expr[pos]
        if c != '%':
            output.append(c)
            pos += 1
            continue
        pos += 1
        d = expr[pos]
        if d == '%':
            output.append(d)
        elif d in 'dsnbr':
            output.append(argtype(d, args[argindex]))
            argindex += 1
        elif d == 'l':
            # a list of some type
            pos += 1
            d = expr[pos]
            output.append(listexp(list(args[argindex]), d))
            argindex += 1
        else:
            raise util.Abort('unexpected revspec format character %s' % d)
        pos += 1

    return ''.join(output)
2823 2835
def prettyformat(tree):
    """Return a readable rendering of a revset parse tree.

    Delegates to parser.prettyformat, treating 'string' and 'symbol'
    nodes as leaves.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2826 2838
def depth(tree):
    """Return the nesting depth of a parse tree (0 for a leaf)."""
    if not isinstance(tree, tuple):
        return 0
    # a tuple is one level deeper than its deepest child
    return 1 + max(depth(subtree) for subtree in tree)
2832 2844
def funcsused(tree):
    """Return the set of function names referenced anywhere in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves reference no functions
        return set()
    names = set()
    for subtree in tree[1:]:
        names.update(funcsused(subtree))
    if tree[0] == 'func':
        # the function's own name lives in its first child symbol
        names.add(tree[1][1])
    return names
2843 2855
class abstractsmartset(object):
    """Base class for all smartsets: ordered collections of revision numbers.

    Subclasses must implement the abstract methods below; the concrete
    helpers (min, max, __and__, __add__, __sub__, filter) are expressed
    in terms of them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # the first value of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # the first value of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=False):
        """get the set to iterate in an ascending or descending order"""
        # the default used to be reverse=True, inconsistent with every
        # concrete implementation in this module (baseset, filteredset,
        # addset, generatorset, spanset all default to reverse=False);
        # aligned here for consistency - the method is abstract, so no
        # behavior changes.
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2948 2960
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # keep a real list; copy only when handed some other iterable
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # None -> insertion order, True -> ascending, False -> descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily built set for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built sorted copy for ordered iteration
        return sorted(self._list)

    def __iter__(self):
        direction = self._ascending
        if direction is None:
            return iter(self._list)
        if direction:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound membership test of the underlying set
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no explicit order yet: flip the backing list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        direction = self._ascending
        if direction is None:
            return self._list[0]
        if direction:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        direction = self._ascending
        if direction is None:
            return self._list[-1]
        if direction:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3043 3055
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        self._cache = {}

    def __contains__(self, x):
        memo = self._cache
        try:
            return memo[x]
        except KeyError:
            pass
        verdict = memo[x] = x in self._subset and self._condition(x)
        return verdict

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # lazily yield only the members passing the predicate
        predicate = self._condition
        return (x for x in it if predicate(x))

    @property
    def fastasc(self):
        fast = self._subset.fastasc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    @property
    def fastdesc(self):
        fast = self._subset.fastdesc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    def __nonzero__(self):
        for _x in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset(self))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for rev in self:
            return rev
        return None

    def last(self):
        fastit = None
        if self.isascending():
            fastit = self.fastdesc
        elif self.isdescending():
            fastit = self.fastasc
        if fastit is None:
            # no fast reversed iteration available: scan to the end
            rev = None
            for rev in self:
                pass
            return rev
        for rev in fastit():
            return rev
        return None  # empty case

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3133 3145
3134 3146 def _iterordered(ascending, iter1, iter2):
3135 3147 """produce an ordered iteration from two iterators with the same order
3136 3148
3137 3149 The ascending is used to indicated the iteration direction.
3138 3150 """
3139 3151 choice = max
3140 3152 if ascending:
3141 3153 choice = min
3142 3154
3143 3155 val1 = None
3144 3156 val2 = None
3145 3157 try:
3146 3158 # Consume both iterators in an ordered way until one is empty
3147 3159 while True:
3148 3160 if val1 is None:
3149 3161 val1 = iter1.next()
3150 3162 if val2 is None:
3151 3163 val2 = iter2.next()
3152 3164 next = choice(val1, val2)
3153 3165 yield next
3154 3166 if val1 == next:
3155 3167 val1 = None
3156 3168 if val2 == next:
3157 3169 val2 = None
3158 3170 except StopIteration:
3159 3171 # Flush any remaining values and consume the other one
3160 3172 it = iter2
3161 3173 if val1 is not None:
3162 3174 yield val1
3163 3175 it = iter1
3164 3176 elif val2 is not None:
3165 3177 # might have been equality and both are empty
3166 3178 yield val2
3167 3179 for val in it:
3168 3180 yield val
3169 3181
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # ascending: None -> arbitrary order; True/False -> both inputs are
        # assumed ordered in that direction and are merged lazily
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        self._ascending = ascending
        self._genlist = None   # cached baseset of the union, once iterated
        self._asclist = None   # cached sorted list derived from _genlist

    def __len__(self):
        # forces full materialization of the union (see _list)
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # build and cache the union as a baseset by iterating once
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # already materialized: iterate the cached sorted list
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # one side cannot iterate fast: no fast merged iteration
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3369 3381
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None if the generator order is unknown, otherwise True
        (ascending) or False (descending); enables the optimised fast
        iterator and membership tests below.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        # pull values out of the wrapped generator, recording them for
        # later iterations and membership tests
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze a sorted copy and switch to
            # cheap list-based fast iterators
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # forces complete consumption of the wrapped generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # BUG FIX: this used to call self.first(), which returned the
            # first element (e.g. the minimum of an ascending set) instead
            # of the last one; recurse into last() now that the fast
            # iterators are populated.
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3543 3555
3544 3556 class spanset(abstractsmartset):
3545 3557 """Duck type for baseset class which represents a range of revisions and
3546 3558 can work lazily and without having all the range in memory
3547 3559
3548 3560 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3549 3561 notable points:
3550 3562 - when x < y it will be automatically descending,
3551 3563 - revision filtered with this repoview will be skipped.
3552 3564
3553 3565 """
3554 3566 def __init__(self, repo, start=0, end=None):
3555 3567 """
3556 3568 start: first revision included the set
3557 3569 (default to 0)
3558 3570 end: first revision excluded (last+1)
3559 3571 (default to len(repo)
3560 3572
3561 3573 Spanset will be descending if `end` < `start`.
3562 3574 """
3563 3575 if end is None:
3564 3576 end = len(repo)
3565 3577 self._ascending = start <= end
3566 3578 if not self._ascending:
3567 3579 start, end = end + 1, start +1
3568 3580 self._start = start
3569 3581 self._end = end
3570 3582 self._hiddenrevs = repo.changelog.filteredrevs
3571 3583
    def sort(self, reverse=False):
        # only the iteration direction changes; __init__ keeps the stored
        # bounds normalized ascending
        self._ascending = not reverse
3574 3586
    def reverse(self):
        # flip the current iteration direction
        self._ascending = not self._ascending
3577 3589
    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the current repoview filter
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r
3583 3595
3584 3596 def __iter__(self):
3585 3597 if self._ascending:
3586 3598 return self.fastasc()
3587 3599 else:
3588 3600 return self.fastdesc()
3589 3601
3590 3602 def fastasc(self):
3591 3603 iterrange = xrange(self._start, self._end)
3592 3604 if self._hiddenrevs:
3593 3605 return self._iterfilter(iterrange)
3594 3606 return iter(iterrange)
3595 3607
3596 3608 def fastdesc(self):
3597 3609 iterrange = xrange(self._end - 1, self._start - 1, -1)
3598 3610 if self._hiddenrevs:
3599 3611 return self._iterfilter(iterrange)
3600 3612 return iter(iterrange)
3601 3613
3602 3614 def __contains__(self, rev):
3603 3615 hidden = self._hiddenrevs
3604 3616 return ((self._start <= rev < self._end)
3605 3617 and not (hidden and rev in hidden))
3606 3618
3607 3619 def __nonzero__(self):
3608 3620 for r in self:
3609 3621 return True
3610 3622 return False
3611 3623
3612 3624 def __len__(self):
3613 3625 if not self._hiddenrevs:
3614 3626 return abs(self._end - self._start)
3615 3627 else:
3616 3628 count = 0
3617 3629 start = self._start
3618 3630 end = self._end
3619 3631 for rev in self._hiddenrevs:
3620 3632 if (end < rev <= start) or (start <= rev < end):
3621 3633 count += 1
3622 3634 return abs(self._end - self._start) - count
3623 3635
3624 3636 def isascending(self):
3625 3637 return self._ascending
3626 3638
3627 3639 def isdescending(self):
3628 3640 return not self._ascending
3629 3641
3630 3642 def first(self):
3631 3643 if self._ascending:
3632 3644 it = self.fastasc
3633 3645 else:
3634 3646 it = self.fastdesc
3635 3647 for x in it():
3636 3648 return x
3637 3649 return None
3638 3650
3639 3651 def last(self):
3640 3652 if self._ascending:
3641 3653 it = self.fastdesc
3642 3654 else:
3643 3655 it = self.fastasc
3644 3656 for x in it():
3645 3657 return x
3646 3658 return None
3647 3659
3648 3660 def __repr__(self):
3649 3661 d = {False: '-', True: '+'}[self._ascending]
3650 3662 return '<%s%s %d:%d>' % (type(self).__name__, d,
3651 3663 self._start, self._end - 1)
3652 3664
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # `other` lacks the smartset interface; since it was used
            # with "&" we assume it is at least set-like, and strip any
            # hidden revision from it (smartsets are assumed pure).
            other = baseset(other - self._hiddenrevs)

        # XXX Sorting here is wrong whenever fullreposet is used as
        # bootstrap: a revset like giveme312() -> [3, 1, 2] comes out as
        # 1, 2, 3 for 'hg log -r "giveme312()"' (wrong). The sort cannot
        # simply be dropped because other usage still needs it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right), and some
        # faulty revset implementations rely on it (eg: children as of its
        # state in e8075329c5fb). Once both points are fixed, this call
        # can move into the if clause above.
        other.sort(reverse=self.isdescending())
        return other
3691 3703
def prettyformatset(revs):
    """Render repr(revs) with one nested smartset per line, indented by
    nesting depth (depth == unbalanced '<' seen so far)."""
    output = []
    text = repr(revs)
    pos = 0
    total = len(text)
    while pos < total:
        # next chunk runs up to (but not including) the next '<'
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        output.append((depth, text[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(' ' * depth + chunk for depth, chunk in output)
3705 3717
# tell hggettext to extract docstrings from these functions:
# NOTE(review): 'symbols' is defined earlier in this module — presumably the
# name -> predicate-function table for revsets; verify against its definition.
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now