##// END OF EJS Templates
reachableroots: sort the smartset in the pure version too...
Pierre-Yves David -
r26091:60bbd4f9 default
parent child Browse files
Show More
@@ -1,3722 +1,3724
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a generatorset yielding the ancestors of ``revs`` in
    descending revision order.  When ``followfirst`` is true, only the
    first parent of each revision is followed.
    """
    if followfirst:
        # parentrevs(...)[:1] keeps only the first parent
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # merge the input revs (highest first) with parents discovered
        # along the way; a max-heap is emulated by pushing negated revs
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # consumed one input rev; feed the next one to the heap
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a generatorset yielding descendants in ascending order.
    """
    if followfirst:
        # consider only the first parent when testing descent
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # any rev above 'first' descends from the roots iff one of
            # its (possibly cut) parents has already been marked seen
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Always returns a sorted baseset.  (Previously the ``not includepath``
    branch leaked the internal plain ``set``, giving callers an
    inconsistent, unsorted return type.)
    """
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves matter; don't walk past them
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        # wrap in a sorted smartset so the return type matches the
        # includepath case below (callers expect a smartset, not a set)
        reachable = baseset(reachable)
        reachable.sort()
        return reachable
    # second pass: pull in every seen rev lying on a path from a root
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    reachable = baseset(reachable)
    reachable.sort()
    return reachable
127 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = list(roots)
    heads = list(heads)
    try:
        # the changelog's native implementation, when present, is faster
        nativefn = repo.changelog.reachableroots
        return nativefn(minroot, heads, roots, includepath)
    except AttributeError:
        # no native support available: use the pure-python fallback
        return reachablerootspure(repo, minroot, roots, heads, includepath)
143 145
# Parser configuration: maps each token type to its Pratt-parser entry.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that are operators, never symbol names
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
181 183
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Yields (type, value, position) tuples; the stream always ends with
    an ('end', None, pos) token.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        # NOTE(review): '-' is reported at the symbol's end
                        # position, not the hyphen's offset — verify intent
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one: the outer loop's pos += 1 re-advances past the
            # character that terminated the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
299 301
def parseerrordetail(inst):
    """Compose a human-readable message from a ParseError instance.

    A two-argument error carries a position; include it in the output.
    """
    args = inst.args
    if len(args) > 1:
        return _('at %s: %s') % (args[1], args[0])
    return args[0]
307 309
308 310 # helpers
309 311
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Raises ParseError(err) when ``x`` is empty or of another node type.
    """
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
314 316
def getlist(x):
    """Flatten a left-nested 'list' parse node into a Python list.

    A non-list node becomes a one-element list; a falsy node yields [].
    """
    if not x:
        return []
    items = []
    # unwind ('list', rest, item) chains iteratively instead of recursing
    while x[0] == 'list':
        items.append(x[2])
        x = x[1]
    items.append(x)
    items.reverse()
    return items
321 323
def getargs(x, min, max, err):
    """Return the flattened argument list of ``x``.

    Raises ParseError(err) unless the argument count lies within
    [min, max]; a negative ``max`` means "no upper bound".
    """
    l = getlist(x)
    ok = len(l) >= min and (max < 0 or len(l) <= max)
    if not ok:
        raise error.ParseError(err)
    return l
327 329
def getargsdict(x, funcname, keys):
    # map positional/keyword parse-tree arguments onto the named ``keys``
    # (space-separated); 'keyvalue' nodes supply keyword arguments
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
331 333
def isvalidsymbol(tree):
    """Report whether ``tree`` is a well-formed ``symbol`` node."""
    kind = tree[0]
    return kind == 'symbol' and len(tree) > 1
336 338
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    # a symbol node is ('symbol', name, pos); the name lives at index 1
    return tree[1]
343 345
def isvalidfunc(tree):
    """Report whether ``tree`` is a well-formed ``func`` node."""
    if tree[0] != 'func':
        return False
    # the callee subtree must itself be a valid symbol
    return len(tree) > 1 and isvalidsymbol(tree[1])
348 350
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a func node is ('func', ('symbol', name, ...), args?)
    return getsymbol(tree[1])
355 357
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # zero-argument call: no argument subtree present
        return []
    return getlist(tree[2])
365 367
def getset(repo, subset, x):
    # evaluate parse tree ``x`` against ``subset`` and return a smartset
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset: pass it through untouched
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # legacy predicate returned a plain iterable: wrap it
    return baseset(s)
380 382
def _getrevsource(repo, r):
    """Return the origin revision recorded in r's extras, if any.

    Checks the graft/transplant/rebase source markers in order and
    resolves the first one naming a revision known to the repo;
    returns None when no marker resolves.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # marker points at an unknown node; try the next label
            pass
    return None
390 392
391 393 # operator methods
392 394
def stringset(repo, subset, x):
    # resolve a bare revision identifier and intersect with subset
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        # null is implicitly a member of the full repo set
        return baseset([x])
    return baseset()
399 401
def rangeset(repo, subset, x, y):
    # evaluate both endpoints against the whole repository
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    # the range runs from the first rev of x to the last rev of y
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ascending span capped with the working-directory pseudo-rev
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working-directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed endpoints produce a descending span
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
424 426
def dagrange(repo, subset, x, y):
    # x::y — revisions on any path from x down to y, via reachableroots
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
432 434
def andset(repo, subset, x, y):
    # intersection: evaluate y within the result of evaluating x
    return getset(repo, getset(repo, subset, x), y)
435 437
def orset(repo, subset, *xs):
    """Union of revset operands.

    Operands are combined by recursive halving, keeping the recursion
    depth logarithmic in the number of operands.
    """
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    return orset(repo, subset, *xs[:mid]) + orset(repo, subset, *xs[mid:])
444 446
def notset(repo, subset, x):
    # complement of x within subset
    return subset - getset(repo, subset, x)
447 449
def listset(repo, subset, a, b):
    # bare 'a, b' lists are only valid as function arguments
    raise error.ParseError(_("can't use a list in this context"))
450 452
def keyvaluepair(repo, subset, k, v):
    # 'key=value' pairs are only valid inside recognized functions
    raise error.ParseError(_("can't use a key-value pair in this context"))
453 455
def func(repo, subset, a, b):
    # dispatch a parsed function call to its registered predicate
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown name: suggest only documented (public) predicates
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
462 464
463 465 # functions
464 466
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status() is the list of added files
    return checkstatus(repo, subset, pat, 1)
476 478
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of everything seen so far
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
501 503
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation for ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    # lazy descending set of every ancestor of the heads
    s = _revancestors(repo, heads, followfirst)
    return subset & s
508 510
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
514 516
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
519 521
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    firstparent = repo.changelog.parentrevs
    ancs = set()
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        steps = n
        while steps > 0:
            r = firstparent(r)[0]
            steps -= 1
        ancs.add(r)
    return subset & ancs
536 538
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # match case-insensitively against each revision's committer field
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
545 547
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() computes the revisions in the requested bisect state
    state = set(hbisect.get(repo, status))
    return subset & state
562 564
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
567 569
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a single dictionary lookup suffices
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: scan every bookmark name
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
605 607
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # branchinfo(rev) -> (branchname, closed); cached per-revision
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: select csets on any branch owning a rev in the set
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
638 640
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolete-marker store maintains the 'bumped' rev cache
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
649 651
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        # only a bundlerepo changelog carries bundlerevs
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
661 663
def checkstatus(repo, subset, pat, field):
    """Filter subset to csets whose status tuple entry ``field`` matches pat.

    ``field`` indexes the repo.status() tuple (e.g. 1 selects added files,
    as used by adds()).
    """
    hasset = matchmod.patkind(pat) == 'set'

    # cache one matcher across revisions; filesets must be rebuilt
    # against each changectx, so they bypass the cache
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # single literal file: cheap containment checks below
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
693 695
def _children(repo, narrow, parentset):
    # return the members of ``narrow`` having a parent in ``parentset``
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            # children always have higher revision numbers than parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
709 711
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    # candidates are restricted to subset; parents come from s
    cs = _children(repo, subset, s)
    return subset & cs
717 719
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    # a cset closes its branch when committed with --close-branch
    return subset.filter(lambda r: repo[r].closesbranch())
725 727
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest of each revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
752 754
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' is stamped into extras by the convert extension
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
774 776
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate returns a predicate over unix timestamps
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
783 785
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        # case-insensitive substring search over the full description
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
796 798
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation for descendants() and _firstdescendants()
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to restore subset's ordering
        result = subset & result
    return result
815 817
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
821 823
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
826 828
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # keep walking the source chain transitively
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
870 872
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolete-marker store maintains the 'divergent' rev cache
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
879 881
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolete-marker store maintains the 'extinct' rev cache
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
888 890
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # with no value argument, mere presence of the label matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
918 920
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # accumulated changelog revisions for all matched files
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: match a single file exactly (the fast path)
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern with an explicit kind: match against the working copy
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev  # remember the original (filtered) linkrev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the filerev -> head node cache
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1033 1035
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # delegate entirely to limit(), which handles argument parsing
    return limit(repo, subset, x)
1039 1041
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow()/_followfirst(); 'name' is used only
    # in error messages, and followfirst=True restricts the ancestor walk to
    # first parents.
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            # file not present in the working directory parent: empty result
            return baseset()
    else:
        # no file argument: follow the ancestors of the working directory
        # parent itself
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1056 1058
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # thin wrapper over the shared _follow() implementation
    return _follow(repo, subset, x, 'follow')
1064 1066
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # Internal predicate; not exposed directly in the revset grammar.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1070 1072
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with the full span drops "null" if the subset had it
    everything = spanset(repo)
    return subset & everything
1078 1080
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(rev):
        # match against changed files, user name and description
        ctx = repo[rev]
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(pattern.search(field) for field in fields)

    return subset.filter(matches)
1099 1101
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # first two characters select the argument kind, rest is the payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # keep a revision as soon as one of its files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1156 1158
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate the real work to _matchfiles with a 'p:' (pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1169 1171
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    headrevs = set()
    cl = repo.changelog
    # collect the head revisions of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        for headnode in nodes:
            headrevs.add(cl.rev(headnode))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1185 1187
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is a member that is not the parent of any member
    members = getset(repo, subset, x)
    parentset = parents(repo, subset, x)
    return members - parentset
1193 1195
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1202 1204
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(rev):
        ctx = repo[rev]
        # compare case-insensitively against files, user and description
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1217 1219
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # take at most 'lim' members of 'os' that are also in the subset;
    # the index is named 'i' instead of 'x' so it no longer shadows the
    # parse-tree argument 'x'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1243 1245
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    # walk the input set backwards so the first 'lim' hits are the last ones
    os.reverse()
    result = []
    it = iter(os)
    # the index is named 'i' instead of 'x' so it no longer shadows the
    # parse-tree argument 'x'
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1270 1272
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        highest = candidates.max()
        # only keep the maximum if the subset actually contains it
        if highest in subset:
            return baseset([highest])
    return baseset()
1281 1283
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(rev):
        # a merge has a real (non -1) second parent
        return cl.parentrevs(rev)[1] != -1

    return subset.filter(ismerge)
1290 1292
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # childcount[r - baserev] counts how many revisions name r as a parent
    childcount = [0] * (len(repo) - baserev)
    for rev in cl.revs(start=baserev + 1):
        for prev in cl.parentrevs(rev):
            if prev >= baserev:
                childcount[prev - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1309 1311
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        lowest = candidates.min()
        # only keep the minimum if the subset actually contains it
        if lowest in subset:
            return baseset([lowest])
    return baseset()
1320 1322
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects modified files
    return checkstatus(repo, subset, pattern, 0)
1332 1334
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact name: look it up directly, failing loudly if unknown
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # regex: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions of all non-deprecated names in the selected
    # namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1370 1372
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hash: resolve it directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            # unknown node or non-hex input: treated as "not found"
            rn = None
    else:
        # shorter prefix: try an unambiguous partial match
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1394 1396
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # intersect with the precomputed set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1402 1404
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every head that is neither in the
        # include set nor one of its descendants
        # (use the idiomatic 'rev not in ...' instead of 'not rev in ...')
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1428 1430
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to the original changeset
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    o = set(_firstsrc(r) for r in dests)
    # revisions without a recorded source map to None; drop them
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1459 1461
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default push path when none is given
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # pushbuffer/popbuffer silence the ui output of the discovery phase
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    # translate the missing nodes back into local revision numbers
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1486 1488
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    firstparents = set(cl.parentrevs(r)[0]
                       for r in getset(repo, fullreposet(repo), x))
    firstparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & firstparents
1505 1507
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wparents = repo[x].parents()
        try:
            prev = wparents[1].rev()
        except IndexError:
            return baseset()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    cl = repo.changelog
    secondparents = set(cl.parentrevs(r)[1]
                        for r in getset(repo, fullreposet(repo), x))
    secondparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & secondparents
1528 1530
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # no argument: parents of the working directory
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # hoist the bound methods out of the loop
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; ask the
                # context for its parents instead
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1547 1549
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: a precomputed per-phase set is available
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        revs = baseset(revs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    else:
        # slow path: query the phase of each revision individually
        phase = repo._phasecache.phase

        def inphase(r):
            return phase(repo, r) == target

        return subset.filter(inphase, cache=False)
1560 1562
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1568 1570
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1576 1578
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    selected = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 keeps the revision itself
            selected.add(r)
            continue
        prevs = cl.parentrevs(r)
        if n == 1:
            selected.add(prevs[0])
        elif len(prevs) > 1:
            # n == 2: only merges have a second parent
            selected.add(prevs[1])
    return subset & selected
1601 1603
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure and return an empty set instead of
        # aborting the whole query
        return baseset()
1615 1617
# for internal use
def _notpublic(repo, subset, x):
    # Select every revision whose phase is not public (draft, secret, ...).
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union all non-public phase sets (index 0 is public)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: query the phase of each revision individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1632 1634
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    return subset.filter(ispublic, cache=False)
1642 1644
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the branch currently checked out locally
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # resolve the identifier on the remote side, then map it back locally
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1677 1679
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1689 1691
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is not stored in the changelog
    if revnum == node.nullrev or revnum in repo.changelog:
        return subset & baseset([revnum])
    return baseset()
1705 1707
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # stop at the first mismatching field: evaluating the
                    # remaining (costlier, e.g. 'diff') fields cannot
                    # change the outcome for this candidate
                    break
            if match:
                return True
        return False

    return subset.filter(matches)
1817 1819
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # evaluate the argument against the current subset, then flip the order
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1825 1827
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no parent (nullrev excluded) inside the candidate set
        return not any(0 <= p and p in candidates for p in parentrevs(r))
    return subset & candidates.filter(isroot)
1838 1840
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # byte-wise complement of a string: ascending sort of the inverted
        # value yields descending order of the original value
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: a plain (-)rev sort is just an in-place smartset sort
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a decorated list (one sort-key tuple per rev,
    # with the rev itself appended last as a stable tie-breaker) and sort it
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE(review): substring membership test — accepts 'user'
                # and 'author', but also any substring like 'use'; kept as-is
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1901 1903
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes are relevant; status is restricted to it
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # 'pat' is only bound when an argument was given; matches() never
        # calls us in the no-argument case, so this is safe
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present on either side of the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1944 1946
1945 1947 def _stringmatcher(pattern):
1946 1948 """
1947 1949 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1948 1950 returns the matcher name, pattern, and matcher function.
1949 1951 missing or unknown prefixes are treated as literal matches.
1950 1952
1951 1953 helper for tests:
1952 1954 >>> def test(pattern, *tests):
1953 1955 ... kind, pattern, matcher = _stringmatcher(pattern)
1954 1956 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1955 1957
1956 1958 exact matching (no prefix):
1957 1959 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1958 1960 ('literal', 'abcdefg', [False, False, True])
1959 1961
1960 1962 regex matching ('re:' prefix)
1961 1963 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1962 1964 ('re', 'a.+b', [False, False, True])
1963 1965
1964 1966 force exact matches ('literal:' prefix)
1965 1967 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1966 1968 ('literal', 're:foobar', [False, True])
1967 1969
1968 1970 unknown prefixes are ignored and treated as literals
1969 1971 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1970 1972 ('literal', 'foo:bar', [False, False, True])
1971 1973 """
1972 1974 if pattern.startswith('re:'):
1973 1975 pattern = pattern[3:]
1974 1976 try:
1975 1977 regex = re.compile(pattern)
1976 1978 except re.error as e:
1977 1979 raise error.ParseError(_('invalid regular expression: %s')
1978 1980 % e)
1979 1981 return 're', pattern, regex.search
1980 1982 elif pattern.startswith('literal:'):
1981 1983 pattern = pattern[8:]
1982 1984 return 'literal', pattern, pattern.__eq__
1983 1985
def _substringmatcher(pattern):
    # like _stringmatcher(), except a literal pattern matches anywhere
    # inside the candidate string instead of requiring full equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        needle = pattern
        def matcher(s):
            return needle in s
    return kind, pattern, matcher
1989 1991
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2018 2020
def tagged(repo, subset, x):
    # alias of tag(); NOTE(review): intentionally has no docstring,
    # presumably to keep it out of the generated revset help — confirm
    return tag(repo, subset, x)
2021 2023
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # the obsolescence layer maintains this set; just intersect with it
    return subset & obsmod.getrevs(repo, 'unstable')
2030 2032
2031 2033
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" is a plain alias of "author"; both names are kept so each
    # appears in the generated help
    return author(repo, subset, x)
2041 2043
# experimental
def wdir(repo, subset, x):
    # no docstring: kept undocumented (hidden from help) while experimental
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is returned only when the
    # subset can contain it: explicit membership, or the full repo set
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2049 2051
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers, preserving order
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    result = []
    known = set()
    for item in s.split('\0'):
        try:
            # fast path for integer revision
            rev = int(item)
            if str(rev) != item or rev not in cl:
                raise ValueError
        except ValueError:
            rev = repo[item].rev()
        if rev in known:
            continue
        if (rev in subset
            or rev == node.nullrev and isinstance(subset, fullreposet)):
            result.append(rev)
            known.add(rev)
    return baseset(result)
2075 2077
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions, keeping only the
    # ones present in the subset (list order preserved, no deduplication)
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(item) for item in s.split('\0')]
    members = subset
    return baseset([rev for rev in wanted if rev in members])
2084 2086
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of binary node ids, keeping only the
    # revisions present in the subset (list order preserved)
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(item)) for item in s.split('\0')]
    members = subset
    return baseset([rev for rev in wanted if rev in members])
2094 2096
# predicate name -> implementation for every symbol the revset language
# knows; names starting with '_' are internal-only helpers
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2169 2171
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# (note: 'grep' and 'contains' are deliberately absent from this list)
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2243 2245
# parse-tree node type -> evaluation function; getset() dispatches through
# this table when evaluating a parsed revset tree
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2259 2261
def optimize(x, small):
    """Optimize parsed tree ``x``, returning ``(weight, newtree)``.

    The weight is a rough relative evaluation cost, used below to put the
    cheaper operand of an 'and' first. ``small`` hints that the caller
    expects a small result set, which lowers the weight of single
    revisions ('string'/'symbol' leaves).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # desugar the various shorthand operators into their canonical forms
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first to shrink the candidate set
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands (in ss) into a
            # single _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # weight the function call by how expensive the predicate is
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2392 2394
2393 2395 _aliasarg = ('func', ('symbol', '_aliasarg'))
2394 2396 def _getaliasarg(tree):
2395 2397 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2396 2398 return X, None otherwise.
2397 2399 """
2398 2400 if (len(tree) == 3 and tree[:2] == _aliasarg
2399 2401 and tree[2][0] == 'string'):
2400 2402 return tree[2][1]
2401 2403 return None
2402 2404
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    value = _getaliasarg(tree)
    if value is not None and (not known or value not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child, including the node's own tag string
    # (non-tuples return immediately above)
    for child in tree:
        _checkaliasarg(child, known)
2413 2415
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is accepted for backward-compatible alias arguments; bytes > 127
# allow non-ASCII symbol names)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2418 2420
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as regular revsets, with a widened initial-letter set
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2428 2430
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            # trailing garbage after a successful parse
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols; nesting/strings rejected
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2499 2501
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used to separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols are reserved for declared arguments
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2564 2566
class revsetalias(object):
    """A user-configured revset alias: parsed declaration + definition.

    On parse failure ``self.error`` holds the message instead of raising,
    so a broken alias only breaks queries that actually use it.
    """
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2589 2591
2590 2592 def _getalias(aliases, tree):
2591 2593 """If tree looks like an unexpanded alias, return it. Return None
2592 2594 otherwise.
2593 2595 """
2594 2596 if isinstance(tree, tuple) and tree:
2595 2597 if tree[0] == 'symbol' and len(tree) == 2:
2596 2598 name = tree[1]
2597 2599 alias = aliases.get(name)
2598 2600 if alias and alias.args is None and alias.tree == tree:
2599 2601 return alias
2600 2602 if tree[0] == 'func' and len(tree) > 1:
2601 2603 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2602 2604 name = tree[1][1]
2603 2605 alias = aliases.get(name)
2604 2606 if alias and alias.args is not None and alias.tree == tree[:2]:
2605 2607 return alias
2606 2608 return None
2607 2609
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        # not a placeholder: rebuild the node with expanded children
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2618 2620
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used to
    detect cycles); 'cache' memoizes the argument-free expansion of each
    alias by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            # cache the expansion before argument substitution
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # arguments are expanded with a fresh stack: they are not part
            # of the alias body and cannot extend the current cycle
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2652 2654
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases in 'tree'.

    Alias definitions come from the [revsetalias] config section.  When
    'showwarning' is provided, it is invoked once per broken alias
    definition that was never actually used by 'tree'.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        aliases[entry.name] = entry
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for _name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return expanded
2667 2669
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # Flatten arbitrarily nested _concat nodes left-to-right and glue the
    # leaf values together into a single string literal node.
    parts = []
    stack = [tree]
    while stack:
        item = stack.pop()
        kind = item[0]
        if kind == '_concat':
            stack.extend(reversed(item[1:]))
        elif kind in ('string', 'symbol'):
            parts.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2688 2690
def parse(spec, lookup=None):
    """Parse a revset specification string into a parse tree.

    'lookup' is an optional callable used by the tokenizer to decide
    whether a bare name refers to an existing revision.  Raises
    ParseError when trailing input cannot be parsed.
    """
    tokens = tokenize(spec, lookup=lookup)
    tree, pos = parser.parser(elements).parse(tokens)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2695 2697
def posttreebuilthook(tree, repo):
    """Hook point invoked with the final optimized tree.

    Does nothing by itself; extensions may wrap it to execute code on
    the optimized tree.
    """
2699 2701
def match(ui, spec, repo=None):
    """Create a matcher for a single revision specification.

    Raises ParseError when 'spec' is empty.  When 'repo' is given, names
    in the spec are resolved against the revisions it contains.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2708 2710
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # nothing requested: the matcher selects the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # several specs are combined under a single 'or' node
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2726 2728
def _makematcher(ui, tree, repo):
    """Turn a parsed revset tree into a callable matcher.

    The returned function takes (repo, subset=None) and evaluates the
    tree against 'subset' (the whole repo when subset is None).
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # anything that is not already a smartset gets wrapped in one
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2742 2744
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # escaped, single-quoted string literal
        return repr(str(s))

    def fmtone(code, arg):
        # render one non-list argument according to its type code
        if code == 'd':
            return str(int(arg))
        elif code == 's':
            return quote(arg)
        elif code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif code == 'n':
            return quote(node.hex(arg))
        elif code == 'b':
            return quote(arg.branch())

    def fmtlist(items, code):
        # render a list argument; list-capable types collapse into a
        # single _list/_intlist/_hexlist call, others are split into a
        # balanced 'or' tree to keep parser recursion shallow
        count = len(items)
        if count == 0:
            return "_list('')"
        elif count == 1:
            return fmtone(code, items[0])
        elif code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in items)
        elif code == 's':
            return "_list('%s')" % "\0".join(items)
        elif code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in items)
        elif code == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in items)

        half = count // 2
        return '(%s or %s)' % (fmtlist(items[:half], code),
                               fmtlist(items[half:], code))

    parts = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        ch = expr[pos]
        if ch != '%':
            parts.append(ch)
        else:
            pos += 1
            code = expr[pos]
            if code == '%':
                # '%%' is a literal percent sign
                parts.append(code)
            elif code in 'dsnbr':
                parts.append(fmtone(code, args[argindex]))
                argindex += 1
            elif code == 'l':
                # a list of some type; the next character gives the type
                pos += 1
                code = expr[pos]
                parts.append(fmtlist(list(args[argindex]), code))
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s'
                                 % code)
        pos += 1

    return ''.join(parts)
2836 2838
def prettyformat(tree):
    """Return a human-readable, indented rendering of a parse tree."""
    leaves = ('string', 'symbol')
    return parser.prettyformat(tree, leaves)
2839 2841
def depth(tree):
    """Return the nesting depth of a parse tree; leaves count as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2845 2847
def funcsused(tree):
    """Return the set of function names referenced anywhere in a tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        # ('func', ('symbol', name), args...) -> record the name
        used.add(tree[1][1])
    return used
2856 2858
class abstractsmartset(object):
    """Abstract base class of all smartset implementations.

    A smartset represents an ordered set of revision numbers.  Subclasses
    implement membership, iteration and ordering control; this base class
    provides generic min()/max() built on the fast iterators and the
    set-combination operators (&, +, -, filter).
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # ascending iteration: the first value produced is the minimum
            for r in self.fastasc():
                return r
            # mimic the error raised by min() on an empty sequence
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # descending iteration: the first value produced is the maximum
            for r in self.fastdesc():
                return r
            # mimic the error raised by max() on an empty sequence
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repository changes nothing
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins have no func_code attribute, so they cannot be wrapped by
        # cachefunc -- but they do not need the caching either
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2961 2963
class baseset(abstractsmartset):
    """Eagerly materialized smartset backed by a plain list.

    Revisions are stored in a list (insertion order); a set for O(1)
    membership and an ascending-sorted copy for ordered iteration are
    derived lazily.  `_ascending` is tri-state: None means iterate in
    insertion order, True/False mean ascending/descending sorted order.
    """
    def __init__(self, data=()):
        if isinstance(data, list):
            self._list = data
        else:
            if isinstance(data, set):
                # reuse the caller's set for membership testing
                self._set = data
            self._list = list(data)
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bound method of the underlying set, cached on the instance
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no defined order yet: flip the concrete list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        return len(self) <= 1 or self._ascending is True

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        return len(self) <= 1 or self._ascending is False

    def first(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        prefix = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, prefix, self._list)
3058 3060
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        subset: a smartset providing the candidate revisions
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoized condition results, keyed by revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # membership needs both presence in the underlying subset and
            # a positive answer from the filtering condition
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast ascending iteration exists iff the subset provides one
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        # fast descending iteration exists iff the subset provides one
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # true as soon as a single revision passes the filter
        for r in self:
            return True
        return False

    def __len__(self):
        # count lazily instead of materializing every revision into a
        # temporary baseset just to take its length
        count = 0
        for r in self:
            count += 1
        return count

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            # the last element in user order is the first one when
            # iterating in the opposite direction
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iteration available: scan to the end
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3148 3150
3149 3151 def _iterordered(ascending, iter1, iter2):
3150 3152 """produce an ordered iteration from two iterators with the same order
3151 3153
3152 3154 The ascending is used to indicated the iteration direction.
3153 3155 """
3154 3156 choice = max
3155 3157 if ascending:
3156 3158 choice = min
3157 3159
3158 3160 val1 = None
3159 3161 val2 = None
3160 3162 try:
3161 3163 # Consume both iterators in an ordered way until one is empty
3162 3164 while True:
3163 3165 if val1 is None:
3164 3166 val1 = iter1.next()
3165 3167 if val2 is None:
3166 3168 val2 = iter2.next()
3167 3169 next = choice(val1, val2)
3168 3170 yield next
3169 3171 if val1 == next:
3170 3172 val1 = None
3171 3173 if val2 == next:
3172 3174 val2 = None
3173 3175 except StopIteration:
3174 3176 # Flush any remaining values and consume the other one
3175 3177 it = iter2
3176 3178 if val1 is not None:
3177 3179 yield val1
3178 3180 it = iter1
3179 3181 elif val2 is not None:
3180 3182 # might have been equality and both are empty
3181 3183 yield val2
3182 3184 for val in it:
3183 3185 yield val
3184 3186
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): _iter appears unused by the methods below -- confirm
        self._iter = None
        self._ascending = ascending
        # cache of all values as a baseset, filled on first full enumeration
        self._genlist = None
        # ascending-sorted list of all values, computed lazily when needed
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # iterate r1 fully, then whatever of r2 was not in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # all values are already known: iterate the sorted cache
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # one side cannot iterate fast in this direction
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # all values are already known: iterate the sorted cache
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # one side cannot iterate fast in this direction
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # no defined order: flip the concrete cached list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # first element of the reversed iteration order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3384 3386
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        iterasc: None when the generator order is unknown, True/False when
        it is known to produce ascending/descending values.
        """
        self._gen = gen
        # ascending-sorted list of all values (set once gen is exhausted)
        self._asclist = None
        # membership results discovered so far
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # instance attributes shadow the class-level defaults: install
            # the fast iterator and the membership test that match the
            # generator's known ordering
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # values only grow from here on: x cannot appear anymore
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # values only shrink from here on: x cannot appear anymore
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen installed fast iterators
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                # serve already-produced values first, then pull new ones
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze the results and switch both
            # directions to fast list-based iteration
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen installs
            # both fast iterators, so this recursion terminates.  (The
            # previous code recursed into first() here, returning the
            # wrong end of the set.)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3558 3560
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to start <= end, shifting both bounds by one so the
            # originally included/excluded revisions stay the same
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # non-empty as soon as one non-hidden revision exists in the range
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions that fall inside the range
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # NOTE(review): __init__ normalizes to start <= end, so the
                # first disjunct looks unreachable -- confirm before
                # simplifying
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # first element of the opposite-direction iteration
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
3663 3665 def __repr__(self):
3664 3666 d = {False: '-', True: '+'}[self._ascending]
3665 3667 return '<%s%s %d:%d>' % (type(self).__name__, d,
3666 3668 self._start, self._end - 1)
3667 3669
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            result = other
        else:
            # `other` is not a smartset: it was used with "&", so assume
            # it is at least a set-like object and strip hidden revisions
            # from it (this boldly assumes all smartsets are pure)
            result = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        result.sort(reverse=self.isdescending())
        return result
3706 3708
def prettyformatset(revs):
    """Return an indented, multi-line rendering of ``repr(revs)``.

    Each nested smartset (opened by '<') is put on its own line and
    indented by one space per nesting depth, making composed smartset
    structures readable in debug output.
    """
    rs = repr(revs)
    lines = []
    pos = 0
    size = len(rs)
    while pos < size:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = size
        # nesting depth = unmatched '<' seen before this segment
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        lines.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(lines)
3720 3722
# tell hggettext to extract docstrings from these functions:
# NOTE(review): `symbols` is defined earlier in this file (outside this
# chunk); presumably it maps revset predicate names to their functions,
# whose docstrings double as translatable help text -- confirm upstream.
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now