##// END OF EJS Templates
revset: cache smartset's min/max...
Pierre-Yves David -
r26099:ab66c1de default
parent child Browse files
Show More
@@ -1,3723 +1,3725 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a descending generatorset of the ancestors of ``revs``
    (the members of ``revs`` themselves are included).  When
    ``followfirst`` is true, only first parents are followed.
    """
    if followfirst:
        # parentrevs(rev)[:1] keeps only the first parent
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk from highest to lowest revision.  heapq is a min-heap, so
        # revisions are pushed negated to get max-heap behavior.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current == inputrev:
                # the top of the heap reached the next input revision:
                # feed the following one so it merges into the walk
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns an ascending generatorset of revisions descending from a
    member of ``revs``.  With ``followfirst``, descent is only counted
    through first-parent links.
    """
    if followfirst:
        # parentrevs(rev)[:1] keeps only the first parent
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: any revision with an already-seen
            # (first) parent is itself a descendant
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used by reachableroots() when the changelog has
    no native implementation.  ``minroot`` bounds the walk: parents
    below it are never visited.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the root itself is wanted; don't descend further
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass in ascending order: propagate reachability upward so
    # every revision on a <roots>::<heads> path is included
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
127 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    ``roots`` must be a smartset (``roots.min()`` is used); the result
    is a sorted baseset.
    """
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # prefer the native changelog implementation when available
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
144 144
# Grammar table consumed by the parser: for each token type, its binding
# strength and how it may appear (primary, prefix, infix, suffix).
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# words that tokenize() treats as operators, never as symbol names
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (byte-oriented: this module is Python 2 code, chr(0)..chr(255))
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    # Each yielded token is a (type, value, position) triple; ``lookup``
    # (when given) tests whether a string names an existing symbol such
    # as a tag or bookmark.
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
300 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308
309 309 # helpers
310 310
def getstring(x, err):
    """Return the text payload of a 'string' or 'symbol' node.

    Raises ParseError(err) for any other node, including None.
    """
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
315 315
def getlist(x):
    """Flatten a parsed argument tree into a list of argument nodes.

    The parser nests comma-separated arguments left-recursively as
    ('list', rest, item) nodes; a false/None tree yields [].  Iterative
    rather than recursive: the original recursion built a new list per
    level (quadratic) and could hit the recursion limit on very long
    argument lists.
    """
    if not x:
        return []
    args = []
    while x[0] == 'list':
        # collect right-to-left, then reverse once at the end
        args.append(x[2])
        x = x[1]
        if not x:
            # defensive: mirrors getlist(None) == [] in the recursion
            break
    else:
        args.append(x)
    args.reverse()
    return args
322 322
def getargs(x, min, max, err):
    """Return the flattened argument list of ``x`` after bounds checking.

    Raises ParseError(err) unless min <= len(args) <= max; a negative
    ``max`` means "no upper bound".
    """
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if 0 <= max < len(args):
        raise error.ParseError(err)
    return args
328 328
def getargsdict(x, funcname, keys):
    """Map the arguments of ``x`` onto the space-separated ``keys``."""
    args = getlist(x)
    return parser.buildargsdict(args, funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
332 332
def isvalidsymbol(tree):
    """Report whether ``tree`` is a well-formed ``symbol`` node."""
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
337 337
def getsymbol(tree):
    """Return the name carried by a ``symbol`` node.

    Callers are expected to have validated ``tree`` with
    ``isvalidsymbol`` beforehand.
    """
    return tree[1]
344 344
def isvalidfunc(tree):
    """Report whether ``tree`` is a well-formed ``func`` node."""
    if tree[0] != 'func':
        return False
    return len(tree) > 1 and isvalidsymbol(tree[1])
349 349
def getfuncname(tree):
    """Return the name of the function in a ``func`` node.

    Callers are expected to have validated ``tree`` with
    ``isvalidfunc`` beforehand.
    """
    return getsymbol(tree[1])
356 356
def getfuncargs(tree):
    """Return the argument list of a ``func`` node (possibly empty).

    Callers are expected to have validated ``tree`` with
    ``isvalidfunc`` beforehand.
    """
    if len(tree) <= 2:
        # zero-argument call: no argument subtree at all
        return []
    return getlist(tree[2])
366 366
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` within ``subset`` and return a smartset.

    Dispatches on the node type via the module-level ``methods`` table.
    A result that is not already a smartset (detected by the presence of
    ``isascending``) is wrapped in a baseset; in devel mode this also
    warns about revset functions returning plain lists.
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset: return as-is
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
381 381
def _getrevsource(repo, r):
    """Return the source revision recorded in r's extra data, if any.

    Checks the graft, transplant and rebase markers in that order;
    returns None when no recorded source resolves to a revision.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # recorded source is unknown in this repo; try the next label
            pass
    return None
391 391
392 392 # operator methods
393 393
def stringset(repo, subset, x):
    """Resolve a plain revision identifier against ``subset``."""
    x = repo[x].rev()
    if x in subset:
        return baseset([x])
    if x == node.nullrev and isinstance(subset, fullreposet):
        # null is an implicit member of the full repo set
        return baseset([x])
    return baseset()
400 400
def rangeset(repo, subset, x, y):
    """Evaluate the 'x:y' range operator.

    Uses the first revision of x and the last revision of y as
    endpoints; either endpoint may be the working-directory pseudo
    revision (wdirrev), which cannot live inside a spanset and is
    appended/prepended as a singleton instead.
    """
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ascending range ending at the working directory
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending span; spanset's stop bound is exclusive
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
425 425
def dagrange(repo, subset, x, y):
    """Evaluate the 'x::y' DAG range operator within ``subset``."""
    full = fullreposet(repo)
    roots = getset(repo, full, x)
    heads = getset(repo, full, y)
    xs = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
433 433
def andset(repo, subset, x, y):
    """Intersection: evaluate y inside the result of evaluating x."""
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
436 436
def orset(repo, subset, *xs):
    """Union of the given expressions.

    Splits the operand list in half and recurses so the recursion depth
    stays logarithmic in the number of operands.
    """
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
445 445
def notset(repo, subset, x):
    """Everything in ``subset`` except revisions matching ``x``."""
    excluded = getset(repo, subset, x)
    return subset - excluded
448 448
def listset(repo, subset, a, b):
    """Reject a bare revision list used outside a function call."""
    msg = _("can't use a list in this context")
    raise error.ParseError(msg)
451 451
def keyvaluepair(repo, subset, k, v):
    """Reject a key=value pair used outside a supporting function."""
    msg = _("can't use a key-value pair in this context")
    raise error.ParseError(msg)
454 454
def func(repo, subset, a, b):
    """Dispatch a parsed function call to its revset implementation.

    Unknown names raise UnknownIdentifier listing only documented
    symbols (undocumented ones are deliberately hidden from the hint).
    """
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    documented = lambda fn: getattr(fn, '__doc__', None) is not None
    syms = [name for (name, fn) in symbols.items() if documented(fn)]
    raise error.UnknownIdentifier(a[1], syms)
463 463
464 464 # functions
465 465
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    err = _("adds requires a pattern")
    # status field 1 holds added files
    return checkstatus(repo, subset, getstring(x, err), 1)
477 477
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise; gca of the accumulated answer and the
                # next revision
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
502 502
def _ancestors(repo, subset, x, followfirst=False):
    """Common implementation of ancestors() and _firstancestors()."""
    # resolve x against the whole repo, then intersect back with subset
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    return subset & _revancestors(repo, heads, followfirst)
509 509
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # implementation shared with _firstancestors()
    return _ancestors(repo, subset, x)
515 515
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (leading underscore: no docstring, so hidden from user help)
    return _ancestors(repo, subset, x, followfirst=True)
520 520
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        num = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    ancs = set()
    for r in getset(repo, fullreposet(repo), x):
        # walk up `num` first-parent links
        for i in range(num):
            r = cl.parentrevs(r)[0]
        ancs.add(r)
    return subset & ancs
537 537
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    return subset.filter(
        lambda rev: matcher(encoding.lower(repo[rev].user())))
546 546
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    return subset & set(hbisect.get(repo, status))
563 563
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # legacy alias for bisect()
    return bisect(repo, subset, x)
568 568
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: an empty match set is also an error
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
606 606
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex pattern: always filter, even if nothing matches
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select csets on the branch of any member
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
639 639
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
650 650
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only a bundle repository's changelog carries bundlerevs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
662 662
def checkstatus(repo, subset, pat, field):
    """Filter ``subset`` to csets where ``pat`` matches in one status field.

    ``field`` indexes the repo.status() tuple (e.g. ``adds`` passes 1
    for added files).
    """
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns depend on the revision context, so the
        # matcher must be rebuilt per revision; otherwise build it once
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: cheap membership tests below
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no touched file matches: skip the expensive status call
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
694 694
def _children(repo, narrow, parentset):
    """Return a baseset of members of ``narrow`` whose parent is in
    ``parentset`` (which must be a smartset: ``.min()`` is used)."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            # children always have a higher revision number than parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
710 710
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
718 718
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda rev: repo[rev].closesbranch())
726 726
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test suffices
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest of the revision
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
753 753
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def _matchvalue(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(lambda r: _matchvalue(r))
775 775
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    # date()[0] is the timestamp component
    return subset.filter(lambda rev: matchfn(repo[rev].date()[0]))
784 784
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))
    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
797 797
def _descendants(repo, subset, x, followfirst=False):
    """Common implementation of descendants() and _firstdescendants().

    Returns subset members that are in x or descend from a member of x.
    """
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: intersect again to restore its order
        result = subset & result
    return result
816 816
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # implementation shared with _firstdescendants()
    return _descendants(repo, subset, x)
822 822
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (leading underscore: no docstring, so hidden from user help)
    return _descendants(repo, subset, x, followfirst=True)
827 827
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            # keep walking back through the chain of recorded sources
            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
871 871
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    return subset & obsmod.getrevs(repo, 'divergent')
880 880
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    return subset & obsmod.getrevs(repo, 'extinct')
889 889
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # with no value argument, mere presence of the label matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
919 919
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single file matched exactly (the fast path the
        # docstring advertises)
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # real pattern: scan the working context for every matching file
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # built lazily: only needed when some filerev has no
                    # known unfiltered child
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1034 1034
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # identical semantics to limit(); all argument checking happens there
    return limit(repo, subset, x)
1040 1040
def _follow(repo, subset, x, name, followfirst=False):
    """Shared implementation for the follow()/_followfirst() predicates.

    Without a filename argument, selects the ancestors of the working
    directory's first parent. With a filename, selects the revisions that
    introduced versions of that file (via the filectx ancestors walk, plus
    the introduction revision of the current version). ``followfirst``
    restricts the walk to each revision's first parent.
    """
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a filename") % name)
        if x in c:
            cx = c[x]
            s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
            # include the revision responsible for the most recent version
            s.add(cx.introrev())
        else:
            # the file does not exist in the working parent: empty result
            return baseset()
    else:
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1057 1057
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # delegate to the shared helper, following both parents
    return _follow(repo, subset, x, 'follow')
1065 1065
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision. (Internal: comment instead of a
    # docstring so it stays out of the generated help.)
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1071 1071
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with spanset drops "null" if subset contained it
    return subset & spanset(repo)
1079 1079
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # search the changed files, the user and the description
        c = repo[x]
        return any(gr.search(e)
                   for e in c.files() + [c.user(), c.description()])

    return subset.filter(matches)
1100 1100
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # dispatch on the two-character prefix of each argument
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision is kept when any of its changed files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1157 1157
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # reuse the generic _matchfiles machinery with a single 'p:' pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1170 1170
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    headrevs = set()
    for branch, nodes in repo.branchmap().iteritems():
        headrevs.update(torev(n) for n in nodes)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1186 1186
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a member whose child is also in the set is, by definition, one of
    # the set's parents; subtracting parents(set) leaves only the heads
    members = getset(repo, subset, x)
    innerparents = parents(repo, subset, x)
    return members - innerparents
1194 1194
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # everything that the 'visible' repoview filter would hide
    return subset & repoview.filterrevs(repo, 'visible')
1203 1203
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # check the changed files, the user and the description,
        # lower-cased for the case-insensitive comparison
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1218 1218
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # consume at most 'lim' revisions from the argument set, keeping those
    # that are also in the subset being filtered (loop variable renamed from
    # 'x', which shadowed the parse-tree parameter)
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1244 1244
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # walk the reversed set so the 'last' members come first, keeping at
    # most 'lim' of those also present in the filtered subset (loop variable
    # renamed from 'x', which shadowed the parse-tree parameter)
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1271 1271
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        if top in subset:
            return baseset([top])
    # empty argument set, or the maximum was filtered out of the subset
    return baseset()
1282 1282
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1291 1291
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # child counter for every revision from the smallest subset member up
    # to tip, indexed by (rev - baserev)
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            # only count parents inside the window we track
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1310 1310
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        low = candidates.min()
        if low in subset:
            return baseset([low])
    # empty argument set, or the minimum was filtered out of the subset
    return baseset()
1321 1321
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # field index 0 — presumably 'modified' in checkstatus(); see that helper
    return checkstatus(repo, subset, pat, 0)
1333 1333
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name: it must exist, or we abort the lookup
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1371 1371
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full-length hash: resolve it directly, ignoring unknown/bad input
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # shorter prefix: let the changelog disambiguate it
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1395 1395
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict subset to revisions marked obsolete by markers
    return subset & obsmod.getrevs(repo, 'obsolete')
1403 1403
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # implicit second set: every repo head that is neither part of the
        # first set nor descends from it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1429 1429
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        """Walk the recorded source chain of 'rev' back to its earliest
        origin, or return None when no source is recorded."""
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # None marks destinations with no recorded source; drop it
    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1460 1460
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push path, then the pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # capture and discard any ui output produced by the discovery phase
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1487 1487
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: use the working directory's first parent
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1506 1506
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # the working directory has a single parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1529 1529
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # bind methods locally: this loop can run over the whole repo
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory is virtual; its parents come from
                # the context object, not the changelog
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1548 1548
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache exposes precomputed revision sets
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate individually
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, cache=False)
1561 1561
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1569 1569
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1577 1577
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parse-tree token; its payload must be 0, 1 or 2
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1602 1602
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revisions yield an empty result instead of an abort
        return baseset()
1616 1616
# for internal use
def _notpublic(repo, subset, x):
    # ``_notpublic()``: changesets whose phase is not public (no docstring
    # on purpose — internal predicates stay out of the generated help)
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every cached phase set past index 0 (public)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: check each revision's phase individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1633 1633
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    def ispublic(r):
        return phase(repo, r) == phases.public
    return subset.filter(ispublic, cache=False)
1643 1643
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' stands for the branch currently checked out
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # ask the peer to resolve the identifier, then map the returned node
    # back to a local revision if we have it
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1678 1678
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # field index 2 — presumably 'removed' in checkstatus(); see that helper
    return checkstatus(repo, subset, pat, 2)
1690 1690
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # an unknown revision (other than the virtual null revision) is empty
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1706 1706
1707 1707 def matching(repo, subset, x):
1708 1708 """``matching(revision [, field])``
1709 1709 Changesets in which a given set of fields match the set of fields in the
1710 1710 selected revision or set.
1711 1711
1712 1712 To match more than one field pass the list of fields to match separated
1713 1713 by spaces (e.g. ``author description``).
1714 1714
1715 1715 Valid fields are most regular revision fields and some special fields.
1716 1716
1717 1717 Regular revision fields are ``description``, ``author``, ``branch``,
1718 1718 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1719 1719 and ``diff``.
1720 1720 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1721 1721 contents of the revision. Two revisions matching their ``diff`` will
1722 1722 also match their ``files``.
1723 1723
1724 1724 Special fields are ``summary`` and ``metadata``:
1725 1725 ``summary`` matches the first line of the description.
1726 1726 ``metadata`` is equivalent to matching ``description user date``
1727 1727 (i.e. it matches the main metadata fields).
1728 1728
1729 1729 ``metadata`` is the default field which is used when no fields are
1730 1730 specified. You can match more than one field at a time.
1731 1731 """
1732 1732 # i18n: "matching" is a keyword
1733 1733 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1734 1734
1735 1735 revs = getset(repo, fullreposet(repo), l[0])
1736 1736
1737 1737 fieldlist = ['metadata']
1738 1738 if len(l) > 1:
1739 1739 fieldlist = getstring(l[1],
1740 1740 # i18n: "matching" is a keyword
1741 1741 _("matching requires a string "
1742 1742 "as its second argument")).split()
1743 1743
1744 1744 # Make sure that there are no repeated fields,
1745 1745 # expand the 'special' 'metadata' field type
1746 1746 # and check the 'files' whenever we check the 'diff'
1747 1747 fields = []
1748 1748 for field in fieldlist:
1749 1749 if field == 'metadata':
1750 1750 fields += ['user', 'description', 'date']
1751 1751 elif field == 'diff':
1752 1752 # a revision matching the diff must also match the files
1753 1753 # since matching the diff is very costly, make sure to
1754 1754 # also match the files first
1755 1755 fields += ['files', 'diff']
1756 1756 else:
1757 1757 if field == 'author':
1758 1758 field = 'user'
1759 1759 fields.append(field)
1760 1760 fields = set(fields)
1761 1761 if 'summary' in fields and 'description' in fields:
1762 1762 # If a revision matches its description it also matches its summary
1763 1763 fields.discard('summary')
1764 1764
1765 1765 # We may want to match more than one field
1766 1766 # Not all fields take the same amount of time to be matched
1767 1767 # Sort the selected fields in order of increasing matching cost
1768 1768 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1769 1769 'files', 'description', 'substate', 'diff']
1770 1770 def fieldkeyfunc(f):
1771 1771 try:
1772 1772 return fieldorder.index(f)
1773 1773 except ValueError:
1774 1774 # assume an unknown field is very costly
1775 1775 return len(fieldorder)
1776 1776 fields = list(fields)
1777 1777 fields.sort(key=fieldkeyfunc)
1778 1778
1779 1779 # Each field will be matched with its own "getfield" function
1780 1780 # which will be added to the getfieldfuncs array of functions
1781 1781 getfieldfuncs = []
1782 1782 _funcs = {
1783 1783 'user': lambda r: repo[r].user(),
1784 1784 'branch': lambda r: repo[r].branch(),
1785 1785 'date': lambda r: repo[r].date(),
1786 1786 'description': lambda r: repo[r].description(),
1787 1787 'files': lambda r: repo[r].files(),
1788 1788 'parents': lambda r: repo[r].parents(),
1789 1789 'phase': lambda r: repo[r].phase(),
1790 1790 'substate': lambda r: repo[r].substate,
1791 1791 'summary': lambda r: repo[r].description().splitlines()[0],
1792 1792 'diff': lambda r: list(repo[r].diff(git=True),)
1793 1793 }
1794 1794 for info in fields:
1795 1795 getfield = _funcs.get(info, None)
1796 1796 if getfield is None:
1797 1797 raise error.ParseError(
1798 1798 # i18n: "matching" is a keyword
1799 1799 _("unexpected field name passed to matching: %s") % info)
1800 1800 getfieldfuncs.append(getfield)
1801 1801 # convert the getfield array of functions into a "getinfo" function
1802 1802 # which returns an array of field values (or a single value if there
1803 1803 # is only one field to match)
1804 1804 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1805 1805
1806 1806 def matches(x):
1807 1807 for rev in revs:
1808 1808 target = getinfo(rev)
1809 1809 match = True
1810 1810 for n, f in enumerate(getfieldfuncs):
1811 1811 if target[n] != f(x):
1812 1812 match = False
1813 1813 if match:
1814 1814 return True
1815 1815 return False
1816 1816
1817 1817 return subset.filter(matches)
1818 1818
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # evaluate the argument against the current subset, then flip the
    # smartset's iteration order in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1826 1826
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    # resolve the argument against the whole repo so that parents outside
    # 'subset' still count as members of the candidate set
    candidates = getset(repo, fullreposet(repo), x)
    parentrevsof = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real parent (nullrev excluded) inside the set
        return not any(0 <= p and p in candidates
                       for p in parentrevsof(r))
    return subset & candidates.filter(isroot)
1839 1839
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # bytewise complement: turns ascending string order into descending
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: a pure revision sort needs no changectx lookups
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # build a decorated list [keyval, keyval, ..., rev] and sort it
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # use tuple membership, not substring membership: the previous
            # "k in 'user author'" accepted any substring (e.g. "use",
            # "r a") and silently sorted by user instead of raising
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing rev makes the sort deterministic and lets us undecorate
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1902 1902
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        # 'pat' is only bound when a pattern argument was given; submatches
        # below is only called on the paths where that is the case
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # a revision touches its subrepos iff it touches .hgsubstate
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching the user-supplied pattern
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status restricted to .hgsubstate: did this rev change any subrepo?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate appeared: every current subrepo counts as added
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare parent and child substate for the matching names only
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate disappeared: every parent subrepo counts as removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1945 1945
1946 1946 def _stringmatcher(pattern):
1947 1947 """
1948 1948 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1949 1949 returns the matcher name, pattern, and matcher function.
1950 1950 missing or unknown prefixes are treated as literal matches.
1951 1951
1952 1952 helper for tests:
1953 1953 >>> def test(pattern, *tests):
1954 1954 ... kind, pattern, matcher = _stringmatcher(pattern)
1955 1955 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1956 1956
1957 1957 exact matching (no prefix):
1958 1958 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1959 1959 ('literal', 'abcdefg', [False, False, True])
1960 1960
1961 1961 regex matching ('re:' prefix)
1962 1962 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1963 1963 ('re', 'a.+b', [False, False, True])
1964 1964
1965 1965 force exact matches ('literal:' prefix)
1966 1966 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1967 1967 ('literal', 're:foobar', [False, True])
1968 1968
1969 1969 unknown prefixes are ignored and treated as literals
1970 1970 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1971 1971 ('literal', 'foo:bar', [False, False, True])
1972 1972 """
1973 1973 if pattern.startswith('re:'):
1974 1974 pattern = pattern[3:]
1975 1975 try:
1976 1976 regex = re.compile(pattern)
1977 1977 except re.error as e:
1978 1978 raise error.ParseError(_('invalid regular expression: %s')
1979 1979 % e)
1980 1980 return 're', pattern, regex.search
1981 1981 elif pattern.startswith('literal:'):
1982 1982 pattern = pattern[8:]
1983 1983 return 'literal', pattern, pattern.__eq__
1984 1984
def _substringmatcher(pattern):
    # like _stringmatcher, but a 'literal' pattern matches as a substring
    # instead of requiring full equality
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
1990 1990
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or other) matcher: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2019 2019
def tagged(repo, subset, x):
    # "tagged" delegates to tag() and accepts the same optional name argument
    return tag(repo, subset, x)
2022 2022
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect the obsstore-derived 'unstable' revisions with the subset
    return subset & obsmod.getrevs(repo, 'unstable')
2031 2031
2032 2032
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # same implementation as author(): "user" and "author" are synonyms
    return author(repo, subset, x)
2042 2042
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo revision is returned only when it is
    # already a member of subset, or when subset spans the whole repo
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2050 2050
# for internal use
def _list(repo, subset, x):
    # Expand a '\0'-separated list of revision specs (as built by the
    # optimizer's 'or' fast path) into a baseset, preserving input order.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # str(r) != t guards against lookalikes such as '05' or '+1'
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            # not a plain revision number: resolve as a symbol/hash
            r = repo[t].rev()
        if r in seen:
            continue
        # nullrev is admitted only against the full repo set
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)
2076 2076
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions, keeping only
    # those already present in subset (input order preserved)
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = (int(t) for t in spec.split('\0'))
    return baseset([r for r in wanted if r in subset])
2085 2085
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of binary-hex node ids, keeping only
    # the revisions already present in subset (input order preserved)
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    candidates = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in candidates if r in subset])
2095 2095
# table of predicate implementations, keyed by revset symbol name
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2170 2170
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): "contains", "grep", "named" and "subrepo" from the symbols
# table are absent here, presumably per the criteria above — verify before
# adding new entries.
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])
2244 2244
# parse-tree operator name -> handler function (dispatch table)
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2260 2260
def optimize(x, small):
    """Weigh and rewrite parse tree ``x`` for evaluation.

    Returns a ``(weight, tree)`` pair. ``weight`` is a heuristic cost
    estimate used to order the operands of 'and'; ``tree`` is a rewritten
    parse tree (e.g. 'minus' becomes 'and'/'not', '::x and not ::y' uses
    the 'only' fast path). ``small`` lowers the weight of single-revision
    operands via ``smallbonus``.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    # NOTE(review): substring membership on a space-separated string; op
    # names come from the parser's fixed grammar so no misfire in practice,
    # but a tuple would be stricter
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into one _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # NOTE(review): the 'f in "..."' tests below are substring matches,
        # so e.g. f == 'thor' would get the slow weight; this only skews the
        # cost heuristic, never correctness
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2393 2393
2394 2394 _aliasarg = ('func', ('symbol', '_aliasarg'))
2395 2395 def _getaliasarg(tree):
2396 2396 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2397 2397 return X, None otherwise.
2398 2398 """
2399 2399 if (len(tree) == 3 and tree[:2] == _aliasarg
2400 2400 and tree[2][0] == 'string'):
2401 2401 return tree[2][1]
2402 2402 return None
2403 2403
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        # leaves (strings, None) can't carry a placeholder
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2414 2414
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is admitted so alias arguments like '$1' tokenize as symbols;
# bytes above 127 admit non-ASCII symbol names)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2419 2419
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as for full revsets, but with the wider symbol
    # initial-letter alphabet defined above
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2429 2429
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the declaration must consume the whole string
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # every argument must be a plain symbol (no nesting, no strings)
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2500 2500
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the definition must consume the whole string
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2565 2565
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # sets self.name/.tree/.args; self.error is non-None on bad decl
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # self.replacement holds the parsed definition tree
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            # keep the error for later reporting instead of raising here
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2590 2590
2591 2591 def _getalias(aliases, tree):
2592 2592 """If tree looks like an unexpanded alias, return it. Return None
2593 2593 otherwise.
2594 2594 """
2595 2595 if isinstance(tree, tuple) and tree:
2596 2596 if tree[0] == 'symbol' and len(tree) == 2:
2597 2597 name = tree[1]
2598 2598 alias = aliases.get(name)
2599 2599 if alias and alias.args is None and alias.tree == tree:
2600 2600 return alias
2601 2601 if tree[0] == 'func' and len(tree) > 1:
2602 2602 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2603 2603 name = tree[1][1]
2604 2604 alias = aliases.get(name)
2605 2605 if alias and alias.args is not None and alias.tree == tree[:2]:
2606 2606 return alias
2607 2607 return None
2608 2608
2609 2609 def _expandargs(tree, args):
2610 2610 """Replace _aliasarg instances with the substitution value of the
2611 2611 same name in args, recursively.
2612 2612 """
2613 2613 if not tree or not isinstance(tree, tuple):
2614 2614 return tree
2615 2615 arg = _getaliasarg(tree)
2616 2616 if arg is not None:
2617 2617 return args[arg]
2618 2618 return tuple(_expandargs(t, args) for t in tree)
2619 2619
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used
    to detect cycles); 'cache' memoizes expanded alias bodies by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute the (expanded) actual
            # arguments for the _aliasarg placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # arguments get a fresh 'expanding' stack of their own
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: recurse into the children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2653 2653
def findaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] config aliases in 'tree'.

    When 'showwarning' is given, aliases that are broken but were never
    actually referenced are reported through it (once each).
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        aliases[entry.name] = entry
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return tree
2668 2668
def foldconcat(tree):
    """Fold elements to be concatenated by `##` into a single string node."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # depth-first flattening of the (possibly nested) _concat chain
    pieces = []
    stack = [tree]
    while stack:
        element = stack.pop()
        kind = element[0]
        if kind == '_concat':
            stack.extend(reversed(element[1:]))
        elif kind in ('string', 'symbol'):
            pieces.append(element[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2689 2689
def parse(spec, lookup=None):
    """Parse a revset specification into a tree.

    Raises ParseError when 'spec' contains tokens past the end of a valid
    expression.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2696 2696
def posttreebuilthook(tree, repo):
    """Hook point invoked after the revset tree has been optimized.

    The core implementation intentionally does nothing; extensions may
    wrap this function to inspect or act on the final tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2700 2700
def match(ui, spec, repo=None):
    """Create a matcher for a single revision specification."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # with a repo we can resolve bare symbols against known revisions
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2709 2709
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: match nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # several specs are combined with a top-level 'or'
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2727 2727
def _makematcher(ui, tree, repo):
    """Build the mfunc(repo, subset) callable for an already-parsed tree."""
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # wrap plain collections so getset always receives a smartset
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2743 2743
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        return repr(str(s))

    def argtype(c, arg):
        # render a single value according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list of values; known types use the compact _list /
        # _intlist / _hexlist helpers, anything else is split recursively
        # into a balanced 'or' tree
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    pieces = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                pieces.append(d)
            elif d in 'dsnbr':
                pieces.append(argtype(d, args[argindex]))
                argindex += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                pieces.append(listexp(list(args[argindex]), d))
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            pieces.append(c)
        pos += 1

    return ''.join(pieces)
2837 2837
def prettyformat(tree):
    """Return an indented, multi-line rendering of a parsed revset tree.

    'string' and 'symbol' nodes are treated as leaves and shown inline.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2840 2840
2841 2841 def depth(tree):
2842 2842 if isinstance(tree, tuple):
2843 2843 return max(map(depth, tree)) + 1
2844 2844 else:
2845 2845 return 0
2846 2846
def funcsused(tree):
    """Return the set of revset function names referenced by 'tree'."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    if tree[0] == 'func':
        # the function name lives in the first child's symbol node
        names.add(tree[1][1])
    for subtree in tree[1:]:
        names |= funcsused(subtree)
    return names
2857 2857
class abstractsmartset(object):
    """Base interface for all smartset classes.

    A smartset is an ordered collection of revision numbers that supports
    lazy iteration, fast membership testing and set arithmetic.  Concrete
    subclasses must implement the methods that raise NotImplementedError.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    # computing min() may walk the whole set, so the result is memoized
    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    # same reasoning as min(): cache the (potentially expensive) result
    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with everything is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins (no func_code attribute) cannot be wrapped by cachefunc,
        # and do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2962 2964
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # keep the data as a list; if a set was handed in, remember it so
        # the _set property does not need to rebuild it
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            data = list(data)
        self._list = data
        # None: keep insertion order; True/False: ascending/descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily computed sorted copy used for ordered iteration
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind directly to the set's membership test for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the underlying list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is True

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is False

    def first(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[0]
        return self._asclist[0] if self._ascending else self._asclist[-1]

    def last(self):
        if not self._list:
            return None
        if self._ascending is None:
            return self._list[-1]
        return self._asclist[-1] if self._ascending else self._asclist[0]

    def __repr__(self):
        marker = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, marker, self._list)
3059 3061
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoized per-revision membership answers
        self._cache = {}

    def __contains__(self, x):
        cache = self._cache
        try:
            return cache[x]
        except KeyError:
            result = cache[x] = x in self._subset and self._condition(x)
            return result

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        pred = self._condition
        return (x for x in it if pred(x))

    @property
    def fastasc(self):
        inner = self._subset.fastasc
        if inner is None:
            return None
        return lambda: self._iterfilter(inner())

    @property
    def fastdesc(self):
        inner = self._subset.fastdesc
        if inner is None:
            return None
        return lambda: self._iterfilter(inner())

    def __nonzero__(self):
        # true as soon as a single revision passes the filter
        for _unused in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return sum(1 for _unused in self)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        return next(iter(self), None)

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is None:
            # no fast reversed iteration: exhaust and keep the final value
            result = None
            for result in self:
                pass
            return result
        return next(it(), None)

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3149 3151
3150 3152 def _iterordered(ascending, iter1, iter2):
3151 3153 """produce an ordered iteration from two iterators with the same order
3152 3154
3153 3155 The ascending is used to indicated the iteration direction.
3154 3156 """
3155 3157 choice = max
3156 3158 if ascending:
3157 3159 choice = min
3158 3160
3159 3161 val1 = None
3160 3162 val2 = None
3161 3163 try:
3162 3164 # Consume both iterators in an ordered way until one is empty
3163 3165 while True:
3164 3166 if val1 is None:
3165 3167 val1 = iter1.next()
3166 3168 if val2 is None:
3167 3169 val2 = iter2.next()
3168 3170 next = choice(val1, val2)
3169 3171 yield next
3170 3172 if val1 == next:
3171 3173 val1 = None
3172 3174 if val2 == next:
3173 3175 val2 = None
3174 3176 except StopIteration:
3175 3177 # Flush any remaining values and consume the other one
3176 3178 it = iter2
3177 3179 if val1 is not None:
3178 3180 yield val1
3179 3181 it = iter1
3180 3182 elif val2 is not None:
3181 3183 # might have been equality and both are empty
3182 3184 yield val2
3183 3185 for val in it:
3184 3186 yield val
3185 3187
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None means unsorted; True/False means ascending/descending
        self._ascending = ascending
        # _genlist: materialized union (baseset), filled on first len()
        self._genlist = None
        # _asclist: sorted copy of _genlist, filled lazily when needed
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # already materialized and sorted: iterate that directly
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # no fast path unless both sides have one
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # unordered: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip the direction, take the new first element, flip back
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3385 3387
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.
        """
        self._gen = gen
        # sorted copy of all values, available once the generator is exhausted
        self._asclist = None
        # membership answers discovered so far
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # the caller promised the generator's own order; expose a fast
            # iterator and a short-circuiting membership test accordingly
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x cannot appear later
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    # value already produced by a concurrent iterator
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: build the sorted list and switch to
            # direct list iteration for all future traversals
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3559 3561
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
            (default to 0)
        end: first revision excluded (last+1)
            (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        # Normalize so that self._start <= self._end always holds; the
        # iteration direction is carried solely by self._ascending.
        if not self._ascending:
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """Yield revisions from iterrange, skipping filtered (hidden) ones."""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        """Iterate the span in ascending order, skipping hidden revisions."""
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        """Iterate the span in descending order, skipping hidden revisions."""
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # True as soon as one unfiltered revision exists in the span.
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # __init__ guarantees start <= end, so a single range test
                # suffices (the former "end < rev <= start" clause was
                # unreachable dead logic and has been dropped).
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """Return the first unfiltered revision, or None if the set is empty."""
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        return next(it(), None)

    def last(self):
        """Return the last unfiltered revision, or None if the set is empty."""
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3668 3670
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            # already a smartset, use it directly
            result = other
        else:
            # `other` was combined with "&", so treat it as a plain
            # set-like object and strip filtered (hidden) revisions from
            # it up front (this boldly assumes all smartsets are pure).
            result = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        result.sort(reverse=self.isdescending())
        return result
3707 3709
def prettyformatset(revs):
    """Return repr(revs) reformatted with one smartset per line.

    Each '<' in the repr opens a nested smartset; every piece starting at
    a '<' is emitted on its own line, indented by its nesting depth.
    """
    rs = repr(revs)
    pieces = []
    pos = 0
    total = len(rs)
    while pos < total:
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        # depth = unmatched '<' before this piece
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        pieces.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(pieces)
3721 3723
# tell hggettext to extract docstrings from these functions:
# (presumably `symbols` is the revset-predicate table built earlier in this
# file; its docstrings double as user-facing help and must be translatable
# — confirm against the definition of `symbols` above)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now