##// END OF EJS Templates
reachableroots: use smartset min
Pierre-Yves David
r26093:20413113 default
parent child Browse files
Show More
@@ -1,3724 +1,3722 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    With followfirst, only the first parent of each revision is followed
    (parentrevs(...)[:1]); returns a descending generatorset.
    """
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # walk from the highest rev downwards; a max-heap of pending revs
        # is emulated with a min-heap of negated revision numbers
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # lazily feed the next input rev once the walk reaches it, so
            # the heap never holds more of the input than necessary
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    With followfirst, a rev only counts as a descendant through its first
    parent (parentrevs(...)[:1]); returns an ascending generatorset.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant iff one of its
            # (possibly cut) parents is already known to be one
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                # only the roots themselves are wanted; no need to keep
                # walking below this rev
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot bounds the walk: no root can sit at a lower rev
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        # NOTE(review): this returns the plain set, not a baseset —
        # callers appear to cope, but confirm before relying on the
        # smartset API of the return value here
        return reachable
    # second pass: pull in every visited rev with a reachable parent,
    # i.e. every rev lying on a path from a root to a head
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    reachable = baseset(reachable)
    reachable.sort()
    return reachable
129 129
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    # 'roots' is a smartset, so use its min() rather than the min()
    # builtin: the smartset can often answer without a full iteration
    # (this resolves the leftover pre-change 'minroot = min(roots)' lines
    # that the diff scrape kept alongside the new one).
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # fast path: the C changelog index implements reachableroots
        return repo.changelog.reachableroots(minroot, heads, roots,
                                             includepath)
    except AttributeError:
        # pure-Python fallback when the index lacks the C implementation
        return reachablerootspure(repo, minroot, roots, heads, includepath)
145 143
# Pratt-parser table driving parser.py: each token maps to its binding
# strength plus the primary/prefix/infix/suffix actions (None = invalid
# in that position).
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# operator tokens spelled as words rather than punctuation
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 181
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # hand-rolled scanner: 'pos' is advanced by the shared 'pos += 1' at
    # the bottom of the loop, so multi-char branches pre-advance it
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # r-prefixed string: backslashes are kept verbatim
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one so the shared advance below lands on the first
            # unconsumed character
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
301 299
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) > 1:
        # a position was recorded alongside the message
        return _('at %s: %s') % (args[1], args[0])
    return args[0]
309 307
310 308 # helpers
311 309
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Raises ParseError(err) for any other (or missing) node.
    """
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
316 314
def getlist(x):
    """Flatten a right-nested 'list' parse node into a flat Python list."""
    items = []
    cur = x
    # walk down the right-recursive chain, collecting payloads
    while cur and cur[0] == 'list':
        items.append(cur[2])
        cur = cur[1]
    if cur:
        items.append(cur)
    items.reverse()
    return items
323 321
def getargs(x, min, max, err):
    """Return the argument nodes of ``x``; raise ParseError(err) when the
    count falls outside [min, max].  A negative ``max`` means unbounded.
    """
    l = getlist(x)
    nargs = len(l)
    if nargs < min:
        raise error.ParseError(err)
    if 0 <= max < nargs:
        raise error.ParseError(err)
    return l
329 327
def getargsdict(x, funcname, keys):
    # Parse the arguments of ``funcname`` into a dict keyed by argument
    # name; ``keys`` is a whitespace-separated string of accepted names,
    # covering both positional and 'key=value' style arguments.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
333 331
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
338 336
def getsymbol(tree):
    """Return the name stored in a ``symbol`` parse node.

    ``tree`` must already have been validated with ``isvalidsymbol``.
    """
    name = tree[1]
    return name
345 343
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func':
        return False
    return len(tree) > 1 and isvalidsymbol(tree[1])
350 348
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # tree[1] is the ('symbol', name) node of the call
    return getsymbol(tree[1])
357 355
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a zero-argument call has no third element
    return getlist(tree[2]) if len(tree) > 2 else []
367 365
def getset(repo, subset, x):
    # Evaluate parse tree ``x`` against ``subset`` via the methods table
    # and always hand back a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        # already a smartset — pass it through unchanged
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap plain lists returned by legacy predicates
    return baseset(s)
382 380
383 381 def _getrevsource(repo, r):
384 382 extra = repo[r].extra()
385 383 for label in ('source', 'transplant_source', 'rebase_source'):
386 384 if label in extra:
387 385 try:
388 386 return repo[extra[label]].rev()
389 387 except error.RepoLookupError:
390 388 pass
391 389 return None
392 390
393 391 # operator methods
394 392
def stringset(repo, subset, x):
    # resolve the bare string/hash to a revision number first
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        # 'null' is implicitly part of the full repo even though
        # fullreposet does not enumerate it
        return baseset([x])
    return baseset()
401 399
def rangeset(repo, subset, x, y):
    # 'x:y' — span from the first rev of x to the last rev of y
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # range ending at the working directory: span to tip, then wdir
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # range starting at the working directory: wdir, then tip downwards
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # m > n: descending span
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
426 424
def dagrange(repo, subset, x, y):
    # 'x::y' — every rev lying on a path from a rev in x to a rev in y
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
434 432
def andset(repo, subset, x, y):
    # intersection: evaluate y within the result of evaluating x
    return getset(repo, getset(repo, subset, x), y)
437 435
def orset(repo, subset, *xs):
    # union of all operands, combined as a balanced tree so that long
    # 'a + b + c + ...' chains do not recurse linearly
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b
446 444
def notset(repo, subset, x):
    # complement of x within subset
    return subset - getset(repo, subset, x)
449 447
def listset(repo, subset, a, b):
    # bare 'a,b' lists are only valid inside function-call parentheses
    raise error.ParseError(_("can't use a list in this context"))
452 450
def keyvaluepair(repo, subset, k, v):
    # 'key=value' pairs are only valid as function arguments
    raise error.ParseError(_("can't use a key-value pair in this context"))
455 453
def func(repo, subset, a, b):
    # dispatch a parsed function call to the registered predicate
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # only predicates carrying a docstring are public; suggest just those
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
464 462
465 463 # functions
466 464
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the status tuple — presumably the 'added' list; see
    # checkstatus (TODO confirm against repo.status())
    return checkstatus(repo, subset, pat, 1)
478 476
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca computation is associative
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
503 501
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation of ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
510 508
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # note: includes the revs in set themselves (see _revancestors)
    return _ancestors(repo, subset, x)
516 514
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (deliberately no docstring: keeps it out of the advertised help)
    return _ancestors(repo, subset, x, followfirst=True)
521 519
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n is a parse node; its payload sits at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # step back n first-parents
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
538 536
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # case-insensitive match against the full recorded user string
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
547 545
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get() maps the status name to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
564 562
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deliberately no docstring: predicates without one are hidden from help
    return bisect(repo, subset, x)
569 567
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: collect every bookmark the matcher accepts
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked revision
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
607 605
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec argument: select changesets on the same branches as those revs
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
640 638
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence store computes the 'bumped' set
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
651 649
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        # only a bundlerepo changelog carries 'bundlerevs'
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
663 661
def checkstatus(repo, subset, pat, field):
    # Helper for status-based predicates (e.g. adds()): revisions whose
    # status tuple entry ``field`` (an index into repo.status()'s result)
    # contains a file matching ``pat``.
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns depend on the context, so rebuild the matcher
        # per revision; other patterns are built once and cached
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # literal single-file pattern: plain membership tests suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
695 693
def _children(repo, narrow, parentset):
    # Return the members of ``narrow`` that have a parent in ``parentset``.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        if r <= minrev:
            # children are always numbered above their parents
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
711 709
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    # scan subset for revs with a parent in s
    cs = _children(repo, subset, s)
    return subset & cs
719 717
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    # a closed changeset is one that closes its branch
    return subset.filter(lambda r: repo[r].closesbranch())
727 725
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test is enough
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest for a match
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
754 752
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match, so a shortened source identifier still works
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
776 774
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the timestamp component of the changeset date
    return subset.filter(lambda x: dm(repo[x].date()[0]))
785 783
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        # case-insensitive substring search in the description
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
798 796
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation of descendants() and _firstdescendants():
    # the roots themselves plus their descendants, restricted to subset
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to recover subset's own order
        result = subset & result
    return result
817 815
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # note: includes the revs in set themselves (see _descendants)
    return _descendants(repo, subset, x)
823 821
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (deliberately no docstring: keeps it out of the advertised help)
    return _descendants(repo, subset, x, followfirst=True)
828 826
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # lineage: candidate dests visited while chasing sources back from r
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
872 870
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence store computes the 'divergent' set
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
881 879
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence store computes the 'extinct' set
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
890 888
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # when no value was given, 'matcher' is never referenced thanks to
        # the short-circuit on 'value is None'
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
920 918
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    # A pattern without an explicit kind is treated as an exact path for
    # speed; anything else goes through a full match object against the
    # working-directory file list.
    if not matchmod.patkind(pat):
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        # First pass: take every linkrev at face value, but set aside the
        # filerevs whose linkrev points at a filtered changeset.
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the head cache on first need: it maps a
                    # filerev to a head changeset that carries that content
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1035 1033
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # pure delegation: limit() implements the actual behavior
    return limit(repo, subset, x)
1041 1039
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow()/_followfirst(): ancestors of the
    # working directory's first parent, optionally restricted to the history
    # of a single file (including copies).
    args = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    wctx = repo['.']
    if not args:
        # no filename: every ancestor of the working directory parent
        revs = _revancestors(repo, baseset([wctx.rev()]), followfirst)
        return subset & revs

    fname = getstring(args[0], _("%s expected a filename") % name)
    if fname not in wctx:
        return baseset()
    fctx = wctx[fname]
    revs = set(c.rev() for c in fctx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    revs.add(fctx.introrev())
    return subset & revs
1058 1056
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # thin wrapper over the shared _follow() implementation
    return _follow(repo, subset, x, 'follow')
1066 1064
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # internal-only variant; delegates to _follow with followfirst=True
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1072 1070
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    everything = spanset(repo)
    return subset & everything # drop "null" if any
1080 1078
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # scan the changed files, user and description of the revision
        c = repo[x]
        return any(gr.search(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches)
1101 1099
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # sort each string argument into its bucket according to its 2-char prefix
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    # note: repo[None] is the working directory when rev stayed None
    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches if any of its changed files matches
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1158 1156
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to the generic _matchfiles machinery with a single 'p:' pattern
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1171 1169
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    headrevs = set()
    # collect the head nodes of every named branch and map them to revs
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(torev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1187 1185
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # a member with a child in the set shows up as some member's parent
    parentrevs = parents(repo, subset, x)
    return members - parentrevs
1195 1193
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions that the 'visible' repoview filter would hide
    return subset & repoview.filterrevs(repo, 'visible')
1204 1202
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring search over files, user and description
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1219 1217
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.

    Walks the argument set in its own order and keeps the first ``n``
    revisions, retaining only those also present in ``subset``.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # fix: the loop variable used to be 'x', shadowing the revset-AST
    # parameter 'x'; use a throwaway counter name instead
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1245 1243
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.

    Reverses the argument set and keeps the first ``n`` revisions of the
    reversed order, retaining only those also present in ``subset``.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # fix: the loop variable used to be 'x', shadowing the revset-AST
    # parameter 'x'; use a throwaway counter name instead
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1272 1270
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    # the maximum counts only if it survives the subset restriction
    m = os.max()
    if m in subset:
        return baseset([m])
    return baseset()
1283 1281
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge revision has a valid (non -1) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1292 1290
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # count children for every rev >= baserev; the array is indexed by
    # 'rev - baserev' to avoid allocating a slot for the whole repo
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            # parents below baserev cannot be in subset, so skip them
            if p >= baserev:
                parentscount[p - baserev] += 1
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1311 1309
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    # the minimum counts only if it survives the subset restriction
    m = os.min()
    if m in subset:
        return baseset([m])
    return baseset()
1322 1320
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status index 0 selects the 'modified' file list in checkstatus()
    return checkstatus(repo, subset, pat, 0)
1334 1332
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    # collect the namespace objects selected by the literal name or pattern
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # NOTE: 'ns' is rebound here from the argument string to each
        # namespace object being examined
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather every revision reachable through a non-deprecated name in the
    # selected namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1372 1370
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    cl = repo.changelog
    rn = None
    if len(n) == 40:
        # full 40-char hash: resolve it directly
        try:
            rn = cl.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: rely on unambiguous partial matching
        pm = cl._partialmatch(n)
        if pm is not None:
            rn = cl.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1396 1394
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # keep only revisions the obsolescence store marks as superseded
    return subset & obsmod.getrevs(repo, 'obsolete')
1404 1402
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every repo head that is neither a
        # descendant of 'include' nor part of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ::include - ::exclude, computed by the changelog's missing-revs walk
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1430 1428
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the recorded source chain back to the earliest origin
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1461 1459
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push path, then the default pull path
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer the ui so discovery chatter does not leak into revset output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1488 1486
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no set given: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1507 1505
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no set given: second parent of the working directory, if any
        wparents = repo[x].parents()
        if len(wparents) < 2:
            return baseset()
        p = wparents[1].rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1530 1528
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the virtual working-directory revision has no changelog
                # entry; ask its context for parents instead
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1549 1547
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the cache already knows the full rev set per phase
        revs = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(revs)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each candidate revision
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1562 1560
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1570 1568
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1578 1576
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parsed token; its payload is at index 1
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1603 1601
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow the lookup failure on purpose: that is this predicate's
        # entire contract
        return baseset()
1617 1615
1618 1616 # for internal use
1619 1617 def _notpublic(repo, subset, x):
1620 1618 getargs(x, 0, 0, "_notpublic takes no arguments")
1621 1619 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1622 1620 if repo._phasecache._phasesets:
1623 1621 s = set()
1624 1622 for u in repo._phasecache._phasesets[1:]:
1625 1623 s.update(u)
1626 1624 s = baseset(s - repo.changelog.filteredrevs)
1627 1625 s.sort()
1628 1626 return subset & s
1629 1627 else:
1630 1628 phase = repo._phasecache.phase
1631 1629 target = phases.public
1632 1630 condition = lambda r: phase(repo, r) != target
1633 1631 return subset.filter(condition, cache=False)
1634 1632
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1644 1642
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name on the remote side
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # look up the identifier on the remote, then map the node back locally
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1679 1677
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status index 2 selects the 'removed' file list in checkstatus()
    return checkstatus(repo, subset, pat, 2)
1691 1689
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        num = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is always valid; anything else must exist in the changelog
    if num != node.nullrev and num not in repo.changelog:
        return baseset()
    return subset & baseset([num])
1707 1705
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
            # i18n: "matching" is a keyword
            _("matching requires a string "
              "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # fields are sorted by increasing matching cost, so
                    # stop immediately instead of evaluating the remaining
                    # (more expensive) fields for a revision that already
                    # failed to match
                    break
            if match:
                return True
        return False

    return subset.filter(matches)
1819 1817
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # materialize the argument set, then flip its iteration order in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1827 1825
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs
    def isroot(r):
        # a root has no (non-null) parent inside the set itself
        return not any(0 <= p and p in s for p in parents(r))
    return subset & s.filter(isroot)
1840 1838
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    args = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(args) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(args[1], _("sort spec must be a string"))

    keys = keys.split()

    def invert(s):
        # complement every byte so that lexicographic order is reversed
        return "".join(chr(255 - ord(c)) for c in s)

    revs = getset(repo, subset, args[0])
    # pure revision-number sorts have a fast path on the smartset itself
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # decorate-sort-undecorate: build a composite key per revision
    decorated = []
    for r in revs:
        ctx = repo[r]
        sortkey = []
        for k in keys:
            if k == 'rev':
                sortkey.append(r)
            elif k == '-rev':
                sortkey.append(-r)
            elif k == 'branch':
                sortkey.append(ctx.branch())
            elif k == '-branch':
                sortkey.append(invert(ctx.branch()))
            elif k == 'desc':
                sortkey.append(ctx.description())
            elif k == '-desc':
                sortkey.append(invert(ctx.description()))
            elif k in 'user author':
                sortkey.append(ctx.user())
            elif k in '-user -author':
                sortkey.append(invert(ctx.user()))
            elif k == 'date':
                sortkey.append(ctx.date()[0])
            elif k == '-date':
                sortkey.append(-ctx.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the revision itself goes last so ties stay deterministic and the
        # undecorate step can recover it
        sortkey.append(r)
        decorated.append(sortkey)
    decorated.sort()
    return baseset([e[-1] for e in decorated])
1903 1901
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    haspat = len(args) != 0
    if haspat:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only .hgsubstate changes are relevant to subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        kind, patstr, match = _stringmatcher(pat)
        for name in names:
            if match(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if not haspat:
            # any change to .hgsubstate counts
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the named subrepo's state between parent and child
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1946 1944
1947 1945 def _stringmatcher(pattern):
1948 1946 """
1949 1947 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1950 1948 returns the matcher name, pattern, and matcher function.
1951 1949 missing or unknown prefixes are treated as literal matches.
1952 1950
1953 1951 helper for tests:
1954 1952 >>> def test(pattern, *tests):
1955 1953 ... kind, pattern, matcher = _stringmatcher(pattern)
1956 1954 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1957 1955
1958 1956 exact matching (no prefix):
1959 1957 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1960 1958 ('literal', 'abcdefg', [False, False, True])
1961 1959
1962 1960 regex matching ('re:' prefix)
1963 1961 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1964 1962 ('re', 'a.+b', [False, False, True])
1965 1963
1966 1964 force exact matches ('literal:' prefix)
1967 1965 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1968 1966 ('literal', 're:foobar', [False, True])
1969 1967
1970 1968 unknown prefixes are ignored and treated as literals
1971 1969 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1972 1970 ('literal', 'foo:bar', [False, False, True])
1973 1971 """
1974 1972 if pattern.startswith('re:'):
1975 1973 pattern = pattern[3:]
1976 1974 try:
1977 1975 regex = re.compile(pattern)
1978 1976 except re.error as e:
1979 1977 raise error.ParseError(_('invalid regular expression: %s')
1980 1978 % e)
1981 1979 return 're', pattern, regex.search
1982 1980 elif pattern.startswith('literal:'):
1983 1981 pattern = pattern[8:]
1984 1982 return 'literal', pattern, pattern.__eq__
1985 1983
def _substringmatcher(pattern):
    # like _stringmatcher, but literal patterns match as substrings
    # instead of whole-string equality; 're:' patterns are unchanged
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=pattern):
            return _needle in s
    return kind, pattern, matcher
1991 1989
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # every tagged revision, excluding the implicit 'tip' tag
        tagrevs = set(cl.rev(n) for t, n in repo.tagslist() if t != 'tip')
    else:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            tagrevs = set([repo[tn].rev()])
        else:
            tagrevs = set(cl.rev(n) for t, n in repo.tagslist()
                          if matcher(t))
    return subset & tagrevs
2020 2018
def tagged(repo, subset, x):
    # "tagged" is a synonym of "tag"; both names are registered in the
    # symbol table below
    return tag(repo, subset, x)
2023 2021
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsmod keeps a precomputed set of unstable revisions per repo
    return subset & obsmod.getrevs(repo, 'unstable')
2032 2030
2033 2031
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # ``user`` and ``author`` are synonyms; author() does the matching
    return author(repo, subset, x)
2043 2041
2044 2042 # experimental
def wdir(repo, subset, x):
    # experimental: expose the working-directory pseudo-revision
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2051 2049
2052 2050 # for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    revs = []
    seen = set()
    for spec in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(spec)
            if str(r) != spec or r not in cl:
                raise ValueError
        except ValueError:
            r = repo[spec].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            revs.append(r)
        seen.add(r)
    return baseset(revs)
2077 2075
2078 2076 # for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revisions
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(piece) for piece in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2086 2084
2087 2085 # for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of hex node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2096 2094
# map revset predicate names (as they appear in user queries) to the
# implementing functions defined above; leading-underscore entries are
# internal-only helpers inserted by the optimizer or by revset aliases
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2171 2169
2172 2170 # symbols which can't be used for a DoS attack for any given input
2173 2171 # (e.g. those which accept regexes as plain strings shouldn't be included)
2174 2172 # functions that just return a lot of changesets (like all) don't count here
# subset of `symbols` above considered safe (see the note above for the
# exclusion criteria); membership is tested by predicate name
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2245 2243
# map parse-tree node types (first element of each tuple produced by the
# parser) to the function that evaluates that node
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2261 2259
def optimize(x, small):
    """Rewrite parse tree ``x`` into an equivalent cheaper form and
    estimate its evaluation cost.

    Returns a ``(weight, tree)`` pair: ``weight`` is a heuristic cost
    estimate and ``tree`` the rewritten expression.  When ``small`` is
    true the caller expects the subexpression to yield few revisions, so
    single-revision nodes get weight 0.5 instead of 1.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is just 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE: substring membership test; only 'string' and 'symbol' can
        # reach here ('negate' is already handled above)
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands (in ss) into a
            # single _list() node
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # NOTE: the weight buckets below use substring membership on a
        # space-separated string (e.g. 'add' would also match the second
        # bucket); kept as-is since symbol names come from the parser
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2394 2392
2395 2393 _aliasarg = ('func', ('symbol', '_aliasarg'))
2396 2394 def _getaliasarg(tree):
2397 2395 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2398 2396 return X, None otherwise.
2399 2397 """
2400 2398 if (len(tree) == 3 and tree[:2] == _aliasarg
2401 2399 and tree[2][0] == 'string'):
2402 2400 return tree[2][1]
2403 2401 return None
2404 2402
2405 2403 def _checkaliasarg(tree, known=None):
2406 2404 """Check tree contains no _aliasarg construct or only ones which
2407 2405 value is in known. Used to avoid alias placeholders injection.
2408 2406 """
2409 2407 if isinstance(tree, tuple):
2410 2408 arg = _getaliasarg(tree)
2411 2409 if arg is not None and (not known or arg not in known):
2412 2410 raise error.UnknownIdentifier('_aliasarg', [])
2413 2411 for t in tree:
2414 2412 _checkaliasarg(t, known)
2415 2413
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (notably includes '$' so that alias argument placeholders tokenize
# as symbols; see _tokenizealias below)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2420 2418
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # reuse the regular revset tokenizer, only widening the set of
    # characters allowed to start a symbol
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2430 2428
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the declaration must consume the whole string
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # arguments must all be plain symbols (no nesting, no strings)
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2501 2499
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wrap the alias tokenizer, rewriting each symbol that names an
        # alias argument into the token stream of "_aliasarg('ARGNAME')"
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the definition must consume the whole string
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2566 2564
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # name/tree/args come from the declaration (left-hand side);
        # error is None on success, an explanatory message otherwise
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        # self.replacement (the parsed right-hand side) is only set when
        # the declaration parsed cleanly
        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2591 2589
2592 2590 def _getalias(aliases, tree):
2593 2591 """If tree looks like an unexpanded alias, return it. Return None
2594 2592 otherwise.
2595 2593 """
2596 2594 if isinstance(tree, tuple) and tree:
2597 2595 if tree[0] == 'symbol' and len(tree) == 2:
2598 2596 name = tree[1]
2599 2597 alias = aliases.get(name)
2600 2598 if alias and alias.args is None and alias.tree == tree:
2601 2599 return alias
2602 2600 if tree[0] == 'func' and len(tree) > 1:
2603 2601 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2604 2602 name = tree[1][1]
2605 2603 alias = aliases.get(name)
2606 2604 if alias and alias.args is not None and alias.tree == tree[:2]:
2607 2605 return alias
2608 2606 return None
2609 2607
2610 2608 def _expandargs(tree, args):
2611 2609 """Replace _aliasarg instances with the substitution value of the
2612 2610 same name in args, recursively.
2613 2611 """
2614 2612 if not tree or not isinstance(tree, tuple):
2615 2613 return tree
2616 2614 arg = _getaliasarg(tree)
2617 2615 if arg is not None:
2618 2616 return args[arg]
2619 2617 return tuple(_expandargs(t, args) for t in tree)
2620 2618
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used
    to detect cycles; 'cache' memoizes the fully-expanded replacement
    tree per alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute the (recursively expanded)
            # call arguments into the placeholder nodes
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2654 2652
def findaliases(ui, tree, showwarning=None):
    """Expand every user-configured revset alias found in 'tree'.

    Aliases are read from the [revsetalias] config section.  When
    'showwarning' is provided, a warning is emitted for each alias whose
    definition is broken but was never actually referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        aliases[entry.name] = entry
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return tree
2669 2667
def foldconcat(tree):
    """Collapse `##` concatenation nodes into a single string node."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # depth-first walk of the chain of '_concat' nodes, gathering leaves
    chunks = []
    stack = [tree]
    while stack:
        item = stack.pop()
        if item[0] == '_concat':
            stack.extend(reversed(item[1:]))
        elif item[0] in ('string', 'symbol'):
            chunks.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (item[0])
            raise error.ParseError(msg)
    return ('string', ''.join(chunks))
2690 2688
def parse(spec, lookup=None):
    """Parse the revset specification 'spec' into a syntax tree.

    'lookup' is an optional callable the tokenizer uses to decide whether
    a name exists in the repository.  Raise ParseError on trailing junk.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2697 2695
def posttreebuilthook(tree, repo):
    """Hook point invoked with the final optimized tree; no-op by default."""
    # hook for extensions to execute code on the optimized tree
    pass
2701 2699
def match(ui, spec, repo=None):
    """Create a matcher function for the single revset 'spec'.

    Raise ParseError when 'spec' is empty.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2710 2708
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher selects nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    parsed = [parse(s, lookup) for s in specs]
    if len(parsed) == 1:
        tree = parsed[0]
    else:
        tree = ('or',) + tuple(parsed)
    return _makematcher(ui, tree, repo)
2728 2726
def _makematcher(ui, tree, repo):
    """Turn a parsed revset tree into a callable matcher.

    The returned function takes (repo, subset=None) and returns a smartset
    of the revisions in 'subset' selected by the tree (the whole repository
    when 'subset' is None).
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if util.safehasattr(subset, 'isascending'):
            # 'subset' already implements the smartset API
            result = getset(repo, subset, tree)
        else:
            # wrap plain collections into a smartset first
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2744 2742
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped form of the stringified value
        return repr(str(s))

    def argtype(c, arg):
        # render a single value according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(items, typ):
        # render a list of values of type 'typ', splitting long lists in
        # halves joined with 'or' to keep the parser recursion shallow
        count = len(items)
        if count == 0:
            return "_list('')"
        elif count == 1:
            return argtype(typ, items[0])
        elif typ == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in items)
        elif typ == 's':
            return "_list('%s')" % "\0".join(items)
        elif typ == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in items)
        elif typ == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in items)

        half = count // 2
        return '(%s or %s)' % (listexp(items[:half], typ),
                               listexp(items[half:], typ))

    ret = ''
    pos = 0
    argindex = 0
    while pos < len(expr):
        c = expr[pos]
        if c != '%':
            ret += c
        else:
            pos += 1
            d = expr[pos]
            if d == '%':
                # escaped literal percent sign
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[argindex])
                argindex += 1
            elif d == 'l':
                # a list of some type: the next character picks the type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[argindex]), d)
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        pos += 1

    return ret
2838 2836
def prettyformat(tree):
    """Return a human-readable, indented rendering of a parse tree."""
    leaftypes = ('string', 'symbol')
    return parser.prettyformat(tree, leaftypes)
2841 2839
def depth(tree):
    """Return the nesting depth of a parse tree; leaves count as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2847 2845
def funcsused(tree):
    """Return the set of function names called anywhere in the parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    if tree[0] == 'func':
        used.add(tree[1][1])
    for child in tree[1:]:
        used.update(funcsused(child))
    return used
2858 2856
class abstractsmartset(object):
    """Abstract base defining the smartset API.

    A smartset stands for an ordered set of revision numbers and supports
    lazy combination with other smartsets.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Optional fast directional iterators.  These stay plain None (rather
    # than methods returning None) so callers can test for a fast path
    # without building an iterator first.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        fast = self.fastasc
        if fast is None:
            return min(self)
        for rev in fast():
            return rev
        raise ValueError('arg is an empty sequence')

    def max(self):
        """return the maximum element in the set"""
        fast = self.fastdesc
        if fast is None:
            return max(self)
        for rev in fast():
            return rev
        raise ValueError('arg is an empty sequence')

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        # intersecting with the full repository is a no-op
        if isinstance(other, fullreposet):
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        inother = other.__contains__
        return self.filter(lambda r: not inother(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins cannot be wrapped by cachefunc, but do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2963 2961
class baseset(abstractsmartset):
    """Eagerly materialized smartset backed by an in-memory sequence.

    This is the reference implementation of the smartset API: every
    operation is computed from the stored list.
    """
    def __init__(self, data=()):
        # a set argument doubles as the membership cache; any other
        # non-list input is stored as given (note: not copied/converted)
        if isinstance(data, set):
            self._set = data
            data = list(data)
        self._list = data
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily-built membership cache
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily-built ascending copy used for sorted iteration
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the set's own membership test directly for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order yet: physically reverse the list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3060 3058
class filteredset(abstractsmartset):
    """Lazy smartset wrapping another smartset with a membership predicate.

    Iteration yields, in the subset's own order, only the revisions for
    which the predicate holds; membership test results are cached.
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        self._cache = {}

    def __contains__(self, x):
        cache = self._cache
        if x in cache:
            return cache[x]
        result = cache[x] = x in self._subset and self._condition(x)
        return result

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        cond = self._condition
        for rev in it:
            if cond(rev):
                yield rev

    @property
    def fastasc(self):
        inner = self._subset.fastasc
        if inner is None:
            return None
        return lambda: self._iterfilter(inner())

    @property
    def fastdesc(self):
        inner = self._subset.fastdesc
        if inner is None:
            return None
        return lambda: self._iterfilter(inner())

    def __nonzero__(self):
        for rev in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset([r for r in self]))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for rev in self:
            return rev
        return None

    def last(self):
        # iterate backwards through the fast path when one exists
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is None:
            # no fast reverse path: scan to the end in iteration order
            last = None
            for last in self:
                pass
            return last
        for rev in it():
            return rev
        return None # empty case
3150 3148
def _iterordered(ascending, iter1, iter2):
    """Merge two iterators that share the same order into one iteration.

    'ascending' tells which direction both inputs (and the output) run.
    A value present in both inputs is yielded only once.
    """
    pick = min if ascending else max

    val1 = None
    val2 = None
    try:
        # consume both iterators in lockstep until one runs dry
        while True:
            if val1 is None:
                val1 = next(iter1)
            if val2 is None:
                val2 = next(iter2)
            winner = pick(val1, val2)
            yield winner
            if val1 == winner:
                val1 = None
            if val2 == winner:
                val2 = None
    except StopIteration:
        # flush any pending value, then drain whichever side is left
        remaining = iter2
        if val1 is not None:
            yield val1
            remaining = iter1
        elif val2 is not None:
            # might have been equality and both are empty
            yield val2
        for val in remaining:
            yield val
3186 3184
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None # NOTE(review): appears unused in this class -- confirm
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # materializes the whole union via _list
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # cached full materialization of the union, in iteration order
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the materialized sorted list; otherwise require fast
        # ascending iterators on BOTH components to merge lazily
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        # mirror of fastasc for descending iteration
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip direction, take the first element, then restore direction
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3386 3384
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc, when not None, declares the order in which 'gen' yields
        its values (True: ascending, False: descending), which enables a
        fast directional iterator and an early-exit membership test.
        """
        self._gen = gen
        # ascending snapshot of all values, built once 'gen' is exhausted
        self._asclist = None
        # membership results discovered so far
        self._cache = {}
        # values generated so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # install order-aware fast paths on the instance
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x cannot appear later
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            # once fully consumed, fast iteration is available in both
            # directions from the sorted snapshot
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # force full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; after full consumption
            # both fast iterators exist, so this recursion terminates.
            # (was 'return self.first()', which returned the wrong end of
            # the set whenever this slow path was taken)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3560 3558
3561 3559 class spanset(abstractsmartset):
3562 3560 """Duck type for baseset class which represents a range of revisions and
3563 3561 can work lazily and without having all the range in memory
3564 3562
3565 3563 Note that spanset(x, y) behave almost like xrange(x, y) except for two
3566 3564 notable points:
3567 3565 - when x < y it will be automatically descending,
3568 3566 - revision filtered with this repoview will be skipped.
3569 3567
3570 3568 """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to an ascending [start, end) range; the direction
            # is carried by self._ascending instead
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs
3588 3586
    def sort(self, reverse=False):
        # only flips the iteration direction; the stored range is unchanged
        self._ascending = not reverse
3591 3589
    def reverse(self):
        # invert the current iteration direction
        self._ascending = not self._ascending
3594 3592
3595 3593 def _iterfilter(self, iterrange):
3596 3594 s = self._hiddenrevs
3597 3595 for r in iterrange:
3598 3596 if r not in s:
3599 3597 yield r
3600 3598
3601 3599 def __iter__(self):
3602 3600 if self._ascending:
3603 3601 return self.fastasc()
3604 3602 else:
3605 3603 return self.fastdesc()
3606 3604
3607 3605 def fastasc(self):
3608 3606 iterrange = xrange(self._start, self._end)
3609 3607 if self._hiddenrevs:
3610 3608 return self._iterfilter(iterrange)
3611 3609 return iter(iterrange)
3612 3610
3613 3611 def fastdesc(self):
3614 3612 iterrange = xrange(self._end - 1, self._start - 1, -1)
3615 3613 if self._hiddenrevs:
3616 3614 return self._iterfilter(iterrange)
3617 3615 return iter(iterrange)
3618 3616
3619 3617 def __contains__(self, rev):
3620 3618 hidden = self._hiddenrevs
3621 3619 return ((self._start <= rev < self._end)
3622 3620 and not (hidden and rev in hidden))
3623 3621
3624 3622 def __nonzero__(self):
3625 3623 for r in self:
3626 3624 return True
3627 3625 return False
3628 3626
3629 3627 def __len__(self):
3630 3628 if not self._hiddenrevs:
3631 3629 return abs(self._end - self._start)
3632 3630 else:
3633 3631 count = 0
3634 3632 start = self._start
3635 3633 end = self._end
3636 3634 for rev in self._hiddenrevs:
3637 3635 if (end < rev <= start) or (start <= rev < end):
3638 3636 count += 1
3639 3637 return abs(self._end - self._start) - count
3640 3638
3641 3639 def isascending(self):
3642 3640 return self._ascending
3643 3641
3644 3642 def isdescending(self):
3645 3643 return not self._ascending
3646 3644
3647 3645 def first(self):
3648 3646 if self._ascending:
3649 3647 it = self.fastasc
3650 3648 else:
3651 3649 it = self.fastdesc
3652 3650 for x in it():
3653 3651 return x
3654 3652 return None
3655 3653
3656 3654 def last(self):
3657 3655 if self._ascending:
3658 3656 it = self.fastdesc
3659 3657 else:
3660 3658 it = self.fastasc
3661 3659 for x in it():
3662 3660 return x
3663 3661 return None
3664 3662
3665 3663 def __repr__(self):
3666 3664 d = {False: '-', True: '+'}[self._ascending]
3667 3665 return '<%s%s %d:%d>' % (type(self).__name__, d,
3668 3666 self._start, self._end - 1)
3669 3667
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if not util.safehasattr(other, 'isascending'):
            # ``other`` is not a smartset -- turn it into one. It was
            # combined with "&", so assume it is at least set-like, and
            # strip out hidden revisions while wrapping it (this boldly
            # assumes every smartset is pure, i.e. already unfiltered).
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3708 3706
def prettyformatset(revs):
    """Return a multi-line, indented rendering of ``repr(revs)``.

    Every '<' in the repr marks a nested smartset; each one starts a new
    line indented by its nesting depth (open brackets minus closed
    brackets seen so far).
    """
    pretty = []
    text = repr(revs)
    total = len(text)
    pos = 0
    while pos < total:
        nxt = text.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        pretty.append('%s%s' % (' ' * depth, text[pos:nxt].rstrip()))
        pos = nxt
    return '\n'.join(pretty)
3722 3720
# tell hggettext to extract docstrings from these functions:
# (symbols is the predicate-name -> function table built above;
# hggettext scans this list when collecting translatable strings)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now