##// END OF EJS Templates
revset: rename revsbetween to reachableroots and add an argument...
Laurent Charignon -
r26002:fd92bfbb default
parent child Browse files
Show More
@@ -1,3701 +1,3707 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # followfirst limits the walk to first parents only; ``cut`` is the
    # slice bound applied to parentrevs() below (1 -> first parent, None
    # -> both parents).
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk from the highest input rev downwards.  A max-heap (negated
        # revs in heapq's min-heap) yields pending revisions in strictly
        # descending order, each exactly once (guarded by ``seen``).
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # Feed further input revs lazily, once the walk has descended
            # to them, keeping the heap small.
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # ``cut`` bounds the parentrevs() slice: 1 follows only first parents.
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending pass: any rev whose (possibly cut) parents
            # intersect ``seen`` is itself a descendant and joins ``seen``.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            # Without includepath we only need the roots themselves, so
            # there is no point walking through a root's own ancestors.
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot prunes the walk: nothing below the smallest root can
            # be on a roots::heads path.
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        # NOTE(review): this branch returns a plain set() while every other
        # exit returns a baseset — confirm callers accept both.
        return reachable
    # Second pass (includepath): grow ``reachable`` forward through ``seen``
    # in ascending rev order so every rev on a roots::heads path is added.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
125 130
# Parser table for the revset grammar, consumed by the generic Pratt-style
# parser in the ``parser`` module.  Each entry maps a token type to a
# 5-tuple describing how it may appear in an expression.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
153 158
# Words that are operators, not symbols, when they appear bare in a revset.
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
163 168
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    Yields (type, value, position) 3-tuples, ending with ('end', None, pos).
    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are kept raw; plain strings are unescaped
            # via the (Python 2) string-escape codec.
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the dash-separated
                    # pieces as symbols joined by '-' operators
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
281 286
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    return _('at %s: %s') % (args[1], args[0])
289 294
290 295 # helpers
291 296
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Any other node raises ParseError(err)."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
296 301
def getlist(x):
    """Flatten a left-nested 'list' parse tree into a Python list of nodes."""
    items = []
    # 'list' nodes nest on the left: ('list', rest, last).  Peel the last
    # element off each level, then reverse to restore source order.
    while x and x[0] == 'list':
        items.append(x[2])
        x = x[1]
    if x:
        items.append(x)
    items.reverse()
    return items
303 308
def getargs(x, min, max, err):
    """Return the flattened argument list of x, enforcing that its length
    lies in [min, max]; a negative max means "no upper bound".  Raises
    ParseError(err) when the count is out of range."""
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if max >= 0 and nargs > max:
        raise error.ParseError(err)
    return args
309 314
def getargsdict(x, funcname, keys):
    """Map the arguments of x onto the whitespace-separated names in
    ``keys``, supporting both positional and key=value forms."""
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
313 318
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    # Same evaluation order as ``tree[0] == 'symbol' and len(tree) > 1``.
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
318 323
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
325 330
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # Guard-clause form of the original single boolean expression; the
    # evaluation order of the three checks is unchanged.
    if tree[0] != 'func':
        return False
    if len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
330 335
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    funcnode = tree[1]
    return getsymbol(funcnode)
337 342
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # A 2-element 'func' node has no argument subtree at all.
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
347 352
def getset(repo, subset, x):
    """Evaluate parse tree ``x`` against ``subset`` and return a smartset.

    Dispatches on the node type through the module-level ``methods`` table.
    Results that already quack like a smartset (have ``isascending``) are
    returned as-is; anything else is wrapped in a baseset, with a devel
    warning for predicates that still return plain lists."""
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
362 367
def _getrevsource(repo, r):
    """Return the rev that r was convert/transplant/rebase'd from, if that
    source is recorded in extras and still resolvable; otherwise None."""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            # recorded source no longer exists in this repo; try next label
            pass
    return None
372 377
373 378 # operator methods
374 379
def stringset(repo, subset, x):
    """Resolve a bare string/symbol to the revision it names, intersected
    with ``subset``.  nullrev is allowed through only for a fullreposet,
    which cannot actually contain it."""
    x = repo[x].rev()
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
381 386
def rangeset(repo, subset, x, y):
    """Evaluate an 'x:y' range: the span from the first rev of x to the
    last rev of y, in that direction, limited to ``subset``."""
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # the working directory pseudo-rev sits past the end of the
        # changelog, so it is appended rather than spanned over
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range; spanset end bound is exclusive in both cases
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
406 411
def dagrange(repo, subset, x, y):
    """Evaluate an 'x::y' DAG range: all revs on ancestry paths from x down
    to y, inclusive of both endpoint sets, limited to ``subset``."""
    r = fullreposet(repo)
    # includepath=True asks reachableroots for the full <x>::<y> paths, not
    # just the reachable roots themselves.
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
413 419
def andset(repo, subset, x, y):
    """Intersection: evaluate y inside the result of evaluating x."""
    left = getset(repo, subset, x)
    return getset(repo, left, y)
416 422
def orset(repo, subset, *xs):
    """Union of the operand expressions, combined divide-and-conquer to
    keep the recursion depth logarithmic in the operand count."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
425 431
def notset(repo, subset, x):
    """Complement of x within subset."""
    excluded = getset(repo, subset, x)
    return subset - excluded
428 434
def listset(repo, subset, a, b):
    # 'list' nodes are only meaningful as function arguments; reaching this
    # method means a bare comma appeared in an expression context.
    raise error.ParseError(_("can't use a list in this context"))
431 437
def keyvaluepair(repo, subset, k, v):
    # 'keyvalue' nodes are only meaningful as named function arguments;
    # a bare 'k=v' in expression position is a parse error.
    raise error.ParseError(_("can't use a key-value pair in this context"))
434 440
def func(repo, subset, a, b):
    """Dispatch a revset function call: ``a`` is the name node, ``b`` the
    argument tree.  Unknown names raise UnknownIdentifier carrying the
    list of documented predicates for did-you-mean suggestions."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # only suggest predicates that have a docstring (undocumented ones are
    # internal and deliberately not advertised)
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
443 449
444 450 # functions
445 451
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status() holds added files
    return checkstatus(repo, subset, pat, 1)
457 463
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # Fold pairwise: gca of the whole collection is the running gca of
    # each rev with the accumulated result.
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
482 488
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation behind ancestors() and _firstancestors().
    headrevs = getset(repo, fullreposet(repo), x)
    if not headrevs:
        return baseset()
    ancs = _revancestors(repo, headrevs, followfirst)
    return subset & ancs
489 495
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
495 501
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
500 506
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload must be an integer literal
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
517 523
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # comparison is case-insensitive: both pattern and user are lowercased
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
526 532
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state
543 549
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias for bisect()
    return bisect(repo, subset, x)
548 554
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: a missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern: collect every matching bookmark; no match is an error
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
586 592
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            # regex pattern: filter by branch name match
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: select revs on any branch that a rev in x belongs to
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
619 625
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumpedrevs = obsmod.getrevs(repo, 'bumped')
    return subset & bumpedrevs
630 636
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only present when the repo is a bundlerepo
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
642 648
def checkstatus(repo, subset, pat, field):
    """Helper for added/modified/removed predicates: filter subset to
    changesets whose status list ``field`` (index into repo.status() output)
    contains a file matching ``pat``."""
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions; fileset patterns depend on the
    # changectx and must be rebuilt every time
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal filename can be tested directly
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
674 680
def _children(repo, narrow, parentset):
    """Return the members of ``narrow`` that have a parent in ``parentset``."""
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # children are always numbered above their parents; skip anything
        # at or below the smallest candidate parent
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
690 696
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parentset = getset(repo, fullreposet(repo), x)
    childset = _children(repo, subset, parentset)
    return subset & childset
698 704
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))

    def isclosed(r):
        return repo[r].closesbranch()

    return subset.filter(isclosed)
706 712
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test is much cheaper
            # than scanning the whole manifest with a matcher
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
733 739
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match so a short hash of the source id also works
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
755 761
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)

    def indate(r):
        return dm(repo[r].date()[0])

    return subset.filter(indate)
764 770
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(r):
        return ds in encoding.lower(repo[r].description())

    return subset.filter(matches)
777 783
def _descendants(repo, subset, x, followfirst=False):
    """Shared implementation behind descendants()/_firstdescendants()."""
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to intersection to preserve its order
        result = subset & result
    return result
796 802
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)
802 808
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
807 813
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
851 857
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergentrevs = obsmod.getrevs(repo, 'divergent')
    return subset & divergentrevs
860 866
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extinctrevs = obsmod.getrevs(repo, 'extinct')
    return subset & extinctrevs
869 875
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # value is None when only the label was given: any value matches
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
899 905
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()  # collected changelog revisions to return
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: match exactly one canonicalized file name
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: match against the files of the working directory context
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev  # remember the original (filtered) linkrev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # lazily build the filerev -> head-changeset map
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1014 1020
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # "first" is just another spelling of limit(); delegate unchanged
    return limit(repo, subset, x)
1020 1026
def _follow(repo, subset, x, name, followfirst=False):
    # shared implementation of the "follow" and "_followfirst" predicates
    l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    wctx = repo['.']
    if not l:
        # no file argument: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([wctx.rev()]), followfirst)
    else:
        fname = getstring(l[0], _("%s expected a filename") % name)
        if fname not in wctx:
            return baseset()
        fctx = wctx[fname]
        s = set(ctx.rev() for ctx in fctx.ancestors(followfirst=followfirst))
        # include the revision responsible for the most recent version
        s.add(fctx.introrev())

    return subset & s
1037 1043
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # shared logic lives in _follow(); this is the public predicate name
    return _follow(repo, subset, x, 'follow')
1045 1051
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1051 1057
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    return subset & spanset(repo) # drop "null" if any
1059 1065
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # scan changed file names, user and description for a regex hit
        ctx = repo[x]
        return any(pattern.search(t)
                   for t in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1080 1086
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    args = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in args:
        # i18n: "_matchfiles" is a keyword
        spec = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = spec[:2], spec[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a revision matches as soon as one of its files does
        return any(m(f) for f in repo[x].files())

    return subset.filter(matches)
1137 1143
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with the pattern encoded as a 'p:' argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1150 1156
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the head revisions of every named branch
    for branch, heads in repo.branchmap().iteritems():
        hs.update(cl.rev(h) for h in heads)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1166 1172
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    members = getset(repo, subset, x)
    return members - parents(repo, subset, x)
1174 1180
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are exactly the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1183 1189
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # compare case-insensitively against files, user and description
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1198 1204
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # take members from the full evaluation of the set, keeping only those
    # also present in 'subset', until 'lim' members have been collected or
    # the set is exhausted. Note: renamed the loop variable from 'x' so it
    # no longer shadows this function's 'x' parameter (the parse tree).
    for dummy in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1224 1230
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    # walk the set from its end so the first members found are the last ones
    os.reverse()
    result = []
    it = iter(os)
    # Note: renamed the loop variable from 'x' so it no longer shadows this
    # function's 'x' parameter (the parse tree).
    for dummy in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1251 1257
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    m = os.max()
    # only report the maximum when it survives the subset restriction
    if m in subset:
        return baseset([m])
    return baseset()
1262 1268
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    # a merge has a real (non -1) second parent
    parentrevs = repo.changelog.parentrevs
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1271 1277
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts the children of revision (baserev + i); only
    # revisions at or above baserev can be members of subset, so the count
    # array is offset by baserev to stay compact
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    # a branchpoint is any member that has been seen as a parent twice or more
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1290 1296
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, fullreposet(repo), x)
    if not os:
        return baseset()
    m = os.min()
    # only report the minimum when it survives the subset restriction
    if m in subset:
        return baseset([m])
    return baseset()
1301 1307
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects the 'modified' files of each changeset
    return checkstatus(repo, subset, pat, 0)
1313 1319
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact namespace name; it must exist
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        for name, namespace in repo.names.iteritems():
            if matcher(name):
                namespaces.add(namespace)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather the revisions bound to any non-deprecated name in each namespace
    names = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name not in namespace.deprecated:
                names.update(repo[n].rev()
                             for n in namespace.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1351 1357
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-digit hex: resolve the binary node directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: rely on an unambiguous partial match
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1375 1381
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict the incoming subset to the precomputed 'obsolete' revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1383 1389
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every head that is neither in
        # 'include' nor descended from it
        # (idiom fix: 'x not in s' instead of 'not x in s')
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    # ancestors of 'include' that are not ancestors of 'exclude'
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1409 1415
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the graft/transplant/rebase source chain back to its start
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # None marks revisions with no recorded source; drop them
    o = set(_firstsrc(r) for r in dests) - set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1440 1446
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contacts the remote repository; may perform network I/O
    other = hg.peer(repo, {}, dest)
    # silence the status output that discovery would otherwise emit
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1467 1473
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[0])
    # nullrev parents are not real revisions
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1486 1492
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            # the working directory has a single parent
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        ps.add(cl.parentrevs(r)[1])
    # nullrev marks the absence of a second parent
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1509 1515
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory is not in the changelog; ask its
                # context object for the parents instead
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1528 1534
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache maintains per-phase revision sets
        revs = baseset(repo._phasecache._phasesets[target]
                       - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: compute the phase revision by revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1541 1547
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1549 1555
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1557 1563
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only merges carry a second parent
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1582 1588
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow lookup failures so the surrounding query can proceed
        return baseset()
1596 1602
# for internal use
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set from the cache
        revs = set()
        for phaseset in repo._phasecache._phasesets[1:]:
            revs.update(phaseset)
        revs = baseset(revs - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: check the phase of each revision individually
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) != target, cache=False)
1613 1619
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1623 1629
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means "the current local branch" on the remote side
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contacts the remote repository; may perform network I/O
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        # the remote node exists locally; report it if in the subset
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1658 1664
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 selects the 'removed' files of each changeset
    return checkstatus(repo, subset, pat, 2)
1670 1676
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is always valid; anything else must exist in the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1686 1692
1687 1693 def matching(repo, subset, x):
1688 1694 """``matching(revision [, field])``
1689 1695 Changesets in which a given set of fields match the set of fields in the
1690 1696 selected revision or set.
1691 1697
1692 1698 To match more than one field pass the list of fields to match separated
1693 1699 by spaces (e.g. ``author description``).
1694 1700
1695 1701 Valid fields are most regular revision fields and some special fields.
1696 1702
1697 1703 Regular revision fields are ``description``, ``author``, ``branch``,
1698 1704 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1699 1705 and ``diff``.
1700 1706 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1701 1707 contents of the revision. Two revisions matching their ``diff`` will
1702 1708 also match their ``files``.
1703 1709
1704 1710 Special fields are ``summary`` and ``metadata``:
1705 1711 ``summary`` matches the first line of the description.
1706 1712 ``metadata`` is equivalent to matching ``description user date``
1707 1713 (i.e. it matches the main metadata fields).
1708 1714
1709 1715 ``metadata`` is the default field which is used when no fields are
1710 1716 specified. You can match more than one field at a time.
1711 1717 """
1712 1718 # i18n: "matching" is a keyword
1713 1719 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1714 1720
1715 1721 revs = getset(repo, fullreposet(repo), l[0])
1716 1722
1717 1723 fieldlist = ['metadata']
1718 1724 if len(l) > 1:
1719 1725 fieldlist = getstring(l[1],
1720 1726 # i18n: "matching" is a keyword
1721 1727 _("matching requires a string "
1722 1728 "as its second argument")).split()
1723 1729
1724 1730 # Make sure that there are no repeated fields,
1725 1731 # expand the 'special' 'metadata' field type
1726 1732 # and check the 'files' whenever we check the 'diff'
1727 1733 fields = []
1728 1734 for field in fieldlist:
1729 1735 if field == 'metadata':
1730 1736 fields += ['user', 'description', 'date']
1731 1737 elif field == 'diff':
1732 1738 # a revision matching the diff must also match the files
1733 1739 # since matching the diff is very costly, make sure to
1734 1740 # also match the files first
1735 1741 fields += ['files', 'diff']
1736 1742 else:
1737 1743 if field == 'author':
1738 1744 field = 'user'
1739 1745 fields.append(field)
1740 1746 fields = set(fields)
1741 1747 if 'summary' in fields and 'description' in fields:
1742 1748 # If a revision matches its description it also matches its summary
1743 1749 fields.discard('summary')
1744 1750
1745 1751 # We may want to match more than one field
1746 1752 # Not all fields take the same amount of time to be matched
1747 1753 # Sort the selected fields in order of increasing matching cost
1748 1754 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1749 1755 'files', 'description', 'substate', 'diff']
1750 1756 def fieldkeyfunc(f):
1751 1757 try:
1752 1758 return fieldorder.index(f)
1753 1759 except ValueError:
1754 1760 # assume an unknown field is very costly
1755 1761 return len(fieldorder)
1756 1762 fields = list(fields)
1757 1763 fields.sort(key=fieldkeyfunc)
1758 1764
1759 1765 # Each field will be matched with its own "getfield" function
1760 1766 # which will be added to the getfieldfuncs array of functions
1761 1767 getfieldfuncs = []
1762 1768 _funcs = {
1763 1769 'user': lambda r: repo[r].user(),
1764 1770 'branch': lambda r: repo[r].branch(),
1765 1771 'date': lambda r: repo[r].date(),
1766 1772 'description': lambda r: repo[r].description(),
1767 1773 'files': lambda r: repo[r].files(),
1768 1774 'parents': lambda r: repo[r].parents(),
1769 1775 'phase': lambda r: repo[r].phase(),
1770 1776 'substate': lambda r: repo[r].substate,
1771 1777 'summary': lambda r: repo[r].description().splitlines()[0],
1772 1778 'diff': lambda r: list(repo[r].diff(git=True),)
1773 1779 }
1774 1780 for info in fields:
1775 1781 getfield = _funcs.get(info, None)
1776 1782 if getfield is None:
1777 1783 raise error.ParseError(
1778 1784 # i18n: "matching" is a keyword
1779 1785 _("unexpected field name passed to matching: %s") % info)
1780 1786 getfieldfuncs.append(getfield)
1781 1787 # convert the getfield array of functions into a "getinfo" function
1782 1788 # which returns an array of field values (or a single value if there
1783 1789 # is only one field to match)
1784 1790 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1785 1791
1786 1792 def matches(x):
1787 1793 for rev in revs:
1788 1794 target = getinfo(rev)
1789 1795 match = True
1790 1796 for n, f in enumerate(getfieldfuncs):
1791 1797 if target[n] != f(x):
1792 1798 match = False
1793 1799 if match:
1794 1800 return True
1795 1801 return False
1796 1802
1797 1803 return subset.filter(matches)
1798 1804
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    result = getset(repo, subset, x)
    # smartsets reverse in place; hand the same object back
    result.reverse()
    return result
1806 1812
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a rev is a root iff no (non-null) parent of it is also in s
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot)
1819 1825
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # complement every byte so lexicographic order is reversed;
        # used to express a descending sort of a string key
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: sorting by revision number needs no changectx at all
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: decorate each rev with its key tuple, sort, undecorate
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE: substring test — accepts 'user' and 'author'
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # the rev itself goes last: final tiebreaker, and e[-1] below
        # recovers it after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1882 1888
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # every subrepo change is recorded as a change to .hgsubstate, so a
    # narrow status over just that file detects subrepo activity cheaply
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # NOTE: only reachable when an argument was given, so `pat` is bound
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the rev and its first parent
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any subrepo change at all qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare substate on both sides to find which subrepo moved
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1925 1931
1926 1932 def _stringmatcher(pattern):
1927 1933 """
1928 1934 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1929 1935 returns the matcher name, pattern, and matcher function.
1930 1936 missing or unknown prefixes are treated as literal matches.
1931 1937
1932 1938 helper for tests:
1933 1939 >>> def test(pattern, *tests):
1934 1940 ... kind, pattern, matcher = _stringmatcher(pattern)
1935 1941 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1936 1942
1937 1943 exact matching (no prefix):
1938 1944 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1939 1945 ('literal', 'abcdefg', [False, False, True])
1940 1946
1941 1947 regex matching ('re:' prefix)
1942 1948 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1943 1949 ('re', 'a.+b', [False, False, True])
1944 1950
1945 1951 force exact matches ('literal:' prefix)
1946 1952 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1947 1953 ('literal', 're:foobar', [False, True])
1948 1954
1949 1955 unknown prefixes are ignored and treated as literals
1950 1956 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1951 1957 ('literal', 'foo:bar', [False, False, True])
1952 1958 """
1953 1959 if pattern.startswith('re:'):
1954 1960 pattern = pattern[3:]
1955 1961 try:
1956 1962 regex = re.compile(pattern)
1957 1963 except re.error as e:
1958 1964 raise error.ParseError(_('invalid regular expression: %s')
1959 1965 % e)
1960 1966 return 're', pattern, regex.search
1961 1967 elif pattern.startswith('literal:'):
1962 1968 pattern = pattern[8:]
1963 1969 return 'literal', pattern, pattern.__eq__
1964 1970
def _substringmatcher(pattern):
    """Like _stringmatcher, but a 'literal' pattern matches by containment
    (substring) instead of string equality."""
    kind, body, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s, _needle=body):
            return _needle in s
    return kind, body, matcher
1970 1976
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or other) matcher: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1999 2005
def tagged(repo, subset, x):
    # compatibility alias for tag(); intentionally left without a docstring
    # (presumably so it is not listed separately in generated help — confirm
    # before adding one)
    return tag(repo, subset, x)
2002 2008
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2011 2017
2012 2018
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # thin alias: ``user`` and ``author`` are synonyms
    return author(repo, subset, x)
2022 2028
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is only returned when the caller
    # explicitly included it in subset, or when evaluating over the full repo
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2030 2036
# for internal use
def _list(repo, subset, x):
    # select revisions named in a '\0'-separated string (numbers or symbols)
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # str(r) != t guards against forms like '01' or '+5' that int()
            # accepts but which are not plain revision numbers
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            # not a plain revnum: resolve as a symbol (hash, tag, ...)
            r = repo[t].rev()
        if r in seen:
            continue
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)
2056 2062
# for internal use
def _intlist(repo, subset, x):
    # select the given integer revision numbers, preserving input order
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    wanted = [int(t) for t in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2065 2071
# for internal use
def _hexlist(repo, subset, x):
    # select revisions given as '\0'-separated hex node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in wanted if r in subset])
2075 2081
# table mapping each predicate name in the revset language to the function
# implementing it; every entry is called as fn(repo, subset, x)
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2150 2156
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: this is a subset of `symbols` above; e.g. contains/grep/named/subrepo
# are deliberately absent
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2224 2230
# table mapping parse-tree node types (as produced by parse()/optimize())
# to the functions that evaluate them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2240 2246
def optimize(x, small):
    """Rewrite parse tree ``x`` into a cheaper equivalent form.

    Returns a ``(weight, tree)`` pair.  The weight is a heuristic cost
    estimate used to reorder commutative operands (cheap side first);
    ``small`` biases the weight of single-revision leaves.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # a - b  ==>  a and not b
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        # ::x  ==>  ancestors(x)
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # x::  ==>  descendants(x)
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE: substring membership test (op is always one of these words)
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                # fold consecutive trivial operands into one _list() call
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses carry no semantics of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # weight functions by how expensive they are to evaluate
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2373 2379
2374 2380 _aliasarg = ('func', ('symbol', '_aliasarg'))
2375 2381 def _getaliasarg(tree):
2376 2382 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2377 2383 return X, None otherwise.
2378 2384 """
2379 2385 if (len(tree) == 3 and tree[:2] == _aliasarg
2380 2386 and tree[2][0] == 'string'):
2381 2387 return tree[2][1]
2382 2388 return None
2383 2389
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2394 2400
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@', non-ASCII bytes, plus '$' for backward-compatible
# alias arguments)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2399 2405
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as for full revsets, with a widened initial-letter set
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2409 2415
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # each argument must itself be a plain symbol
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2480 2486
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wrap the alias tokenizer, rewriting argument-name symbols on the fly
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-symbols other than declared arguments are rejected
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2545 2551
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse the declaration (left-hand side); on failure, record the
        # error and skip parsing the definition entirely
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # parse the definition (right-hand side)
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2570 2576
2571 2577 def _getalias(aliases, tree):
2572 2578 """If tree looks like an unexpanded alias, return it. Return None
2573 2579 otherwise.
2574 2580 """
2575 2581 if isinstance(tree, tuple) and tree:
2576 2582 if tree[0] == 'symbol' and len(tree) == 2:
2577 2583 name = tree[1]
2578 2584 alias = aliases.get(name)
2579 2585 if alias and alias.args is None and alias.tree == tree:
2580 2586 return alias
2581 2587 if tree[0] == 'func' and len(tree) > 1:
2582 2588 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2583 2589 name = tree[1][1]
2584 2590 alias = aliases.get(name)
2585 2591 if alias and alias.args is not None and alias.tree == tree[:2]:
2586 2592 return alias
2587 2593 return None
2588 2594
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not tree or not isinstance(tree, tuple):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        # not a placeholder: recurse into the children
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2599 2605
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used to
    detect cycles); 'cache' memoizes already-expanded alias bodies by name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: substitute the actual arguments
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # expand arguments with a fresh `expanding` stack: they are
            # independent of the alias body being expanded here
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node: expand each child in place
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2633 2639
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases inside tree.

    Aliases are read from the [revsetalias] config section.  When
    showwarning is given, broken aliases that were never referenced are
    reported through it, once each.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        a = revsetalias(key, value)
        aliases[a.name] = a
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for unused_name, a in sorted(aliases.iteritems()):
            if a.error and not a.warned:
                showwarning(_('warning: %s\n') % (a.error))
                a.warned = True
    return expanded
2648 2654
def foldconcat(tree):
    """Fold elements to be concatenated by `##`

    A '_concat' node (possibly nested) of 'string'/'symbol' leaves is
    collapsed into a single 'string' node; any other leaf inside a
    concatenation is a parse error.
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(t) for t in tree)
    # flatten nested _concat nodes depth-first, left to right
    parts = []
    stack = [tree]
    while stack:
        elem = stack.pop()
        kind = elem[0]
        if kind == '_concat':
            stack.extend(reversed(elem[1:]))
        elif kind in ('string', 'symbol'):
            parts.append(elem[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2669 2675
def parse(spec, lookup=None):
    """Parse a revset specification into a simplified parse tree.

    Raises error.ParseError when spec contains trailing garbage.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2676 2682
def posttreebuilthook(tree, repo):
    # hook for extensions to execute code on the optimized tree
    # (intentionally a no-op here; presumably extensions wrap or replace
    # this function — confirm against extension usage)
    pass
2680 2686
def match(ui, spec, repo=None):
    """Build a matcher function for a single revset spec.

    Raises error.ParseError on an empty spec.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2689 2695
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # nothing requested: the matcher selects nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2707 2713
def _makematcher(ui, tree, repo):
    """Turn a parse tree into a callable evaluating it against a subset."""
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # wrap plain collections into a smartset before evaluating
        if not util.safehasattr(subset, 'isascending'):
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2723 2729
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote and escape via repr of the stringified value
        return repr(str(s))

    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # empty and single-element lists are handled directly; longer
        # homogeneous lists use the _list/_intlist/_hexlist helpers, and
        # revset lists ('r') are recursively split and joined with 'or'
        l = len(s)
        if l == 0:
            return "_list('')"
        if l == 1:
            return argtype(t, s[0])
        if t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        if t == 's':
            return "_list('%s')" % "\0".join(s)
        if t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        if t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)
        mid = l // 2
        return '(%s or %s)' % (listexp(s[:mid], t), listexp(s[mid:], t))

    pieces = []
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                pieces.append(d)
            elif d in 'dsnbr':
                pieces.append(argtype(d, args[arg]))
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                pieces.append(listexp(list(args[arg]), d))
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            pieces.append(c)
        pos += 1

    return ''.join(pieces)
2817 2823
def prettyformat(tree):
    """Return a printable representation of a parse tree.

    Delegates to parser.prettyformat, passing the node types that carry
    plain values ('string', 'symbol').
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2820 2826
def depth(tree):
    """Return the nesting depth of a parse tree node.

    Non-tuple leaves have depth 0.  An empty tuple is not a valid node
    and raises ValueError (max of an empty sequence).
    """
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2826 2832
def funcsused(tree):
    """Return the set of function names referenced anywhere in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        used.add(tree[1][1])
    return used
2837 2843
class abstractsmartset(object):
    """Base class for all smartset implementations.

    A smartset represents an ordered set of revision numbers and offers
    lazy iteration in either direction plus fast membership testing.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        fast = self.fastasc
        if fast is None:
            # no fast ascending iterator: fall back to a full scan
            return min(self)
        for rev in fast():
            return rev
        raise ValueError('arg is an empty sequence')

    def max(self):
        """return the maximum element in the set"""
        fast = self.fastdesc
        if fast is None:
            # no fast descending iterator: fall back to a full scan
            return max(self)
        for rev in fast():
            return rev
        raise ValueError('arg is an empty sequence')

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        inother = other.__contains__
        return self.filter(lambda r: not inother(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin functions cannot be cached, but they do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2942 2948
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # ordering state: None = insertion order, True = ascending,
        # False = descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # set view of the data, for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy of the data; _list keeps the insertion order
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # presumably util.propertycache stores the computed value on the
        # instance (TODO confirm): the first membership lookup then caches
        # the set's bound __contains__ method here
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order: reverse the concrete list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3037 3043
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results per revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # only revisions present in the underlying subset can match
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the revisions of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only possible if the underlying set has it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # count lazily instead of materializing a list and a baseset
        # just to measure its length
        return sum(1 for r in self)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reverse iteration available: scan to the end
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3127 3133
3128 3134 def _iterordered(ascending, iter1, iter2):
3129 3135 """produce an ordered iteration from two iterators with the same order
3130 3136
3131 3137 The ascending is used to indicated the iteration direction.
3132 3138 """
3133 3139 choice = max
3134 3140 if ascending:
3135 3141 choice = min
3136 3142
3137 3143 val1 = None
3138 3144 val2 = None
3139 3145 try:
3140 3146 # Consume both iterators in an ordered way until one is empty
3141 3147 while True:
3142 3148 if val1 is None:
3143 3149 val1 = iter1.next()
3144 3150 if val2 is None:
3145 3151 val2 = iter2.next()
3146 3152 next = choice(val1, val2)
3147 3153 yield next
3148 3154 if val1 == next:
3149 3155 val1 = None
3150 3156 if val2 == next:
3151 3157 val2 = None
3152 3158 except StopIteration:
3153 3159 # Flush any remaining values and consume the other one
3154 3160 it = iter2
3155 3161 if val1 is not None:
3156 3162 yield val1
3157 3163 it = iter1
3158 3164 elif val2 is not None:
3159 3165 # might have been equality and both are empty
3160 3166 yield val2
3161 3167 for val in it:
3162 3168 yield val
3163 3169
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: unknown order; True/False: both inputs sorted that way
        self._ascending = ascending
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the union via the _list propertycache
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        # materialized union, preserving the current iteration order
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # both inputs must support fast ascending iteration
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # both inputs must support fast descending iteration
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # no declared order: reverse the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the reversed order, then restore the order
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3363 3369
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: when not None, declares that 'gen' yields its values in
        ascending (True) or descending (False) order, which enables
        early-exit membership tests.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # instance attribute: 'x in s' still goes through the
                # class-level __contains__, but callers that look up
                # 's.__contains__' as an attribute (as other smartsets
                # do) get the optimized version
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # generator is ascending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # generator is descending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        consume = self._consumegen()
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    # use the next() builtin instead of the Python-2-only
                    # .next() method, and keep StopIteration from leaking
                    # out of this generator
                    try:
                        yield next(consume)
                    except StopIteration:
                        return
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        # generator exhausted: install direct fast iterators over the
        # sorted materialized values
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installs
            # both fastasc and fastdesc, so this terminates
            for x in self._consumegen():
                pass
            # was 'return self.first()', which returned the wrong end of
            # the set
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3537 3543
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end; the direction is kept in
            # _ascending and the +1s preserve the half-open bounds
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview, skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract the hidden revisions falling inside the span
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                # NOTE(review): after __init__ normalization start <= end,
                # so the first clause never matches — presumably defensive
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3646 3652
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            result = other
        else:
            # "other" is not a smartset; it was used with "&", so assume it
            # is a set-like object and strip hidden revisions before
            # promoting it
            # (this boldly assumes all smartset are pure)
            result = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, sorting here is
        # wrong: with a giveme312() revset returning [3, 1, 2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong).
        # We cannot simply drop the sort because other usage still needs it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right).
        # Some faulty revset implementations also rely on it
        # (eg: children as of its state in e8075329c5fb).
        # When both points above are fixed, the sort can move into the
        # else branch.
        result.sort(reverse=self.isdescending())
        return result
3685 3691
def prettyformatset(revs):
    """Return repr(revs) reformatted as an indented, one-set-per-line tree.

    The nesting depth of each segment is the number of unmatched '<' seen
    before it, so the repr of an inner smartset ends up indented under its
    parent's.
    """
    rendered = repr(revs)
    out = []
    pos = 0
    while pos < len(rendered):
        # the next nested repr begins at the next '<' (never the current
        # character, hence pos + 1)
        nxt = rendered.find('<', pos + 1)
        if nxt < 0:
            nxt = len(rendered)
        depth = rendered.count('<', 0, pos) - rendered.count('>', 0, pos)
        assert depth >= 0
        out.append(' ' * depth + rendered[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(out)
3699 3705
# tell hggettext to extract docstrings from these functions:
# ('symbols' is presumably the revset predicate table defined earlier in
# this module -- not visible in this hunk; TODO confirm)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now