##// END OF EJS Templates
revset: make revsbetween public...
Laurent Charignon -
r26001:748053b4 default
parent child Browse files
Show More
@@ -1,3701 +1,3701 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazily-evaluated generatorset of the ancestors of ``revs``
    (including ``revs`` themselves), emitted in descending revision order.
    When ``followfirst`` is true, only first parents are followed.
    """
    # Slice parentrevs(...)[:cut]: cut == 1 keeps only the first parent,
    # cut == None keeps both.
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk from the highest rev downward using a max-heap, implemented
        # as a min-heap of negated revision numbers.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # Merge the next input revision into the traversal as soon as
            # the walk reaches it, so the output stays globally descending.
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a lazily-evaluated generatorset of descendants of ``revs``
    (excluding ``revs`` themselves) in ascending revision order.  When
    ``followfirst`` is true, only first-parent links are followed.
    """
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # A child always has a higher revision number than its parents,
            # so one ascending sweep starting just above the lowest root is
            # enough to propagate membership through 'seen'.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets.

    Returns a baseset in ascending revision order.  Empty if ``roots`` is
    empty or no head reaches a root.
    """
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    minroot = min(roots)
    roots = set(roots)
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    # Pass 1: walk ancestors of heads down to minroot, recording each rev's
    # parents in 'seen' and marking the roots that were actually reached.
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    # Pass 2: sweep the visited revs in ascending order; since parents are
    # numerically smaller than children, one sweep propagates reachability
    # from the roots up to the heads.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return baseset(sorted(reachable))
125 125
# Parser table for the revset grammar, consumed by the generic parser in
# the 'parser' module.
# token-type: binding-strength, primary, prefix, infix, suffix
# Each action entry is (node-name, binding[, closing-token]); None means the
# token cannot appear in that position.
elements = {
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
153 153
# Tokens that are operators rather than symbols when they appear bare.
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (bytes > 127 are accepted, presumably so multi-byte encoded names
# tokenize as symbols -- confirm against callers)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
163 163
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # Main scan: each token is a (type, value, position) triple.
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r'...' / r"..." are raw: no escape processing.
            # NOTE: 'string-escape' is a Python 2 bytes codec.
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one so the shared 'pos += 1' below lands on the
            # character that ended the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
281 281
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    return _('at %s: %s') % (args[1], args[0])
289 289
290 290 # helpers
291 291
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' node, else raise err."""
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
296 296
def getlist(x):
    """Flatten a left-nested 'list' parse node into a list of child nodes."""
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]
303 303
def getargs(x, min, max, err):
    """Return the argument list of x; raise ParseError(err) unless the
    count is within [min, max] (a negative max means unbounded)."""
    args = getlist(x)
    if len(args) < min or (max >= 0 and len(args) > max):
        raise error.ParseError(err)
    return args
309 309
def getargsdict(x, funcname, keys):
    # Map positional/keyword revset arguments onto the names in 'keys'
    # (a space-separated string); delegates to the generic parser helper.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
313 313
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
318 318
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
325 325
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # inline of isvalidsymbol(tree[1]), guarded by the length check
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    sym = tree[1]
    return sym[0] == 'symbol' and len(sym) > 1
330 330
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # the function name is the payload of the 'symbol' child
    return tree[1][1]
337 337
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # a zero-argument call has no third child
    if len(tree) <= 2:
        return []
    return getlist(tree[2])
347 347
def getset(repo, subset, x):
    # Evaluate parse tree ``x`` against ``subset`` by dispatching on the
    # node type; always returns a smartset.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # duck-typed smartset check: all smartset classes expose isascending
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap plain lists returned by legacy predicates
    return baseset(s)
362 362
def _getrevsource(repo, r):
    """Return the rev recorded as r's graft/transplant/rebase source,
    or None if there is none or it cannot be resolved."""
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label not in extra:
            continue
        try:
            return repo[extra[label]].rev()
        except error.RepoLookupError:
            pass
    return None
372 372
373 373 # operator methods
374 374
def stringset(repo, subset, x):
    """Resolve a literal revision identifier into a one-element set,
    empty if the revision is not in subset (nullrev is admitted for
    fullreposet subsets)."""
    x = repo[x].rev()
    if x in subset:
        return baseset([x])
    if x == node.nullrev and isinstance(subset, fullreposet):
        return baseset([x])
    return baseset()
381 381
def rangeset(repo, subset, x, y):
    # 'x:y' -- revisions from the first rev of x to the last rev of y,
    # in that direction (descending span if m > n).
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # working-directory endpoint: span to tip, then append wdirrev
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
406 406
def dagrange(repo, subset, x, y):
    # 'x::y' -- changesets on any ancestry path from x to y, inclusive.
    r = fullreposet(repo)
    xs = revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
413 413
def andset(repo, subset, x, y):
    """Intersection: evaluate y within the set produced by x."""
    left = getset(repo, subset, x)
    return getset(repo, left, y)
416 416
def orset(repo, subset, *xs):
    """Union of one or more operand expressions, combined divide-and-conquer
    to keep the recursion shallow."""
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    half = len(xs) // 2
    left = orset(repo, subset, *xs[:half])
    right = orset(repo, subset, *xs[half:])
    return left + right
425 425
def notset(repo, subset, x):
    """Complement within subset: everything not matched by x."""
    excluded = getset(repo, subset, x)
    return subset - excluded
428 428
def listset(repo, subset, a, b):
    # A bare comma-list is only meaningful inside a function call.
    raise error.ParseError(_("can't use a list in this context"))
431 431
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only meaningful inside a function call argument list.
    raise error.ParseError(_("can't use a key-value pair in this context"))
434 434
def func(repo, subset, a, b):
    """Dispatch a parsed ``func`` node to the matching revset predicate."""
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # Unknown identifier: suggest only documented predicates.
    syms = [s for (s, fn) in symbols.items()
            if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], syms)
443 443
444 444 # functions
445 445
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple is used for added files (see
    # checkstatus)
    return checkstatus(repo, subset, pat, 1)
457 457
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # Fold pairwise: the gca of many revs is the running gca of each in turn.
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
482 482
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation for ancestors() and _firstancestors(): heads
    # are evaluated against the whole repo, then intersected with subset.
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = _revancestors(repo, heads, followfirst)
    return subset & ancs
489 489
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin wrapper; the work is in _ancestors/_revancestors
    return _ancestors(repo, subset, x)
495 495
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden from the generated revset help)
    return _ancestors(repo, subset, x, followfirst=True)
500 500
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n is a parsed token; its payload sits at index 1
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n first-parent links up from r
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
517 517
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # lower-case both the pattern and each commit user for a
    # case-insensitive comparison
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
526 526
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get expands the status keyword into the matching revs
    state = set(hbisect.get(repo, status))
    return subset & state
543 543
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deliberately undocumented alias of bisect()
    return bisect(repo, subset, x)
548 548
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: missing bookmark is an error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern match over all bookmark names
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
586 586
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: collect the branches of the given changesets, then
    # select every rev on any of those branches (or in the set itself)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
619 619
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    # the obsolescence machinery precomputes this set
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
630 630
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # bundlerevs only exists on bundle-backed changelogs; its absence
        # means no bundle was given
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
642 642
def checkstatus(repo, subset, pat, field):
    # Filter subset to changesets where ``pat`` matches a file listed in
    # the given index of the repo.status() tuple (index 1 is used by
    # adds(); other indexes presumably select other status categories --
    # confirm against the other callers).
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        # fileset patterns depend on the context, so rebuild the matcher
        # per changeset; otherwise build it once and cache it
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal filename
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
674 674
def _children(repo, narrow, parentset):
    """Return the members of narrow having at least one parent in
    parentset."""
    if not parentset:
        return baseset()
    found = set()
    parentrevs = repo.changelog.parentrevs
    floor = parentset.min()
    for rev in narrow:
        # a child always has a higher rev number than its parents
        if rev <= floor:
            continue
        for p in parentrevs(rev):
            if p in parentset:
                found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
690 690
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    # parents are resolved against the whole repo, not just subset
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
698 698
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
706 706
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a single manifest membership test
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
733 733
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # 'convert_revision' extra is written by the convert extension;
        # prefix match so short ids work
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
755 755
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # util.matchdate compiles the interval into a timestamp predicate
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
764 764
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower-case both sides for the case-insensitive substring test
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
777 777
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants() and _firstdescendants():
    # the result includes the roots themselves.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    descrevs = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    result = (subset & roots) + (subset & descrevs)
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to recover its ordering
        result = subset & result
    return result
796 796
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # thin wrapper; the work is in _descendants/_revdescendants
    return _descendants(repo, subset, x)
802 802
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (no docstring on purpose: hidden from the generated revset help)
    return _descendants(repo, subset, x, followfirst=True)
807 807
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source.  Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be.  Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set.  Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset.  Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
851 851
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # the obsolescence machinery precomputes this set
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent
860 860
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # the obsolescence machinery precomputes this set
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts
869 869
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        # value is None when only a label was given: match on presence alone
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
899 899
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: canonicalize to exactly one file
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: lazily match against the working-copy file list
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev # remember the filtered linkrev for the check below

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # build the head cache lazily; it is only needed when at
                    # least one filtered filerev has no known child
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1014 1014
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # identical argument handling and semantics as limit()
    return limit(repo, subset, x)
1020 1020
def _follow(repo, subset, x, name, followfirst=False):
    # shared implementation of follow()/_followfirst(): with a filename
    # argument, walk that file's history; otherwise walk the ancestors of
    # the working directory's first parent
    args = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
    wctxparent = repo['.']
    if not args:
        ancestors = _revancestors(repo, baseset([wctxparent.rev()]),
                                  followfirst)
        return subset & ancestors
    fname = getstring(args[0], _("%s expected a filename") % name)
    if fname not in wctxparent:
        return baseset()
    fctx = wctxparent[fname]
    revs = set(c.rev() for c in fctx.ancestors(followfirst=followfirst))
    # include the revision responsible for the most recent version
    revs.add(fctx.introrev())
    return subset & revs
1037 1037
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # both parents are followed; see _followfirst for the first-parent variant
    return _follow(repo, subset, x, 'follow')
1045 1045
def _followfirst(repo, subset, x):
    # ``followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # (comment, not a docstring, so it stays out of the revset help output)
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1051 1051
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersecting with a spanset drops "null" if the subset contained it
    return subset & spanset(repo)
1059 1059
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        pattern = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        # search changed file names, user name and full commit message
        ctx = repo[r]
        return any(pattern.search(t)
                   for t in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1080 1080
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    # dispatch each argument on its two-character prefix
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # a changeset matches when any of its changed files matches
    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1137 1137
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pattern = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles, marking the pattern as a plain 'p:' entry
    return _matchfiles(repo, subset, ('string', 'p:' + pattern))
1150 1150
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    torev = repo.changelog.rev
    headrevs = set()
    for branch, nodes in repo.branchmap().iteritems():
        headrevs.update(torev(n) for n in nodes)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1166 1166
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # drop every member that is a parent of another member
    return members - parents(repo, subset, x)
1174 1174
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered from the 'visible' view are exactly the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1183 1183
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # lower-case each candidate text for the case-insensitive comparison
        texts = ctx.files() + [ctx.user(), ctx.description()]
        return any(needle in encoding.lower(t) for t in texts)

    return subset.filter(matches)
1198 1198
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.

    Iterates the inner set in its own order and keeps the first ``n``
    revisions that also belong to ``subset``.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # do not use 'x' as the loop variable: it would shadow the parse-tree
    # argument of this function
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1224 1224
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.

    Reverses the inner set, then keeps the first ``n`` revisions that also
    belong to ``subset``.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    # do not use 'x' as the loop variable: it would shadow the parse-tree
    # argument of this function
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)
1251 1251
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        top = candidates.max()
        if top in subset:
            return baseset([top])
    return baseset()
1262 1262
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge is any revision with a valid second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1271 1271
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[r - baserev] counts the children of revision r; only
    # revisions >= baserev can be in the result, so the array starts there
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    # a branchpoint has at least two recorded children
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1290 1290
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    if candidates:
        lowest = candidates.min()
        if lowest in subset:
            return baseset([lowest])
    return baseset()
1301 1301
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pattern = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects the 'modified' file list
    return checkstatus(repo, subset, pattern, 0)
1313 1313
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        # exact name: it must denote an existing namespace
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # pattern: collect every namespace whose name matches
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # resolve every non-deprecated name of the selected namespaces to revs
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1351 1351
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full-length hex node: a direct changelog lookup is enough
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            pass
    else:
        # shorter hex prefix: resolve through the unambiguous-prefix matcher
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1375 1375
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    return subset & obsmod.getrevs(repo, 'obsolete')
1383 1383
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # single-argument form: exclude every head that neither belongs to
        # 'include' nor descends from it (use 'not in', not 'not x in')
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1409 1409
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        """walk the recorded source chain of 'rev' back to its origin"""
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    # None marks destinations with no recorded source; drop it from the set
    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1440 1440
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    # a conditional expression is clearer than the fragile 'and/or' idiom
    dest = getstring(l[0], _("outgoing requires a repository path")) if l else ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # discovery may talk to the peer; silence its ui output
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1467 1467
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[None].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1486 1486
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        wparents = repo[None].parents()
        if len(wparents) > 1:
            p = wparents[1].rev()
            if p >= 0:
                return subset & baseset([p])
        return baseset()

    parentrevs = repo.changelog.parentrevs
    ps = set(parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1509 1509
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    ps = set()
    if x is None:
        ps.update(p.rev() for p in repo[None].parents())
    else:
        parentrevs = repo.changelog.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory has no changelog entry; read its
                # parents from the context instead
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1528 1528
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: the cache knows the whole set for this phase
        revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
        revs.sort() # set are non ordered, so we enforce ascending
        return subset & revs
    # slow path: query the phase of each candidate revision
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1541 1541
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1549 1549
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1557 1557
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    # 'n' arrives as a parse-tree token; its payload must be 0, 1 or 2
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1582 1582
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallowing the lookup error is the whole point of this predicate
        return baseset()
1596 1596
# for internal use
def _notpublic(repo, subset, x):
    # internal predicate, hence the untranslated error message and the
    # deliberate absence of a docstring (predicates with docstrings show up
    # in the user-facing help)
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if phasesets:
        # fast path: union every non-public phase set (index 0 is public)
        s = set()
        for u in phasesets[1:]:
            s.update(u)
        revs = baseset(s - repo.changelog.filteredrevs)
        revs.sort()
        return subset & revs
    # slow path: query the phase of each candidate revision
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) != target, cache=False)
1613 1613
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    # note: cannot use _phase() here, which relies on the _phasesets cache
    # holding non-public revisions only
    phase = repo._phasecache.phase
    target = phases.public
    return subset.filter(lambda r: phase(repo, r) == target, cache=False)
1623 1623
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' (explicit or default) means the currently checked out branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # ask the peer to resolve the identifier, then map the node back locally
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1658 1658
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field 2 selects the 'removed' file list
    return checkstatus(repo, subset, pattern, 2)
1670 1670
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        # use a distinct name instead of rebinding 'l' (which still holds
        # the argument list) to the parsed integer
        r = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1686 1686
1687 1687 def matching(repo, subset, x):
1688 1688 """``matching(revision [, field])``
1689 1689 Changesets in which a given set of fields match the set of fields in the
1690 1690 selected revision or set.
1691 1691
1692 1692 To match more than one field pass the list of fields to match separated
1693 1693 by spaces (e.g. ``author description``).
1694 1694
1695 1695 Valid fields are most regular revision fields and some special fields.
1696 1696
1697 1697 Regular revision fields are ``description``, ``author``, ``branch``,
1698 1698 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1699 1699 and ``diff``.
1700 1700 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1701 1701 contents of the revision. Two revisions matching their ``diff`` will
1702 1702 also match their ``files``.
1703 1703
1704 1704 Special fields are ``summary`` and ``metadata``:
1705 1705 ``summary`` matches the first line of the description.
1706 1706 ``metadata`` is equivalent to matching ``description user date``
1707 1707 (i.e. it matches the main metadata fields).
1708 1708
1709 1709 ``metadata`` is the default field which is used when no fields are
1710 1710 specified. You can match more than one field at a time.
1711 1711 """
1712 1712 # i18n: "matching" is a keyword
1713 1713 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1714 1714
1715 1715 revs = getset(repo, fullreposet(repo), l[0])
1716 1716
1717 1717 fieldlist = ['metadata']
1718 1718 if len(l) > 1:
1719 1719 fieldlist = getstring(l[1],
1720 1720 # i18n: "matching" is a keyword
1721 1721 _("matching requires a string "
1722 1722 "as its second argument")).split()
1723 1723
1724 1724 # Make sure that there are no repeated fields,
1725 1725 # expand the 'special' 'metadata' field type
1726 1726 # and check the 'files' whenever we check the 'diff'
1727 1727 fields = []
1728 1728 for field in fieldlist:
1729 1729 if field == 'metadata':
1730 1730 fields += ['user', 'description', 'date']
1731 1731 elif field == 'diff':
1732 1732 # a revision matching the diff must also match the files
1733 1733 # since matching the diff is very costly, make sure to
1734 1734 # also match the files first
1735 1735 fields += ['files', 'diff']
1736 1736 else:
1737 1737 if field == 'author':
1738 1738 field = 'user'
1739 1739 fields.append(field)
1740 1740 fields = set(fields)
1741 1741 if 'summary' in fields and 'description' in fields:
1742 1742 # If a revision matches its description it also matches its summary
1743 1743 fields.discard('summary')
1744 1744
1745 1745 # We may want to match more than one field
1746 1746 # Not all fields take the same amount of time to be matched
1747 1747 # Sort the selected fields in order of increasing matching cost
1748 1748 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1749 1749 'files', 'description', 'substate', 'diff']
1750 1750 def fieldkeyfunc(f):
1751 1751 try:
1752 1752 return fieldorder.index(f)
1753 1753 except ValueError:
1754 1754 # assume an unknown field is very costly
1755 1755 return len(fieldorder)
1756 1756 fields = list(fields)
1757 1757 fields.sort(key=fieldkeyfunc)
1758 1758
1759 1759 # Each field will be matched with its own "getfield" function
1760 1760 # which will be added to the getfieldfuncs array of functions
1761 1761 getfieldfuncs = []
1762 1762 _funcs = {
1763 1763 'user': lambda r: repo[r].user(),
1764 1764 'branch': lambda r: repo[r].branch(),
1765 1765 'date': lambda r: repo[r].date(),
1766 1766 'description': lambda r: repo[r].description(),
1767 1767 'files': lambda r: repo[r].files(),
1768 1768 'parents': lambda r: repo[r].parents(),
1769 1769 'phase': lambda r: repo[r].phase(),
1770 1770 'substate': lambda r: repo[r].substate,
1771 1771 'summary': lambda r: repo[r].description().splitlines()[0],
1772 1772 'diff': lambda r: list(repo[r].diff(git=True),)
1773 1773 }
1774 1774 for info in fields:
1775 1775 getfield = _funcs.get(info, None)
1776 1776 if getfield is None:
1777 1777 raise error.ParseError(
1778 1778 # i18n: "matching" is a keyword
1779 1779 _("unexpected field name passed to matching: %s") % info)
1780 1780 getfieldfuncs.append(getfield)
1781 1781 # convert the getfield array of functions into a "getinfo" function
1782 1782 # which returns an array of field values (or a single value if there
1783 1783 # is only one field to match)
1784 1784 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1785 1785
1786 1786 def matches(x):
1787 1787 for rev in revs:
1788 1788 target = getinfo(rev)
1789 1789 match = True
1790 1790 for n, f in enumerate(getfieldfuncs):
1791 1791 if target[n] != f(x):
1792 1792 match = False
1793 1793 if match:
1794 1794 return True
1795 1795 return False
1796 1796
1797 1797 return subset.filter(matches)
1798 1798
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # evaluate the argument set, then flip its ordering in place
    result = getset(repo, subset, x)
    result.reverse()
    return result
1806 1806
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no parent inside the set; negative parents (nullrev)
        # never count as members
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot)
1819 1819
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each character to its complement so that an ascending sort
        # of the inverted string yields descending order of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: plain revision-number sorts need no per-changeset keys
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build one sort-key list per revision
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                # NOTE(review): substring membership test — any substring of
                # 'user author' (e.g. 'use') also matches; presumably only
                # 'user'/'author' reach here in practice — confirm
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing rev both breaks ties and lets us recover the revision
        # from the sorted entries below via e[-1]
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1882 1882
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        # 'pat' is only bound (and only used by submatches) when a pattern
        # argument was given
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo changes always touch .hgsubstate, so status is restricted to it
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names matching the user-supplied pattern
        k, p, m = _stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # did this changeset touch .hgsubstate relative to its first parent?
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any subrepo change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate was introduced: every current subrepo is "added"
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare each matching subrepo's recorded state across parents
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate was deleted: every parent subrepo is "removed"
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
1925 1925
1926 1926 def _stringmatcher(pattern):
1927 1927 """
1928 1928 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1929 1929 returns the matcher name, pattern, and matcher function.
1930 1930 missing or unknown prefixes are treated as literal matches.
1931 1931
1932 1932 helper for tests:
1933 1933 >>> def test(pattern, *tests):
1934 1934 ... kind, pattern, matcher = _stringmatcher(pattern)
1935 1935 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1936 1936
1937 1937 exact matching (no prefix):
1938 1938 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1939 1939 ('literal', 'abcdefg', [False, False, True])
1940 1940
1941 1941 regex matching ('re:' prefix)
1942 1942 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1943 1943 ('re', 'a.+b', [False, False, True])
1944 1944
1945 1945 force exact matches ('literal:' prefix)
1946 1946 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1947 1947 ('literal', 're:foobar', [False, True])
1948 1948
1949 1949 unknown prefixes are ignored and treated as literals
1950 1950 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1951 1951 ('literal', 'foo:bar', [False, False, True])
1952 1952 """
1953 1953 if pattern.startswith('re:'):
1954 1954 pattern = pattern[3:]
1955 1955 try:
1956 1956 regex = re.compile(pattern)
1957 1957 except re.error as e:
1958 1958 raise error.ParseError(_('invalid regular expression: %s')
1959 1959 % e)
1960 1960 return 're', pattern, regex.search
1961 1961 elif pattern.startswith('literal:'):
1962 1962 pattern = pattern[8:]
1963 1963 return 'literal', pattern, pattern.__eq__
1964 1964
def _substringmatcher(pattern):
    # Like _stringmatcher(), except that a literal pattern matches as a
    # substring instead of requiring full equality.
    kind, text, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return text in s
    return kind, text, matcher
1970 1970
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or other) pattern: scan the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1999 1999
def tagged(repo, subset, x):
    # "tagged" is another spelling of the tag() predicate; delegate to it
    return tag(repo, subset, x)
2002 2002
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # restrict the subset to the precomputed 'unstable' revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2011 2011
2012 2012
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" is a synonym of "author"; delegate to that predicate
    return author(repo, subset, x)
2022 2022
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # only report the working-directory revision when it is already in the
    # subset, or when the subset spans the whole repository
    if node.wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([node.wdirrev])
    return baseset()
2030 2030
# for internal use
def _list(repo, subset, x):
    # Resolve a '\0'-separated list of revision symbols (internal use).
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
        except ValueError:
            # not a plain revision number: resolve as a symbol
            r = repo[t].rev()
        if r in seen:
            continue
        # nullrev is only admitted when the subset is the whole repo
        if (r in subset
            or r == node.nullrev and isinstance(subset, fullreposet)):
            ls.append(r)
        seen.add(r)
    return baseset(ls)
2056 2056
# for internal use
def _intlist(repo, subset, x):
    # Select a '\0'-separated list of integer revisions (internal use).
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    candidates = [int(r) for r in data.split('\0')]
    return baseset([r for r in candidates if r in subset])
2065 2065
# for internal use
def _hexlist(repo, subset, x):
    # Select a '\0'-separated list of hex nodeids (internal use).
    data = getstring(x, "internal error")
    if not data:
        return baseset()
    torev = repo.changelog.rev
    candidates = [torev(node.bin(h)) for h in data.split('\0')]
    return baseset([r for r in candidates if r in subset])
2075 2075
# revset predicate name -> implementation function; this is the lookup
# table used when evaluating a 'func' node in a parsed revset tree
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2150 2150
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
])
2224 2224
# parse-tree node type -> evaluation function; used to dispatch while
# evaluating a parsed revset tree
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2240 2240
def optimize(x, small):
    """Rewrite parse tree ``x`` into an equivalent, cheaper-to-evaluate tree.

    Returns a ``(weight, tree)`` pair where ``weight`` is a heuristic cost
    estimate of evaluating ``tree``.  ``small`` hints that the result is
    expected to be a small set (e.g. an operand of 'and'), which lowers the
    weight of single-revision nodes.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # rewrite syntactic sugar into its canonical functional form first
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE: substring membership test, not a tuple — ops reaching this
        # point are only 'string' and 'symbol' ('negate' is handled above)
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial operands into one _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; drop the node
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # per-predicate cost heuristics (substring tests — see note above)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2373 2373
2374 2374 _aliasarg = ('func', ('symbol', '_aliasarg'))
2375 2375 def _getaliasarg(tree):
2376 2376 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2377 2377 return X, None otherwise.
2378 2378 """
2379 2379 if (len(tree) == 3 and tree[:2] == _aliasarg
2380 2380 and tree[2][0] == 'string'):
2381 2381 return tree[2][1]
2382 2382 return None
2383 2383
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    for subtree in tree:
        _checkaliasarg(subtree, known)
2394 2394
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (alphanumerics, '._@$', and any byte value above 127)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2399 2399
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as for full revsets, but with the wider set of
    # symbol-initial characters that includes '$'
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2409 2409
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the declaration must consume the whole input string
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # every argument must be a plain symbol (no nesting/strings)
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2480 2480
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # wrap the alias tokenizer so that symbols naming a declared
        # argument are rewritten into _aliasarg('ARGNAME') token streams
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols are reserved for alias arguments
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2545 2545
class revsetalias(object):
    # A user-defined revset alias: parses its declaration and definition
    # once at construction time; parse failures are recorded in self.error
    # rather than raised, so bad aliases only fail when actually used.

    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2570 2570
2571 2571 def _getalias(aliases, tree):
2572 2572 """If tree looks like an unexpanded alias, return it. Return None
2573 2573 otherwise.
2574 2574 """
2575 2575 if isinstance(tree, tuple) and tree:
2576 2576 if tree[0] == 'symbol' and len(tree) == 2:
2577 2577 name = tree[1]
2578 2578 alias = aliases.get(name)
2579 2579 if alias and alias.args is None and alias.tree == tree:
2580 2580 return alias
2581 2581 if tree[0] == 'func' and len(tree) > 1:
2582 2582 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2583 2583 name = tree[1][1]
2584 2584 alias = aliases.get(name)
2585 2585 if alias and alias.args is not None and alias.tree == tree[:2]:
2586 2586 return alias
2587 2587 return None
2588 2588
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(child, args) for child in tree)
    return args[name]
2599 2599
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded (used to
    detect cycles); 'cache' memoizes each alias's expanded replacement.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: expand actual arguments, then substitute
            # them for the _aliasarg placeholders in the replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias node: recurse into children
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2633 2633
def findaliases(ui, tree, showwarning=None):
    """Expand all [revsetalias] config aliases found in ``tree``.

    If ``showwarning`` is given, it is called once per broken alias that
    was not already warned about.
    """
    # reject any pre-existing _aliasarg placeholders (injection guard)
    _checkaliasarg(tree)
    aliases = {}
    for k, v in ui.configitems('revsetalias'):
        alias = revsetalias(k, v)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return tree
2648 2648
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # not a concatenation node: fold each child independently
        return tuple(foldconcat(t) for t in tree)
    # flatten the (possibly nested) _concat chain left-to-right with an
    # explicit stack, collecting the literal pieces
    pieces = []
    stack = [tree]
    while stack:
        item = stack.pop()
        if item[0] == '_concat':
            stack.extend(reversed(item[1:]))
        elif item[0] in ('string', 'symbol'):
            pieces.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (item[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2669 2669
def parse(spec, lookup=None):
    """Parse a revset specification into a (simplified) parse tree.

    'lookup' is an optional callable handed to the tokenizer to decide
    whether a name refers to an existing revision.  Raises error.ParseError
    when input remains after a complete expression.
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    # collapse nested binary 'or' nodes into a single n-ary node
    return parser.simplifyinfixops(tree, ('or',))
2676 2676
def posttreebuilthook(tree, repo):
    """Extension hook point, called with the optimized tree before matching."""
    # hook for extensions to execute code on the optimized tree
    pass
2680 2680
def match(ui, spec, repo=None):
    """Build a matcher function for the single revset specification 'spec'.

    Raises error.ParseError when the spec is empty.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    # when a repo is available, let the tokenizer check revision existence
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2689 2689
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: a matcher that always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    # when a repo is available, let the tokenizer check revision existence
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # union of all parsed specs
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2707 2707
def _makematcher(ui, tree, repo):
    """Turn a parse tree into a matcher function.

    The tree is alias-expanded (when a ui is available), concat-folded and
    optimized first.  The returned mfunc(repo, subset=None) evaluates the
    tree against 'subset' (the full repo by default) and returns a smartset.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        # accept plain iterables by wrapping them in a baseset
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2723 2723
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    # single-quoted (repr) form of str(s), escaping as needed
    def quote(s):
        return repr(str(s))

    # render one scalar argument according to its format character
    def argtype(c, arg):
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    # render a list argument: known types use the _list/_intlist/_hexlist
    # helpers; anything else (e.g. 'r') is split in halves joined by 'or'
    def listexp(s, t):
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan expr, copying literal text and expanding %-escapes; 'arg' is
    # the index of the next positional argument to consume
    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2817 2817
def prettyformat(tree):
    """Return a multi-line, indented representation of a parsed revset tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2820 2820
def depth(tree):
    """Return the nesting depth of a parse tree (0 for a leaf)."""
    if not isinstance(tree, tuple):
        return 0
    # one level for this node plus the deepest of its children
    return 1 + max(depth(subtree) for subtree in tree)
2826 2826
def funcsused(tree):
    """Return the set of function names referenced anywhere in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        # leaves cannot contain function calls
        return set()
    names = set()
    for child in tree[1:]:
        names.update(funcsused(child))
    if tree[0] == 'func':
        # ('func', ('symbol', name), ...) -- record the callee's name
        names.add(tree[1][1])
    return names
2837 2837
class abstractsmartset(object):
    """Base class of the smartset API: an ordered set of revision numbers.

    Subclasses must provide membership, iteration, ordering and length;
    this base class derives min/max and the set operators from them.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # fast path: the first ascending element is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # fast path: the first descending element is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins (which have no func_code) cannot be wrapped by
        # util.cachefunc, and do not need to be
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2942 2942
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # None: iterate in insertion order; True/False: ascending/descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily-built set for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily-built sorted copy, shared by both iteration directions
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind membership directly to the cached set's __contains__
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no declared order yet: reverse the raw insertion order
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # '-'/'+' marks descending/ascending; '' means unordered
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3037 3037
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        # memoizes condition results per revision
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only available when the subset provides it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # non-empty as soon as one element passes the filter
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # prefer a reversed fast iterator so we can stop at the first hit
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            # no fast reversed iteration: scan everything, keep the last
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3127 3127
3128 3128 def _iterordered(ascending, iter1, iter2):
3129 3129 """produce an ordered iteration from two iterators with the same order
3130 3130
3131 3131 The ascending is used to indicated the iteration direction.
3132 3132 """
3133 3133 choice = max
3134 3134 if ascending:
3135 3135 choice = min
3136 3136
3137 3137 val1 = None
3138 3138 val2 = None
3139 3139 try:
3140 3140 # Consume both iterators in an ordered way until one is empty
3141 3141 while True:
3142 3142 if val1 is None:
3143 3143 val1 = iter1.next()
3144 3144 if val2 is None:
3145 3145 val2 = iter2.next()
3146 3146 next = choice(val1, val2)
3147 3147 yield next
3148 3148 if val1 == next:
3149 3149 val1 = None
3150 3150 if val2 == next:
3151 3151 val2 = None
3152 3152 except StopIteration:
3153 3153 # Flush any remaining values and consume the other one
3154 3154 it = iter2
3155 3155 if val1 is not None:
3156 3156 yield val1
3157 3157 it = iter1
3158 3158 elif val2 is not None:
3159 3159 # might have been equality and both are empty
3160 3160 yield val2
3161 3161 for val in it:
3162 3162 yield val
3163 3163
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        # NOTE(review): self._iter appears unused within this class --
        # confirm before removing
        self._iter = None
        self._ascending = ascending
        # cached baseset of all generated values (unordered iteration)
        self._genlist = None
        # cached sorted list of all values (ordered iteration)
        self._asclist = None

    def __len__(self):
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                # all of r1, then the members of r2 not already seen in r1
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        self._trysetasclist()
        if self._asclist is not None:
            # already materialized and sorted: iterate that directly
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # flip direction, take the first element, flip back
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3363 3363
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, declares that 'gen' yields its values in
        ascending (True) or descending (False) order, enabling the matching
        fast iterator and an early-exit membership test.
        """
        self._gen = gen
        # sorted snapshot of all values, built once the generator is done
        self._asclist = None
        # value -> membership; filled lazily while consuming the generator
        self._cache = {}
        # values produced so far, in generator order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # instance attributes shadow the class-level None defaults
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # values only grow from here: x cannot appear later
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # values only shrink from here: x cannot appear later
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        """yield values from the wrapped generator, recording them as we go

        Once the generator is exhausted, build the sorted snapshot and
        install direct fast iterators."""
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # force full consumption of the generator
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; the exhausted generator
            # now provides fastasc/fastdesc.  (This used to recurse into
            # first(), which returned the wrong end of the set.)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3537 3537
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
        (default to 0)
        end: first revision excluded (last+1)
        (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that _start <= _end; direction is remembered
            # in _ascending
            start, end = end + 1, start +1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # skip revisions hidden by the current repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # count hidden revisions that fall inside the span
            # NOTE(review): the 'end < rev <= start' clause looks vestigial
            # since __init__ normalizes start <= end -- confirm
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3646 3646
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        if util.safehasattr(other, 'isascending'):
            # already a smartset; use it as-is
            result = other
        else:
            # `other` is not a smartset but was used with "&", so assume it
            # is at least set-like; drop hidden revisions from it
            # (this boldly assumes all smartsets are pure)
            result = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        result.sort(reverse=self.isdescending())
        return result
3685 3685
def prettyformatset(revs):
    """Return repr(revs) reformatted with one '<...' fragment per line,
    each indented by its nesting depth within angle brackets."""
    rs = repr(revs)
    out = []
    pos = 0
    while pos < len(rs):
        # next fragment runs up to (but not including) the next '<'
        cut = rs.find('<', pos + 1)
        if cut < 0:
            cut = len(rs)
        # nesting depth = unmatched '<' seen before this position
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        out.append(' ' * depth + rs[pos:cut].rstrip())
        pos = cut
    return '\n'.join(out)
3699 3699
# tell hggettext to extract docstrings from these functions:
# (`symbols` is presumably the module-level name -> predicate-function
# table defined earlier in this file — not visible in this chunk)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now