revset: split post-parsing stage from match()...
Yuya Nishihara
r25926:996102be default
@@ -1,3676 +1,3679 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revs.sort(reverse=True)
29 29 irevs = iter(revs)
30 30 h = []
31 31
32 32 inputrev = next(irevs, None)
33 33 if inputrev is not None:
34 34 heapq.heappush(h, -inputrev)
35 35
36 36 seen = set()
37 37 while h:
38 38 current = -heapq.heappop(h)
39 39 if current == inputrev:
40 40 inputrev = next(irevs, None)
41 41 if inputrev is not None:
42 42 heapq.heappush(h, -inputrev)
43 43 if current not in seen:
44 44 seen.add(current)
45 45 yield current
46 46 for parent in cl.parentrevs(current)[:cut]:
47 47 if parent != node.nullrev:
48 48 heapq.heappush(h, -parent)
49 49
50 50 return generatorset(iterate(), iterasc=False)
51 51
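# Editorial illustration (not part of the original module): on a linear
# history 0 -> 1 -> 2 -> 3, the heap-based walk above yields ancestors in
# descending revision order, for example:
#
#   >>> list(_revancestors(repo, baseset([3]), followfirst=False))
#   [3, 2, 1, 0]
#
# With followfirst=True only the first parent of each revision is pushed
# onto the heap, so branches reached via second parents are skipped.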
52 52 def _revdescendants(repo, revs, followfirst):
53 53 """Like revlog.descendants() but supports followfirst."""
54 54 if followfirst:
55 55 cut = 1
56 56 else:
57 57 cut = None
58 58
59 59 def iterate():
60 60 cl = repo.changelog
61 61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
62 62 # smartset (and if it is not, it should.)
63 63 first = min(revs)
64 64 nullrev = node.nullrev
65 65 if first == nullrev:
66 66 # Are there nodes with a null first parent and a non-null
67 67 # second one? Maybe. Do we care? Probably not.
68 68 for i in cl:
69 69 yield i
70 70 else:
71 71 seen = set(revs)
72 72 for i in cl.revs(first + 1):
73 73 for x in cl.parentrevs(i)[:cut]:
74 74 if x != nullrev and x in seen:
75 75 seen.add(i)
76 76 yield i
77 77 break
78 78
79 79 return generatorset(iterate(), iterasc=True)
80 80
81 81 def _revsbetween(repo, roots, heads):
82 82 """Return all paths between roots and heads, inclusive of both endpoint
83 83 sets."""
84 84 if not roots:
85 85 return baseset()
86 86 parentrevs = repo.changelog.parentrevs
87 87 visit = list(heads)
88 88 reachable = set()
89 89 seen = {}
90 90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
91 91 # (and if it is not, it should.)
92 92 minroot = min(roots)
93 93 roots = set(roots)
94 94 # prefetch all the things! (because python is slow)
95 95 reached = reachable.add
96 96 dovisit = visit.append
97 97 nextvisit = visit.pop
98 98 # open-code the post-order traversal due to the tiny size of
99 99 # sys.getrecursionlimit()
100 100 while visit:
101 101 rev = nextvisit()
102 102 if rev in roots:
103 103 reached(rev)
104 104 parents = parentrevs(rev)
105 105 seen[rev] = parents
106 106 for parent in parents:
107 107 if parent >= minroot and parent not in seen:
108 108 dovisit(parent)
109 109 if not reachable:
110 110 return baseset()
111 111 for rev in sorted(seen):
112 112 for parent in seen[rev]:
113 113 if parent in reachable:
114 114 reached(rev)
115 115 return baseset(sorted(reachable))
116 116
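# Editorial illustration (not part of the original module): with a linear
# history 0 -> 1 -> 2 -> 3, every path between the endpoints is returned,
# endpoints included:
#
#   _revsbetween(repo, baseset([1]), baseset([3]))  ->  baseset([1, 2, 3])
#
# Revisions below min(roots) are never visited, which keeps the reverse
# walk from the heads cheap on large repositories.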
117 117 elements = {
118 118 # token-type: binding-strength, primary, prefix, infix, suffix
119 119 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
120 120 "##": (20, None, None, ("_concat", 20), None),
121 121 "~": (18, None, None, ("ancestor", 18), None),
122 122 "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
123 123 "-": (5, None, ("negate", 19), ("minus", 5), None),
124 124 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
125 125 ("dagrangepost", 17)),
126 126 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
127 127 ("dagrangepost", 17)),
128 128 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
129 129 "not": (10, None, ("not", 10), None, None),
130 130 "!": (10, None, ("not", 10), None, None),
131 131 "and": (5, None, None, ("and", 5), None),
132 132 "&": (5, None, None, ("and", 5), None),
133 133 "%": (5, None, None, ("only", 5), ("onlypost", 5)),
134 134 "or": (4, None, None, ("or", 4), None),
135 135 "|": (4, None, None, ("or", 4), None),
136 136 "+": (4, None, None, ("or", 4), None),
137 137 "=": (3, None, None, ("keyvalue", 3), None),
138 138 ",": (2, None, None, ("list", 2), None),
139 139 ")": (0, None, None, None, None),
140 140 "symbol": (0, "symbol", None, None, None),
141 141 "string": (0, "string", None, None, None),
142 142 "end": (0, None, None, None, None),
143 143 }
144 144
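# Editorial note (not part of the original source): each entry above maps a
# token type to (binding strength, primary, prefix, infix, suffix) actions
# for the generic operator-precedence parser in parser.py.  For example the
# "~" row means "~" binds at strength 18 and is only valid as an infix
# operator, producing an ('ancestor', <left>, <right>) parse node, while
# ":" can appear alone ("rangeall"), as a prefix ("rangepre"), infix
# ("range"), or suffix ("rangepost"), all at strength 15.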
145 145 keywords = set(['and', 'or', 'not'])
146 146
147 147 # default set of valid characters for the initial letter of symbols
148 148 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
149 149 if c.isalnum() or c in '._@' or ord(c) > 127)
150 150
151 151 # default set of valid characters for non-initial letters of symbols
152 152 _symletters = set(c for c in [chr(i) for i in xrange(256)]
153 153 if c.isalnum() or c in '-._/@' or ord(c) > 127)
154 154
155 155 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
156 156 '''
157 157 Parse a revset statement into a stream of tokens
158 158
159 159 ``syminitletters`` is the set of valid characters for the initial
160 160 letter of symbols.
161 161
162 162 By default, character ``c`` is recognized as valid for initial
163 163 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
164 164
165 165 ``symletters`` is the set of valid characters for non-initial
166 166 letters of symbols.
167 167
168 168 By default, character ``c`` is recognized as valid for non-initial
169 169 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
170 170
171 171 Check that @ is a valid unquoted token character (issue3686):
172 172 >>> list(tokenize("@::"))
173 173 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
174 174
175 175 '''
176 176 if syminitletters is None:
177 177 syminitletters = _syminitletters
178 178 if symletters is None:
179 179 symletters = _symletters
180 180
181 181 if program and lookup:
182 182 # attempt to parse old-style ranges first to deal with
183 183 # things like old-tag which contain query metacharacters
184 184 parts = program.split(':', 1)
185 185 if all(lookup(sym) for sym in parts if sym):
186 186 if parts[0]:
187 187 yield ('symbol', parts[0], 0)
188 188 if len(parts) > 1:
189 189 s = len(parts[0])
190 190 yield (':', None, s)
191 191 if parts[1]:
192 192 yield ('symbol', parts[1], s + 1)
193 193 yield ('end', None, len(program))
194 194 return
195 195
196 196 pos, l = 0, len(program)
197 197 while pos < l:
198 198 c = program[pos]
199 199 if c.isspace(): # skip inter-token whitespace
200 200 pass
201 201 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
202 202 yield ('::', None, pos)
203 203 pos += 1 # skip ahead
204 204 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
205 205 yield ('..', None, pos)
206 206 pos += 1 # skip ahead
207 207 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
208 208 yield ('##', None, pos)
209 209 pos += 1 # skip ahead
210 210 elif c in "():=,-|&+!~^%": # handle simple operators
211 211 yield (c, None, pos)
212 212 elif (c in '"\'' or c == 'r' and
213 213 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
214 214 if c == 'r':
215 215 pos += 1
216 216 c = program[pos]
217 217 decode = lambda x: x
218 218 else:
219 219 decode = lambda x: x.decode('string-escape')
220 220 pos += 1
221 221 s = pos
222 222 while pos < l: # find closing quote
223 223 d = program[pos]
224 224 if d == '\\': # skip over escaped characters
225 225 pos += 2
226 226 continue
227 227 if d == c:
228 228 yield ('string', decode(program[s:pos]), s)
229 229 break
230 230 pos += 1
231 231 else:
232 232 raise error.ParseError(_("unterminated string"), s)
233 233 # gather up a symbol/keyword
234 234 elif c in syminitletters:
235 235 s = pos
236 236 pos += 1
237 237 while pos < l: # find end of symbol
238 238 d = program[pos]
239 239 if d not in symletters:
240 240 break
241 241 if d == '.' and program[pos - 1] == '.': # special case for ..
242 242 pos -= 1
243 243 break
244 244 pos += 1
245 245 sym = program[s:pos]
246 246 if sym in keywords: # operator keywords
247 247 yield (sym, None, s)
248 248 elif '-' in sym:
249 249 # some jerk gave us foo-bar-baz, try to check if it's a symbol
250 250 if lookup and lookup(sym):
251 251 # looks like a real symbol
252 252 yield ('symbol', sym, s)
253 253 else:
254 254 # looks like an expression
255 255 parts = sym.split('-')
256 256 for p in parts[:-1]:
257 257 if p: # possible consecutive -
258 258 yield ('symbol', p, s)
259 259 s += len(p)
260 260 yield ('-', None, pos)
261 261 s += 1
262 262 if parts[-1]: # possible trailing -
263 263 yield ('symbol', parts[-1], s)
264 264 else:
265 265 yield ('symbol', sym, s)
266 266 pos -= 1
267 267 else:
268 268 raise error.ParseError(_("syntax error in revset '%s'") %
269 269 program, pos)
270 270 pos += 1
271 271 yield ('end', None, pos)
272 272
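# Editorial illustration (not part of the original module): a slightly
# larger input than the doctest above, tokenized with the default symbol
# character sets:
#
#   >>> list(tokenize("1:2 and keyword(fix)"))
#   [('symbol', '1', 0), (':', None, 1), ('symbol', '2', 2),
#    ('and', None, 4), ('symbol', 'keyword', 8), ('(', None, 15),
#    ('symbol', 'fix', 16), (')', None, 19), ('end', None, 20)]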
273 273 def parseerrordetail(inst):
274 274 """Compose error message from specified ParseError object
275 275 """
276 276 if len(inst.args) > 1:
277 277 return _('at %s: %s') % (inst.args[1], inst.args[0])
278 278 else:
279 279 return inst.args[0]
280 280
281 281 # helpers
282 282
283 283 def getstring(x, err):
284 284 if x and (x[0] == 'string' or x[0] == 'symbol'):
285 285 return x[1]
286 286 raise error.ParseError(err)
287 287
288 288 def getlist(x):
289 289 if not x:
290 290 return []
291 291 if x[0] == 'list':
292 292 return getlist(x[1]) + [x[2]]
293 293 return [x]
294 294
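# Editorial illustration (not part of the original module): 'list' nodes
# produced by the "," infix operator nest to the left, and getlist()
# flattens them back into a plain argument list:
#
#   getlist(('list', ('list', ('symbol', 'a'), ('symbol', 'b')),
#            ('symbol', 'c')))
#   ->  [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]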
295 295 def getargs(x, min, max, err):
296 296 l = getlist(x)
297 297 if len(l) < min or (max >= 0 and len(l) > max):
298 298 raise error.ParseError(err)
299 299 return l
300 300
301 301 def getargsdict(x, funcname, keys):
302 302 return parser.buildargsdict(getlist(x), funcname, keys.split(),
303 303 keyvaluenode='keyvalue', keynode='symbol')
304 304
305 305 def isvalidsymbol(tree):
306 306 """Examine whether specified ``tree`` is valid ``symbol`` or not
307 307 """
308 308 return tree[0] == 'symbol' and len(tree) > 1
309 309
310 310 def getsymbol(tree):
311 311 """Get symbol name from valid ``symbol`` in ``tree``
312 312
313 313 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
314 314 """
315 315 return tree[1]
316 316
317 317 def isvalidfunc(tree):
318 318 """Examine whether specified ``tree`` is valid ``func`` or not
319 319 """
320 320 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
321 321
322 322 def getfuncname(tree):
323 323 """Get function name from valid ``func`` in ``tree``
324 324
325 325 This assumes that ``tree`` is already examined by ``isvalidfunc``.
326 326 """
327 327 return getsymbol(tree[1])
328 328
329 329 def getfuncargs(tree):
330 330 """Get list of function arguments from valid ``func`` in ``tree``
331 331
332 332 This assumes that ``tree`` is already examined by ``isvalidfunc``.
333 333 """
334 334 if len(tree) > 2:
335 335 return getlist(tree[2])
336 336 else:
337 337 return []
338 338
339 339 def getset(repo, subset, x):
340 340 if not x:
341 341 raise error.ParseError(_("missing argument"))
342 342 s = methods[x[0]](repo, subset, *x[1:])
343 343 if util.safehasattr(s, 'isascending'):
344 344 return s
345 345 if (repo.ui.configbool('devel', 'all-warnings')
346 346 or repo.ui.configbool('devel', 'old-revset')):
347 347 # else case should not happen, because all non-func are internal,
348 348 # ignoring for now.
349 349 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
350 350 repo.ui.develwarn('revset "%s" use list instead of smartset, '
351 351 '(upgrade your code)' % x[1][1])
352 352 return baseset(s)
353 353
354 354 def _getrevsource(repo, r):
355 355 extra = repo[r].extra()
356 356 for label in ('source', 'transplant_source', 'rebase_source'):
357 357 if label in extra:
358 358 try:
359 359 return repo[extra[label]].rev()
360 360 except error.RepoLookupError:
361 361 pass
362 362 return None
363 363
364 364 # operator methods
365 365
366 366 def stringset(repo, subset, x):
367 367 x = repo[x].rev()
368 368 if (x in subset
369 369 or x == node.nullrev and isinstance(subset, fullreposet)):
370 370 return baseset([x])
371 371 return baseset()
372 372
373 373 def rangeset(repo, subset, x, y):
374 374 m = getset(repo, fullreposet(repo), x)
375 375 n = getset(repo, fullreposet(repo), y)
376 376
377 377 if not m or not n:
378 378 return baseset()
379 379 m, n = m.first(), n.last()
380 380
381 381 if m == n:
382 382 r = baseset([m])
383 383 elif n == node.wdirrev:
384 384 r = spanset(repo, m, len(repo)) + baseset([n])
385 385 elif m == node.wdirrev:
386 386 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
387 387 elif m < n:
388 388 r = spanset(repo, m, n + 1)
389 389 else:
390 390 r = spanset(repo, m, n - 1)
391 391 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
392 392 # necessary to ensure we preserve the order in subset.
393 393 #
394 394 # This has performance implication, carrying the sorting over when possible
395 395 # would be more efficient.
396 396 return r & subset
397 397
398 398 def dagrange(repo, subset, x, y):
399 399 r = fullreposet(repo)
400 400 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
401 401 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
402 402 # necessary to ensure we preserve the order in subset.
403 403 return xs & subset
404 404
405 405 def andset(repo, subset, x, y):
406 406 return getset(repo, getset(repo, subset, x), y)
407 407
408 408 def orset(repo, subset, *xs):
409 409 rs = [getset(repo, subset, x) for x in xs]
410 410 return _combinesets(rs)
411 411
412 412 def notset(repo, subset, x):
413 413 return subset - getset(repo, subset, x)
414 414
415 415 def listset(repo, subset, a, b):
416 416 raise error.ParseError(_("can't use a list in this context"))
417 417
418 418 def keyvaluepair(repo, subset, k, v):
419 419 raise error.ParseError(_("can't use a key-value pair in this context"))
420 420
421 421 def func(repo, subset, a, b):
422 422 if a[0] == 'symbol' and a[1] in symbols:
423 423 return symbols[a[1]](repo, subset, b)
424 424
425 425 keep = lambda fn: getattr(fn, '__doc__', None) is not None
426 426
427 427 syms = [s for (s, fn) in symbols.items() if keep(fn)]
428 428 raise error.UnknownIdentifier(a[1], syms)
429 429
430 430 # functions
431 431
432 432 def adds(repo, subset, x):
433 433 """``adds(pattern)``
434 434 Changesets that add a file matching pattern.
435 435
436 436 The pattern without explicit kind like ``glob:`` is expected to be
437 437 relative to the current directory and match against a file or a
438 438 directory.
439 439 """
440 440 # i18n: "adds" is a keyword
441 441 pat = getstring(x, _("adds requires a pattern"))
442 442 return checkstatus(repo, subset, pat, 1)
443 443
444 444 def ancestor(repo, subset, x):
445 445 """``ancestor(*changeset)``
446 446 A greatest common ancestor of the changesets.
447 447
448 448 Accepts 0 or more changesets.
449 449 Will return empty list when passed no args.
450 450 Greatest common ancestor of a single changeset is that changeset.
451 451 """
452 452 # i18n: "ancestor" is a keyword
453 453 l = getlist(x)
454 454 rl = fullreposet(repo)
455 455 anc = None
456 456
457 457 # (getset(repo, rl, i) for i in l) generates a list of lists
458 458 for revs in (getset(repo, rl, i) for i in l):
459 459 for r in revs:
460 460 if anc is None:
461 461 anc = repo[r]
462 462 else:
463 463 anc = anc.ancestor(repo[r])
464 464
465 465 if anc is not None and anc.rev() in subset:
466 466 return baseset([anc.rev()])
467 467 return baseset()
468 468
469 469 def _ancestors(repo, subset, x, followfirst=False):
470 470 heads = getset(repo, fullreposet(repo), x)
471 471 if not heads:
472 472 return baseset()
473 473 s = _revancestors(repo, heads, followfirst)
474 474 return subset & s
475 475
476 476 def ancestors(repo, subset, x):
477 477 """``ancestors(set)``
478 478 Changesets that are ancestors of a changeset in set.
479 479 """
480 480 return _ancestors(repo, subset, x)
481 481
482 482 def _firstancestors(repo, subset, x):
483 483 # ``_firstancestors(set)``
484 484 # Like ``ancestors(set)`` but follows only the first parents.
485 485 return _ancestors(repo, subset, x, followfirst=True)
486 486
487 487 def ancestorspec(repo, subset, x, n):
488 488 """``set~n``
489 489 Changesets that are the Nth ancestor (first parents only) of a changeset
490 490 in set.
491 491 """
492 492 try:
493 493 n = int(n[1])
494 494 except (TypeError, ValueError):
495 495 raise error.ParseError(_("~ expects a number"))
496 496 ps = set()
497 497 cl = repo.changelog
498 498 for r in getset(repo, fullreposet(repo), x):
499 499 for i in range(n):
500 500 r = cl.parentrevs(r)[0]
501 501 ps.add(r)
502 502 return subset & ps
503 503
504 504 def author(repo, subset, x):
505 505 """``author(string)``
506 506 Alias for ``user(string)``.
507 507 """
508 508 # i18n: "author" is a keyword
509 509 n = encoding.lower(getstring(x, _("author requires a string")))
510 510 kind, pattern, matcher = _substringmatcher(n)
511 511 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
512 512
513 513 def bisect(repo, subset, x):
514 514 """``bisect(string)``
515 515 Changesets marked in the specified bisect status:
516 516
517 517 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
518 518 - ``goods``, ``bads`` : csets topologically good/bad
519 519 - ``range`` : csets taking part in the bisection
520 520 - ``pruned`` : csets that are goods, bads or skipped
521 521 - ``untested`` : csets whose fate is yet unknown
522 522 - ``ignored`` : csets ignored due to DAG topology
523 523 - ``current`` : the cset currently being bisected
524 524 """
525 525 # i18n: "bisect" is a keyword
526 526 status = getstring(x, _("bisect requires a string")).lower()
527 527 state = set(hbisect.get(repo, status))
528 528 return subset & state
529 529
530 530 # Backward-compatibility
531 531 # - no help entry so that we do not advertise it any more
532 532 def bisected(repo, subset, x):
533 533 return bisect(repo, subset, x)
534 534
535 535 def bookmark(repo, subset, x):
536 536 """``bookmark([name])``
537 537 The named bookmark or all bookmarks.
538 538
539 539 If `name` starts with `re:`, the remainder of the name is treated as
540 540 a regular expression. To match a bookmark that actually starts with `re:`,
541 541 use the prefix `literal:`.
542 542 """
543 543 # i18n: "bookmark" is a keyword
544 544 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
545 545 if args:
546 546 bm = getstring(args[0],
547 547 # i18n: "bookmark" is a keyword
548 548 _('the argument to bookmark must be a string'))
549 549 kind, pattern, matcher = _stringmatcher(bm)
550 550 bms = set()
551 551 if kind == 'literal':
552 552 bmrev = repo._bookmarks.get(pattern, None)
553 553 if not bmrev:
554 554 raise error.RepoLookupError(_("bookmark '%s' does not exist")
555 555 % bm)
556 556 bms.add(repo[bmrev].rev())
557 557 else:
558 558 matchrevs = set()
559 559 for name, bmrev in repo._bookmarks.iteritems():
560 560 if matcher(name):
561 561 matchrevs.add(bmrev)
562 562 if not matchrevs:
563 563 raise error.RepoLookupError(_("no bookmarks exist"
564 564 " that match '%s'") % pattern)
565 565 for bmrev in matchrevs:
566 566 bms.add(repo[bmrev].rev())
567 567 else:
568 568 bms = set([repo[r].rev()
569 569 for r in repo._bookmarks.values()])
570 570 bms -= set([node.nullrev])
571 571 return subset & bms
572 572
573 573 def branch(repo, subset, x):
574 574 """``branch(string or set)``
575 575 All changesets belonging to the given branch or the branches of the given
576 576 changesets.
577 577
578 578 If `string` starts with `re:`, the remainder of the name is treated as
579 579 a regular expression. To match a branch that actually starts with `re:`,
580 580 use the prefix `literal:`.
581 581 """
582 582 getbi = repo.revbranchcache().branchinfo
583 583
584 584 try:
585 585 b = getstring(x, '')
586 586 except error.ParseError:
587 587 # not a string, but another revspec, e.g. tip()
588 588 pass
589 589 else:
590 590 kind, pattern, matcher = _stringmatcher(b)
591 591 if kind == 'literal':
592 592 # note: falls through to the revspec case if no branch with
593 593 # this name exists
594 594 if pattern in repo.branchmap():
595 595 return subset.filter(lambda r: matcher(getbi(r)[0]))
596 596 else:
597 597 return subset.filter(lambda r: matcher(getbi(r)[0]))
598 598
599 599 s = getset(repo, fullreposet(repo), x)
600 600 b = set()
601 601 for r in s:
602 602 b.add(getbi(r)[0])
603 603 c = s.__contains__
604 604 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
605 605
606 606 def bumped(repo, subset, x):
607 607 """``bumped()``
608 608 Mutable changesets marked as successors of public changesets.
609 609
610 610 Only non-public and non-obsolete changesets can be `bumped`.
611 611 """
612 612 # i18n: "bumped" is a keyword
613 613 getargs(x, 0, 0, _("bumped takes no arguments"))
614 614 bumped = obsmod.getrevs(repo, 'bumped')
615 615 return subset & bumped
616 616
617 617 def bundle(repo, subset, x):
618 618 """``bundle()``
619 619 Changesets in the bundle.
620 620
621 621 Bundle must be specified by the -R option."""
622 622
623 623 try:
624 624 bundlerevs = repo.changelog.bundlerevs
625 625 except AttributeError:
626 626 raise util.Abort(_("no bundle provided - specify with -R"))
627 627 return subset & bundlerevs
628 628
629 629 def checkstatus(repo, subset, pat, field):
630 630 hasset = matchmod.patkind(pat) == 'set'
631 631
632 632 mcache = [None]
633 633 def matches(x):
634 634 c = repo[x]
635 635 if not mcache[0] or hasset:
636 636 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
637 637 m = mcache[0]
638 638 fname = None
639 639 if not m.anypats() and len(m.files()) == 1:
640 640 fname = m.files()[0]
641 641 if fname is not None:
642 642 if fname not in c.files():
643 643 return False
644 644 else:
645 645 for f in c.files():
646 646 if m(f):
647 647 break
648 648 else:
649 649 return False
650 650 files = repo.status(c.p1().node(), c.node())[field]
651 651 if fname is not None:
652 652 if fname in files:
653 653 return True
654 654 else:
655 655 for f in files:
656 656 if m(f):
657 657 return True
658 658
659 659 return subset.filter(matches)
660 660
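# Editorial note (not part of the original source): the 'field' argument of
# checkstatus() indexes into the status tuple returned by repo.status(),
# i.e. 0 = modified, 1 = added, 2 = removed, which is how modifies(),
# adds() and removes() elsewhere in this module share this one helper:
#
#   checkstatus(repo, subset, pat, 1)  # changesets adding files matching pat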
661 661 def _children(repo, narrow, parentset):
662 662 if not parentset:
663 663 return baseset()
664 664 cs = set()
665 665 pr = repo.changelog.parentrevs
666 666 minrev = parentset.min()
667 667 for r in narrow:
668 668 if r <= minrev:
669 669 continue
670 670 for p in pr(r):
671 671 if p in parentset:
672 672 cs.add(r)
673 673 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
674 674 # This does not break because of other fullreposet misbehavior.
675 675 return baseset(cs)
676 676
677 677 def children(repo, subset, x):
678 678 """``children(set)``
679 679 Child changesets of changesets in set.
680 680 """
681 681 s = getset(repo, fullreposet(repo), x)
682 682 cs = _children(repo, subset, s)
683 683 return subset & cs
684 684
685 685 def closed(repo, subset, x):
686 686 """``closed()``
687 687 Changeset is closed.
688 688 """
689 689 # i18n: "closed" is a keyword
690 690 getargs(x, 0, 0, _("closed takes no arguments"))
691 691 return subset.filter(lambda r: repo[r].closesbranch())
692 692
693 693 def contains(repo, subset, x):
694 694 """``contains(pattern)``
695 695 The revision's manifest contains a file matching pattern (but might not
696 696 modify it). See :hg:`help patterns` for information about file patterns.
697 697
698 698 The pattern without explicit kind like ``glob:`` is expected to be
699 699 relative to the current directory and match against a file exactly
700 700 for efficiency.
701 701 """
702 702 # i18n: "contains" is a keyword
703 703 pat = getstring(x, _("contains requires a pattern"))
704 704
705 705 def matches(x):
706 706 if not matchmod.patkind(pat):
707 707 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
708 708 if pats in repo[x]:
709 709 return True
710 710 else:
711 711 c = repo[x]
712 712 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
713 713 for f in c.manifest():
714 714 if m(f):
715 715 return True
716 716 return False
717 717
718 718 return subset.filter(matches)
719 719
720 720 def converted(repo, subset, x):
721 721 """``converted([id])``
722 722 Changesets converted from the given identifier in the old repository if
723 723 present, or all converted changesets if no identifier is specified.
724 724 """
725 725
726 726 # There is exactly no chance of resolving the revision, so do a simple
727 727 # string compare and hope for the best
728 728
729 729 rev = None
730 730 # i18n: "converted" is a keyword
731 731 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
732 732 if l:
733 733 # i18n: "converted" is a keyword
734 734 rev = getstring(l[0], _('converted requires a revision'))
735 735
736 736 def _matchvalue(r):
737 737 source = repo[r].extra().get('convert_revision', None)
738 738 return source is not None and (rev is None or source.startswith(rev))
739 739
740 740 return subset.filter(lambda r: _matchvalue(r))
741 741
742 742 def date(repo, subset, x):
743 743 """``date(interval)``
744 744 Changesets within the interval, see :hg:`help dates`.
745 745 """
746 746 # i18n: "date" is a keyword
747 747 ds = getstring(x, _("date requires a string"))
748 748 dm = util.matchdate(ds)
749 749 return subset.filter(lambda x: dm(repo[x].date()[0]))
750 750
751 751 def desc(repo, subset, x):
752 752 """``desc(string)``
753 753 Search commit message for string. The match is case-insensitive.
754 754 """
755 755 # i18n: "desc" is a keyword
756 756 ds = encoding.lower(getstring(x, _("desc requires a string")))
757 757
758 758 def matches(x):
759 759 c = repo[x]
760 760 return ds in encoding.lower(c.description())
761 761
762 762 return subset.filter(matches)
763 763
764 764 def _descendants(repo, subset, x, followfirst=False):
765 765 roots = getset(repo, fullreposet(repo), x)
766 766 if not roots:
767 767 return baseset()
768 768 s = _revdescendants(repo, roots, followfirst)
769 769
770 770 # Both sets need to be ascending in order to lazily return the union
771 771 # in the correct order.
772 772 base = subset & roots
773 773 desc = subset & s
774 774 result = base + desc
775 775 if subset.isascending():
776 776 result.sort()
777 777 elif subset.isdescending():
778 778 result.sort(reverse=True)
779 779 else:
780 780 result = subset & result
781 781 return result
782 782
783 783 def descendants(repo, subset, x):
784 784 """``descendants(set)``
785 785 Changesets which are descendants of changesets in set.
786 786 """
787 787 return _descendants(repo, subset, x)
788 788
789 789 def _firstdescendants(repo, subset, x):
790 790 # ``_firstdescendants(set)``
791 791 # Like ``descendants(set)`` but follows only the first parents.
792 792 return _descendants(repo, subset, x, followfirst=True)
793 793
794 794 def destination(repo, subset, x):
795 795 """``destination([set])``
796 796 Changesets that were created by a graft, transplant or rebase operation,
797 797 with the given revisions specified as the source. Omitting the optional set
798 798 is the same as passing all().
799 799 """
800 800 if x is not None:
801 801 sources = getset(repo, fullreposet(repo), x)
802 802 else:
803 803 sources = fullreposet(repo)
804 804
805 805 dests = set()
806 806
807 807 # subset contains all of the possible destinations that can be returned, so
808 808 # iterate over them and see if their source(s) were provided in the arg set.
809 809 # Even if the immediate src of r is not in the arg set, src's source (or
810 810 # further back) may be. Scanning back further than the immediate src allows
811 811 # transitive transplants and rebases to yield the same results as transitive
812 812 # grafts.
813 813 for r in subset:
814 814 src = _getrevsource(repo, r)
815 815 lineage = None
816 816
817 817 while src is not None:
818 818 if lineage is None:
819 819 lineage = list()
820 820
821 821 lineage.append(r)
822 822
823 823 # The visited lineage is a match if the current source is in the arg
824 824 # set. Since every candidate dest is visited by way of iterating
825 825 # subset, any dests further back in the lineage will be tested by a
826 826 # different iteration over subset. Likewise, if the src was already
827 827 # selected, the current lineage can be selected without going back
828 828 # further.
829 829 if src in sources or src in dests:
830 830 dests.update(lineage)
831 831 break
832 832
833 833 r = src
834 834 src = _getrevsource(repo, r)
835 835
836 836 return subset.filter(dests.__contains__)
837 837
838 838 def divergent(repo, subset, x):
839 839 """``divergent()``
840 840 Final successors of changesets with an alternative set of final successors.
841 841 """
842 842 # i18n: "divergent" is a keyword
843 843 getargs(x, 0, 0, _("divergent takes no arguments"))
844 844 divergent = obsmod.getrevs(repo, 'divergent')
845 845 return subset & divergent
846 846
847 847 def extinct(repo, subset, x):
848 848 """``extinct()``
849 849 Obsolete changesets with obsolete descendants only.
850 850 """
851 851 # i18n: "extinct" is a keyword
852 852 getargs(x, 0, 0, _("extinct takes no arguments"))
853 853 extincts = obsmod.getrevs(repo, 'extinct')
854 854 return subset & extincts
855 855
856 856 def extra(repo, subset, x):
857 857 """``extra(label, [value])``
858 858 Changesets with the given label in the extra metadata, with the given
859 859 optional value.
860 860
861 861 If `value` starts with `re:`, the remainder of the value is treated as
862 862 a regular expression. To match a value that actually starts with `re:`,
863 863 use the prefix `literal:`.
864 864 """
865 865 args = getargsdict(x, 'extra', 'label value')
866 866 if 'label' not in args:
867 867 # i18n: "extra" is a keyword
868 868 raise error.ParseError(_('extra takes at least 1 argument'))
869 869 # i18n: "extra" is a keyword
870 870 label = getstring(args['label'], _('first argument to extra must be '
871 871 'a string'))
872 872 value = None
873 873
874 874 if 'value' in args:
875 875 # i18n: "extra" is a keyword
876 876 value = getstring(args['value'], _('second argument to extra must be '
877 877 'a string'))
878 878 kind, value, matcher = _stringmatcher(value)
879 879
880 880 def _matchvalue(r):
881 881 extra = repo[r].extra()
882 882 return label in extra and (value is None or matcher(extra[label]))
883 883
884 884 return subset.filter(lambda r: _matchvalue(r))
885 885
886 886 def filelog(repo, subset, x):
887 887 """``filelog(pattern)``
888 888 Changesets connected to the specified filelog.
889 889
890 890 For performance reasons, visits only revisions mentioned in the file-level
891 891 filelog, rather than filtering through all changesets (much faster, but
892 892 doesn't include deletes or duplicate changes). For a slower, more accurate
893 893 result, use ``file()``.
894 894
895 895 The pattern without explicit kind like ``glob:`` is expected to be
896 896 relative to the current directory and match against a file exactly
897 897 for efficiency.
898 898
899 899 If some linkrev points to revisions filtered by the current repoview, we'll
900 900 work around it to return a non-filtered value.
901 901 """
902 902
903 903 # i18n: "filelog" is a keyword
904 904 pat = getstring(x, _("filelog requires a pattern"))
905 905 s = set()
906 906 cl = repo.changelog
907 907
908 908 if not matchmod.patkind(pat):
909 909 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
910 910 files = [f]
911 911 else:
912 912 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
913 913 files = (f for f in repo[None] if m(f))
914 914
915 915 for f in files:
916 916 backrevref = {} # final value for: filerev -> changerev
917 917 lowestchild = {} # lowest known filerev child of a filerev
918 918 delayed = [] # filerev with filtered linkrev, for post-processing
919 919 lowesthead = None # cache for manifest content of all head revisions
920 920 fl = repo.file(f)
921 921 for fr in list(fl):
922 922 rev = fl.linkrev(fr)
923 923 if rev not in cl:
924 924 # changerev pointed in linkrev is filtered
925 925 # record it for post processing.
926 926 delayed.append((fr, rev))
927 927 continue
928 928 for p in fl.parentrevs(fr):
929 929 if 0 <= p and p not in lowestchild:
930 930 lowestchild[p] = fr
931 931 backrevref[fr] = rev
932 932 s.add(rev)
933 933
934 934 # Post-processing of all filerevs we skipped because they were
935 935 # filtered. If such filerevs have known and unfiltered children, this
936 936 # means they have an unfiltered appearance out there. We'll use linkrev
937 937 # adjustment to find one of these appearances. The lowest known child
938 938 # will be used as a starting point because it is the best upper-bound we
939 939 # have.
940 940 #
941 941 # This approach will fail when an unfiltered but linkrev-shadowed
942 942 # appearance exists in a head changeset without unfiltered filerev
943 943 # children anywhere.
944 944 while delayed:
945 945 # must be a descending iteration. To slowly fill lowest child
946 946 # information that is of potential use by the next item.
947 947 fr, rev = delayed.pop()
948 948 lkr = rev
949 949
950 950 child = lowestchild.get(fr)
951 951
952 952 if child is None:
953 953 # search for existence of this file revision in a head revision.
954 954 # There are three possibilities:
955 955 # - the revision exists in a head and we can find an
956 956 # introduction from there,
957 957 # - the revision does not exist in a head because it has been
958 958 # changed since its introduction: we would have found a child
959 959 # and be in the other 'else' clause,
960 960 # - all versions of the revision are hidden.
961 961 if lowesthead is None:
962 962 lowesthead = {}
963 963 for h in repo.heads():
964 964 fnode = repo[h].manifest().get(f)
965 965 if fnode is not None:
966 966 lowesthead[fl.rev(fnode)] = h
967 967 headrev = lowesthead.get(fr)
968 968 if headrev is None:
969 969 # content is nowhere unfiltered
970 970 continue
971 971 rev = repo[headrev][f].introrev()
972 972 else:
973 973 # the lowest known child is a good upper bound
974 974 childcrev = backrevref[child]
975 975 # XXX this does not guarantee returning the lowest
976 976 # introduction of this revision, but this gives a
977 977 # result which is a good start and will fit in most
978 978 # cases. We probably need to fix the multiple
979 979 # introductions case properly (report each
980 980 # introduction, even for identical file revisions)
981 981 # once and for all at some point anyway.
982 982 for p in repo[childcrev][f].parents():
983 983 if p.filerev() == fr:
984 984 rev = p.rev()
985 985 break
986 986 if rev == lkr: # no shadowed entry found
987 987 # XXX This should never happen unless some manifest points
988 988 # to biggish file revisions (like a revision that uses a
989 989 # parent that never appears in the manifest ancestors)
990 990 continue
991 991
992 992 # Fill the data for the next iteration.
993 993 for p in fl.parentrevs(fr):
994 994 if 0 <= p and p not in lowestchild:
995 995 lowestchild[p] = fr
996 996 backrevref[fr] = rev
997 997 s.add(rev)
998 998
999 999 return subset & s
1000 1000
1001 1001 def first(repo, subset, x):
1002 1002 """``first(set, [n])``
1003 1003 An alias for limit().
1004 1004 """
1005 1005 return limit(repo, subset, x)
1006 1006
1007 1007 def _follow(repo, subset, x, name, followfirst=False):
1008 1008 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
1009 1009 c = repo['.']
1010 1010 if l:
1011 1011 x = getstring(l[0], _("%s expected a filename") % name)
1012 1012 if x in c:
1013 1013 cx = c[x]
1014 1014 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
1015 1015 # include the revision responsible for the most recent version
1016 1016 s.add(cx.introrev())
1017 1017 else:
1018 1018 return baseset()
1019 1019 else:
1020 1020 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1021 1021
1022 1022 return subset & s
1023 1023
1024 1024 def follow(repo, subset, x):
1025 1025 """``follow([file])``
1026 1026 An alias for ``::.`` (ancestors of the working directory's first parent).
1027 1027 If a filename is specified, the history of the given file is followed,
1028 1028 including copies.
1029 1029 """
1030 1030 return _follow(repo, subset, x, 'follow')
1031 1031
1032 1032 def _followfirst(repo, subset, x):
1033 1033 # ``followfirst([file])``
1034 1034 # Like ``follow([file])`` but follows only the first parent of
1035 1035 # every revision or file revision.
1036 1036 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1037 1037
1038 1038 def getall(repo, subset, x):
1039 1039 """``all()``
1040 1040 All changesets, the same as ``0:tip``.
1041 1041 """
1042 1042 # i18n: "all" is a keyword
1043 1043 getargs(x, 0, 0, _("all takes no arguments"))
1044 1044 return subset & spanset(repo) # drop "null" if any
1045 1045
1046 1046 def grep(repo, subset, x):
1047 1047 """``grep(regex)``
1048 1048 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1049 1049 to ensure special escape characters are handled correctly. Unlike
1050 1050 ``keyword(string)``, the match is case-sensitive.
1051 1051 """
1052 1052 try:
1053 1053 # i18n: "grep" is a keyword
1054 1054 gr = re.compile(getstring(x, _("grep requires a string")))
1055 1055 except re.error as e:
1056 1056 raise error.ParseError(_('invalid match pattern: %s') % e)
1057 1057
1058 1058 def matches(x):
1059 1059 c = repo[x]
1060 1060 for e in c.files() + [c.user(), c.description()]:
1061 1061 if gr.search(e):
1062 1062 return True
1063 1063 return False
1064 1064
1065 1065 return subset.filter(matches)
1066 1066
1067 1067 def _matchfiles(repo, subset, x):
1068 1068 # _matchfiles takes a revset list of prefixed arguments:
1069 1069 #
1070 1070 # [p:foo, i:bar, x:baz]
1071 1071 #
1072 1072 # builds a match object from them and filters subset. Allowed
1073 1073 # prefixes are 'p:' for regular patterns, 'i:' for include
1074 1074 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1075 1075 # a revision identifier, or the empty string to reference the
1076 1076 # working directory, from which the match object is
1077 1077 # initialized. Use 'd:' to set the default matching mode, default
1078 1078 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1079 1079
1080 1080 # i18n: "_matchfiles" is a keyword
1081 1081 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1082 1082 pats, inc, exc = [], [], []
1083 1083 rev, default = None, None
1084 1084 for arg in l:
1085 1085 # i18n: "_matchfiles" is a keyword
1086 1086 s = getstring(arg, _("_matchfiles requires string arguments"))
1087 1087 prefix, value = s[:2], s[2:]
1088 1088 if prefix == 'p:':
1089 1089 pats.append(value)
1090 1090 elif prefix == 'i:':
1091 1091 inc.append(value)
1092 1092 elif prefix == 'x:':
1093 1093 exc.append(value)
1094 1094 elif prefix == 'r:':
1095 1095 if rev is not None:
1096 1096 # i18n: "_matchfiles" is a keyword
1097 1097 raise error.ParseError(_('_matchfiles expected at most one '
1098 1098 'revision'))
1099 1099 if value != '': # empty means working directory; leave rev as None
1100 1100 rev = value
1101 1101 elif prefix == 'd:':
1102 1102 if default is not None:
1103 1103 # i18n: "_matchfiles" is a keyword
1104 1104 raise error.ParseError(_('_matchfiles expected at most one '
1105 1105 'default mode'))
1106 1106 default = value
1107 1107 else:
1108 1108 # i18n: "_matchfiles" is a keyword
1109 1109 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1110 1110 if not default:
1111 1111 default = 'glob'
1112 1112
1113 1113 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1114 1114 exclude=exc, ctx=repo[rev], default=default)
1115 1115
1116 1116 def matches(x):
1117 1117 for f in repo[x].files():
1118 1118 if m(f):
1119 1119 return True
1120 1120 return False
1121 1121
1122 1122 return subset.filter(matches)
1123 1123
1124 1124 def hasfile(repo, subset, x):
1125 1125 """``file(pattern)``
1126 1126 Changesets affecting files matched by pattern.
1127 1127
1128 1128 For a faster but less accurate result, consider using ``filelog()``
1129 1129 instead.
1130 1130
1131 1131 This predicate uses ``glob:`` as the default kind of pattern.
1132 1132 """
1133 1133 # i18n: "file" is a keyword
1134 1134 pat = getstring(x, _("file requires a pattern"))
1135 1135 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1136 1136
1137 1137 def head(repo, subset, x):
1138 1138 """``head()``
1139 1139 Changeset is a named branch head.
1140 1140 """
1141 1141 # i18n: "head" is a keyword
1142 1142 getargs(x, 0, 0, _("head takes no arguments"))
1143 1143 hs = set()
1144 1144 cl = repo.changelog
1145 1145 for b, ls in repo.branchmap().iteritems():
1146 1146 hs.update(cl.rev(h) for h in ls)
1147 1147 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1148 1148 # This does not break because of other fullreposet misbehavior.
1149 1149 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1150 1150 # necessary to ensure we preserve the order in subset.
1151 1151 return baseset(hs) & subset
1152 1152
1153 1153 def heads(repo, subset, x):
1154 1154 """``heads(set)``
1155 1155 Members of set with no children in set.
1156 1156 """
1157 1157 s = getset(repo, subset, x)
1158 1158 ps = parents(repo, subset, x)
1159 1159 return s - ps
1160 1160
1161 1161 def hidden(repo, subset, x):
1162 1162 """``hidden()``
1163 1163 Hidden changesets.
1164 1164 """
1165 1165 # i18n: "hidden" is a keyword
1166 1166 getargs(x, 0, 0, _("hidden takes no arguments"))
1167 1167 hiddenrevs = repoview.filterrevs(repo, 'visible')
1168 1168 return subset & hiddenrevs
1169 1169
1170 1170 def keyword(repo, subset, x):
1171 1171 """``keyword(string)``
1172 1172 Search commit message, user name, and names of changed files for
1173 1173 string. The match is case-insensitive.
1174 1174 """
1175 1175 # i18n: "keyword" is a keyword
1176 1176 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1177 1177
1178 1178 def matches(r):
1179 1179 c = repo[r]
1180 1180 return any(kw in encoding.lower(t)
1181 1181 for t in c.files() + [c.user(), c.description()])
1182 1182
1183 1183 return subset.filter(matches)
1184 1184
1185 1185 def limit(repo, subset, x):
1186 1186 """``limit(set, [n])``
1187 1187 First n members of set, defaulting to 1.
1188 1188 """
1189 1189 # i18n: "limit" is a keyword
1190 1190 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1191 1191 try:
1192 1192 lim = 1
1193 1193 if len(l) == 2:
1194 1194 # i18n: "limit" is a keyword
1195 1195 lim = int(getstring(l[1], _("limit requires a number")))
1196 1196 except (TypeError, ValueError):
1197 1197 # i18n: "limit" is a keyword
1198 1198 raise error.ParseError(_("limit expects a number"))
1199 1199 ss = subset
1200 1200 os = getset(repo, fullreposet(repo), l[0])
1201 1201 result = []
1202 1202 it = iter(os)
1203 1203 for x in xrange(lim):
1204 1204 y = next(it, None)
1205 1205 if y is None:
1206 1206 break
1207 1207 elif y in ss:
1208 1208 result.append(y)
1209 1209 return baseset(result)
1210 1210
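# Editorial illustration (not part of the original module): limit() walks
# the inner set in its own order and keeps the first n members that are
# also present in the subset being filtered, so on an unfiltered repository
# with at least two revisions
#
#   hg log -r "limit(all(), 2)"
#
# selects revisions 0 and 1, because all() iterates in ascending order.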
1211 1211 def last(repo, subset, x):
1212 1212 """``last(set, [n])``
1213 1213 Last n members of set, defaulting to 1.
1214 1214 """
1215 1215 # i18n: "last" is a keyword
1216 1216 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1217 1217 try:
1218 1218 lim = 1
1219 1219 if len(l) == 2:
1220 1220 # i18n: "last" is a keyword
1221 1221 lim = int(getstring(l[1], _("last requires a number")))
1222 1222 except (TypeError, ValueError):
1223 1223 # i18n: "last" is a keyword
1224 1224 raise error.ParseError(_("last expects a number"))
1225 1225 ss = subset
1226 1226 os = getset(repo, fullreposet(repo), l[0])
1227 1227 os.reverse()
1228 1228 result = []
1229 1229 it = iter(os)
1230 1230 for x in xrange(lim):
1231 1231 y = next(it, None)
1232 1232 if y is None:
1233 1233 break
1234 1234 elif y in ss:
1235 1235 result.append(y)
1236 1236 return baseset(result)
1237 1237
1238 1238 def maxrev(repo, subset, x):
1239 1239 """``max(set)``
1240 1240 Changeset with highest revision number in set.
1241 1241 """
1242 1242 os = getset(repo, fullreposet(repo), x)
1243 1243 if os:
1244 1244 m = os.max()
1245 1245 if m in subset:
1246 1246 return baseset([m])
1247 1247 return baseset()
1248 1248
1249 1249 def merge(repo, subset, x):
1250 1250 """``merge()``
1251 1251 Changeset is a merge changeset.
1252 1252 """
1253 1253 # i18n: "merge" is a keyword
1254 1254 getargs(x, 0, 0, _("merge takes no arguments"))
1255 1255 cl = repo.changelog
1256 1256 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1257 1257
1258 1258 def branchpoint(repo, subset, x):
1259 1259 """``branchpoint()``
1260 1260 Changesets with more than one child.
1261 1261 """
1262 1262 # i18n: "branchpoint" is a keyword
1263 1263 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1264 1264 cl = repo.changelog
1265 1265 if not subset:
1266 1266 return baseset()
1267 1267 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1268 1268 # (and if it is not, it should.)
1269 1269 baserev = min(subset)
1270 1270 parentscount = [0]*(len(repo) - baserev)
1271 1271 for r in cl.revs(start=baserev + 1):
1272 1272 for p in cl.parentrevs(r):
1273 1273 if p >= baserev:
1274 1274 parentscount[p - baserev] += 1
1275 1275 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1276 1276
1277 1277 def minrev(repo, subset, x):
1278 1278 """``min(set)``
1279 1279 Changeset with lowest revision number in set.
1280 1280 """
1281 1281 os = getset(repo, fullreposet(repo), x)
1282 1282 if os:
1283 1283 m = os.min()
1284 1284 if m in subset:
1285 1285 return baseset([m])
1286 1286 return baseset()
1287 1287
1288 1288 def modifies(repo, subset, x):
1289 1289 """``modifies(pattern)``
1290 1290 Changesets modifying files matched by pattern.
1291 1291
1292 1292 The pattern without explicit kind like ``glob:`` is expected to be
1293 1293 relative to the current directory and match against a file or a
1294 1294 directory.
1295 1295 """
1296 1296 # i18n: "modifies" is a keyword
1297 1297 pat = getstring(x, _("modifies requires a pattern"))
1298 1298 return checkstatus(repo, subset, pat, 0)
1299 1299
1300 1300 def named(repo, subset, x):
1301 1301 """``named(namespace)``
1302 1302 The changesets in a given namespace.
1303 1303
1304 1304 If `namespace` starts with `re:`, the remainder of the string is treated as
1305 1305 a regular expression. To match a namespace that actually starts with `re:`,
1306 1306 use the prefix `literal:`.
1307 1307 """
1308 1308 # i18n: "named" is a keyword
1309 1309 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1310 1310
1311 1311 ns = getstring(args[0],
1312 1312 # i18n: "named" is a keyword
1313 1313 _('the argument to named must be a string'))
1314 1314 kind, pattern, matcher = _stringmatcher(ns)
1315 1315 namespaces = set()
1316 1316 if kind == 'literal':
1317 1317 if pattern not in repo.names:
1318 1318 raise error.RepoLookupError(_("namespace '%s' does not exist")
1319 1319 % ns)
1320 1320 namespaces.add(repo.names[pattern])
1321 1321 else:
1322 1322 for name, ns in repo.names.iteritems():
1323 1323 if matcher(name):
1324 1324 namespaces.add(ns)
1325 1325 if not namespaces:
1326 1326 raise error.RepoLookupError(_("no namespace exists"
1327 1327 " that match '%s'") % pattern)
1328 1328
1329 1329 names = set()
1330 1330 for ns in namespaces:
1331 1331 for name in ns.listnames(repo):
1332 1332 if name not in ns.deprecated:
1333 1333 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1334 1334
1335 1335 names -= set([node.nullrev])
1336 1336 return subset & names
1337 1337
1338 1338 def node_(repo, subset, x):
1339 1339 """``id(string)``
1340 1340 Revision non-ambiguously specified by the given hex string prefix.
1341 1341 """
1342 1342 # i18n: "id" is a keyword
1343 1343 l = getargs(x, 1, 1, _("id requires one argument"))
1344 1344 # i18n: "id" is a keyword
1345 1345 n = getstring(l[0], _("id requires a string"))
1346 1346 if len(n) == 40:
1347 1347 try:
1348 1348 rn = repo.changelog.rev(node.bin(n))
1349 1349 except (LookupError, TypeError):
1350 1350 rn = None
1351 1351 else:
1352 1352 rn = None
1353 1353 pm = repo.changelog._partialmatch(n)
1354 1354 if pm is not None:
1355 1355 rn = repo.changelog.rev(pm)
1356 1356
1357 1357 if rn is None:
1358 1358 return baseset()
1359 1359 result = baseset([rn])
1360 1360 return result & subset
1361 1361
1362 1362 def obsolete(repo, subset, x):
1363 1363 """``obsolete()``
1364 1364 Mutable changeset with a newer version."""
1365 1365 # i18n: "obsolete" is a keyword
1366 1366 getargs(x, 0, 0, _("obsolete takes no arguments"))
1367 1367 obsoletes = obsmod.getrevs(repo, 'obsolete')
1368 1368 return subset & obsoletes
1369 1369
1370 1370 def only(repo, subset, x):
1371 1371 """``only(set, [set])``
1372 1372 Changesets that are ancestors of the first set that are not ancestors
1373 1373 of any other head in the repo. If a second set is specified, the result
1374 1374 is ancestors of the first set that are not ancestors of the second set
1375 1375 (i.e. ::<set1> - ::<set2>).
1376 1376 """
1377 1377 cl = repo.changelog
1378 1378 # i18n: "only" is a keyword
1379 1379 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1380 1380 include = getset(repo, fullreposet(repo), args[0])
1381 1381 if len(args) == 1:
1382 1382 if not include:
1383 1383 return baseset()
1384 1384
1385 1385 descendants = set(_revdescendants(repo, include, False))
1386 1386 exclude = [rev for rev in cl.headrevs()
1387 1387 if not rev in descendants and not rev in include]
1388 1388 else:
1389 1389 exclude = getset(repo, fullreposet(repo), args[1])
1390 1390
1391 1391 results = set(cl.findmissingrevs(common=exclude, heads=include))
1392 1392 # XXX we should turn this into a baseset instead of a set, smartset may do
1393 1393 # some optimisations from the fact this is a baseset.
1394 1394 return subset & results
1395 1395
1396 1396 def origin(repo, subset, x):
1397 1397 """``origin([set])``
1398 1398 Changesets that were specified as a source for the grafts, transplants or
1399 1399 rebases that created the given revisions. Omitting the optional set is the
1400 1400 same as passing all(). If a changeset created by these operations is itself
1401 1401 specified as a source for one of these operations, only the source changeset
1402 1402 for the first operation is selected.
1403 1403 """
1404 1404 if x is not None:
1405 1405 dests = getset(repo, fullreposet(repo), x)
1406 1406 else:
1407 1407 dests = fullreposet(repo)
1408 1408
1409 1409 def _firstsrc(rev):
1410 1410 src = _getrevsource(repo, rev)
1411 1411 if src is None:
1412 1412 return None
1413 1413
1414 1414 while True:
1415 1415 prev = _getrevsource(repo, src)
1416 1416
1417 1417 if prev is None:
1418 1418 return src
1419 1419 src = prev
1420 1420
1421 1421 o = set([_firstsrc(r) for r in dests])
1422 1422 o -= set([None])
1423 1423 # XXX we should turn this into a baseset instead of a set, smartset may do
1424 1424 # some optimisations from the fact this is a baseset.
1425 1425 return subset & o
1426 1426
1427 1427 def outgoing(repo, subset, x):
1428 1428 """``outgoing([path])``
1429 1429 Changesets not found in the specified destination repository, or the
1430 1430 default push location.
1431 1431 """
1432 1432 # Avoid cycles.
1433 1433 import discovery
1434 1434 import hg
1435 1435 # i18n: "outgoing" is a keyword
1436 1436 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1437 1437 # i18n: "outgoing" is a keyword
1438 1438 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1439 1439 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1440 1440 dest, branches = hg.parseurl(dest)
1441 1441 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1442 1442 if revs:
1443 1443 revs = [repo.lookup(rev) for rev in revs]
1444 1444 other = hg.peer(repo, {}, dest)
1445 1445 repo.ui.pushbuffer()
1446 1446 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1447 1447 repo.ui.popbuffer()
1448 1448 cl = repo.changelog
1449 1449 o = set([cl.rev(r) for r in outgoing.missing])
1450 1450 return subset & o
1451 1451
1452 1452 def p1(repo, subset, x):
1453 1453 """``p1([set])``
1454 1454 First parent of changesets in set, or the working directory.
1455 1455 """
1456 1456 if x is None:
1457 1457 p = repo[x].p1().rev()
1458 1458 if p >= 0:
1459 1459 return subset & baseset([p])
1460 1460 return baseset()
1461 1461
1462 1462 ps = set()
1463 1463 cl = repo.changelog
1464 1464 for r in getset(repo, fullreposet(repo), x):
1465 1465 ps.add(cl.parentrevs(r)[0])
1466 1466 ps -= set([node.nullrev])
1467 1467 # XXX we should turn this into a baseset instead of a set, smartset may do
1468 1468 # some optimisations from the fact this is a baseset.
1469 1469 return subset & ps
1470 1470
1471 1471 def p2(repo, subset, x):
1472 1472 """``p2([set])``
1473 1473 Second parent of changesets in set, or the working directory.
1474 1474 """
1475 1475 if x is None:
1476 1476 ps = repo[x].parents()
1477 1477 try:
1478 1478 p = ps[1].rev()
1479 1479 if p >= 0:
1480 1480 return subset & baseset([p])
1481 1481 return baseset()
1482 1482 except IndexError:
1483 1483 return baseset()
1484 1484
1485 1485 ps = set()
1486 1486 cl = repo.changelog
1487 1487 for r in getset(repo, fullreposet(repo), x):
1488 1488 ps.add(cl.parentrevs(r)[1])
1489 1489 ps -= set([node.nullrev])
1490 1490 # XXX we should turn this into a baseset instead of a set, smartset may do
1491 1491 # some optimisations from the fact this is a baseset.
1492 1492 return subset & ps
1493 1493
1494 1494 def parents(repo, subset, x):
1495 1495 """``parents([set])``
1496 1496 The set of all parents for all changesets in set, or the working directory.
1497 1497 """
1498 1498 if x is None:
1499 1499 ps = set(p.rev() for p in repo[x].parents())
1500 1500 else:
1501 1501 ps = set()
1502 1502 cl = repo.changelog
1503 1503 up = ps.update
1504 1504 parentrevs = cl.parentrevs
1505 1505 for r in getset(repo, fullreposet(repo), x):
1506 1506 if r == node.wdirrev:
1507 1507 up(p.rev() for p in repo[r].parents())
1508 1508 else:
1509 1509 up(parentrevs(r))
1510 1510 ps -= set([node.nullrev])
1511 1511 return subset & ps
1512 1512
1513 1513 def _phase(repo, subset, target):
1514 1514 """helper to select all rev in phase <target>"""
1515 1515 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1516 1516 if repo._phasecache._phasesets:
1517 1517 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1518 1518 s = baseset(s)
1519 1519 s.sort() # set are non ordered, so we enforce ascending
1520 1520 return subset & s
1521 1521 else:
1522 1522 phase = repo._phasecache.phase
1523 1523 condition = lambda r: phase(repo, r) == target
1524 1524 return subset.filter(condition, cache=False)
1525 1525
1526 1526 def draft(repo, subset, x):
1527 1527 """``draft()``
1528 1528 Changeset in draft phase."""
1529 1529 # i18n: "draft" is a keyword
1530 1530 getargs(x, 0, 0, _("draft takes no arguments"))
1531 1531 target = phases.draft
1532 1532 return _phase(repo, subset, target)
1533 1533
1534 1534 def secret(repo, subset, x):
1535 1535 """``secret()``
1536 1536 Changeset in secret phase."""
1537 1537 # i18n: "secret" is a keyword
1538 1538 getargs(x, 0, 0, _("secret takes no arguments"))
1539 1539 target = phases.secret
1540 1540 return _phase(repo, subset, target)
1541 1541
1542 1542 def parentspec(repo, subset, x, n):
1543 1543 """``set^0``
1544 1544 The set.
1545 1545 ``set^1`` (or ``set^``), ``set^2``
1546 1546 First or second parent, respectively, of all changesets in set.
1547 1547 """
1548 1548 try:
1549 1549 n = int(n[1])
1550 1550 if n not in (0, 1, 2):
1551 1551 raise ValueError
1552 1552 except (TypeError, ValueError):
1553 1553 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1554 1554 ps = set()
1555 1555 cl = repo.changelog
1556 1556 for r in getset(repo, fullreposet(repo), x):
1557 1557 if n == 0:
1558 1558 ps.add(r)
1559 1559 elif n == 1:
1560 1560 ps.add(cl.parentrevs(r)[0])
1561 1561 elif n == 2:
1562 1562 parents = cl.parentrevs(r)
1563 1563 if len(parents) > 1:
1564 1564 ps.add(parents[1])
1565 1565 return subset & ps
1566 1566
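# Editor's sketch, not part of the original module: the '^' suffix handled by
# parentspec(), assuming a loaded 'repo':
#
#   repo.revs('tip^')    # first parent of tip (same as 'tip^1' or 'p1(tip)')
#   repo.revs('tip^2')   # second parent of tip; empty unless tip is a merge
#   repo.revs('tip^0')   # tip itself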
1567 1567 def present(repo, subset, x):
1568 1568 """``present(set)``
1569 1569 An empty set, if any revision in set isn't found; otherwise,
1570 1570 all revisions in set.
1571 1571
1572 1572     If any of the specified revisions is not present in the local repository,
1573 1573 the query is normally aborted. But this predicate allows the query
1574 1574 to continue even in such cases.
1575 1575 """
1576 1576 try:
1577 1577 return getset(repo, subset, x)
1578 1578 except error.RepoLookupError:
1579 1579 return baseset()
1580 1580
1581 1581 # for internal use
1582 1582 def _notpublic(repo, subset, x):
1583 1583 getargs(x, 0, 0, "_notpublic takes no arguments")
1584 1584 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1585 1585 if repo._phasecache._phasesets:
1586 1586 s = set()
1587 1587 for u in repo._phasecache._phasesets[1:]:
1588 1588 s.update(u)
1589 1589 s = baseset(s - repo.changelog.filteredrevs)
1590 1590 s.sort()
1591 1591 return subset & s
1592 1592 else:
1593 1593 phase = repo._phasecache.phase
1594 1594 target = phases.public
1595 1595 condition = lambda r: phase(repo, r) != target
1596 1596 return subset.filter(condition, cache=False)
1597 1597
1598 1598 def public(repo, subset, x):
1599 1599 """``public()``
1600 1600     Changesets in the public phase."""
1601 1601 # i18n: "public" is a keyword
1602 1602 getargs(x, 0, 0, _("public takes no arguments"))
1603 1603 phase = repo._phasecache.phase
1604 1604 target = phases.public
1605 1605 condition = lambda r: phase(repo, r) == target
1606 1606 return subset.filter(condition, cache=False)
1607 1607
1608 1608 def remote(repo, subset, x):
1609 1609 """``remote([id [,path]])``
1610 1610 Local revision that corresponds to the given identifier in a
1611 1611 remote repository, if present. Here, the '.' identifier is a
1612 1612 synonym for the current local branch.
1613 1613 """
1614 1614
1615 1615 import hg # avoid start-up nasties
1616 1616 # i18n: "remote" is a keyword
1617 1617 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1618 1618
1619 1619 q = '.'
1620 1620 if len(l) > 0:
1621 1621 # i18n: "remote" is a keyword
1622 1622 q = getstring(l[0], _("remote requires a string id"))
1623 1623 if q == '.':
1624 1624 q = repo['.'].branch()
1625 1625
1626 1626 dest = ''
1627 1627 if len(l) > 1:
1628 1628 # i18n: "remote" is a keyword
1629 1629 dest = getstring(l[1], _("remote requires a repository path"))
1630 1630 dest = repo.ui.expandpath(dest or 'default')
1631 1631 dest, branches = hg.parseurl(dest)
1632 1632 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1633 1633 if revs:
1634 1634 revs = [repo.lookup(rev) for rev in revs]
1635 1635 other = hg.peer(repo, {}, dest)
1636 1636 n = other.lookup(q)
1637 1637 if n in repo:
1638 1638 r = repo[n].rev()
1639 1639 if r in subset:
1640 1640 return baseset([r])
1641 1641 return baseset()
1642 1642
1643 1643 def removes(repo, subset, x):
1644 1644 """``removes(pattern)``
1645 1645 Changesets which remove files matching pattern.
1646 1646
1647 1647 The pattern without explicit kind like ``glob:`` is expected to be
1648 1648 relative to the current directory and match against a file or a
1649 1649 directory.
1650 1650 """
1651 1651 # i18n: "removes" is a keyword
1652 1652 pat = getstring(x, _("removes requires a pattern"))
1653 1653 return checkstatus(repo, subset, pat, 2)
1654 1654
1655 1655 def rev(repo, subset, x):
1656 1656 """``rev(number)``
1657 1657 Revision with the given numeric identifier.
1658 1658 """
1659 1659 # i18n: "rev" is a keyword
1660 1660 l = getargs(x, 1, 1, _("rev requires one argument"))
1661 1661 try:
1662 1662 # i18n: "rev" is a keyword
1663 1663 l = int(getstring(l[0], _("rev requires a number")))
1664 1664 except (TypeError, ValueError):
1665 1665 # i18n: "rev" is a keyword
1666 1666 raise error.ParseError(_("rev expects a number"))
1667 1667 if l not in repo.changelog and l != node.nullrev:
1668 1668 return baseset()
1669 1669 return subset & baseset([l])
1670 1670
1671 1671 def matching(repo, subset, x):
1672 1672 """``matching(revision [, field])``
1673 1673 Changesets in which a given set of fields match the set of fields in the
1674 1674 selected revision or set.
1675 1675
1676 1676     To match more than one field, pass the list of fields to match separated
1677 1677 by spaces (e.g. ``author description``).
1678 1678
1679 1679 Valid fields are most regular revision fields and some special fields.
1680 1680
1681 1681 Regular revision fields are ``description``, ``author``, ``branch``,
1682 1682 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1683 1683 and ``diff``.
1684 1684 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1685 1685 contents of the revision. Two revisions matching their ``diff`` will
1686 1686 also match their ``files``.
1687 1687
1688 1688 Special fields are ``summary`` and ``metadata``:
1689 1689 ``summary`` matches the first line of the description.
1690 1690 ``metadata`` is equivalent to matching ``description user date``
1691 1691 (i.e. it matches the main metadata fields).
1692 1692
1693 1693 ``metadata`` is the default field which is used when no fields are
1694 1694 specified. You can match more than one field at a time.
1695 1695 """
1696 1696 # i18n: "matching" is a keyword
1697 1697 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1698 1698
1699 1699 revs = getset(repo, fullreposet(repo), l[0])
1700 1700
1701 1701 fieldlist = ['metadata']
1702 1702 if len(l) > 1:
1703 1703 fieldlist = getstring(l[1],
1704 1704 # i18n: "matching" is a keyword
1705 1705 _("matching requires a string "
1706 1706 "as its second argument")).split()
1707 1707
1708 1708 # Make sure that there are no repeated fields,
1709 1709 # expand the 'special' 'metadata' field type
1710 1710 # and check the 'files' whenever we check the 'diff'
1711 1711 fields = []
1712 1712 for field in fieldlist:
1713 1713 if field == 'metadata':
1714 1714 fields += ['user', 'description', 'date']
1715 1715 elif field == 'diff':
1716 1716 # a revision matching the diff must also match the files
1717 1717 # since matching the diff is very costly, make sure to
1718 1718 # also match the files first
1719 1719 fields += ['files', 'diff']
1720 1720 else:
1721 1721 if field == 'author':
1722 1722 field = 'user'
1723 1723 fields.append(field)
1724 1724 fields = set(fields)
1725 1725 if 'summary' in fields and 'description' in fields:
1726 1726 # If a revision matches its description it also matches its summary
1727 1727 fields.discard('summary')
1728 1728
1729 1729 # We may want to match more than one field
1730 1730 # Not all fields take the same amount of time to be matched
1731 1731 # Sort the selected fields in order of increasing matching cost
1732 1732 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1733 1733 'files', 'description', 'substate', 'diff']
1734 1734 def fieldkeyfunc(f):
1735 1735 try:
1736 1736 return fieldorder.index(f)
1737 1737 except ValueError:
1738 1738 # assume an unknown field is very costly
1739 1739 return len(fieldorder)
1740 1740 fields = list(fields)
1741 1741 fields.sort(key=fieldkeyfunc)
1742 1742
1743 1743 # Each field will be matched with its own "getfield" function
1744 1744 # which will be added to the getfieldfuncs array of functions
1745 1745 getfieldfuncs = []
1746 1746 _funcs = {
1747 1747 'user': lambda r: repo[r].user(),
1748 1748 'branch': lambda r: repo[r].branch(),
1749 1749 'date': lambda r: repo[r].date(),
1750 1750 'description': lambda r: repo[r].description(),
1751 1751 'files': lambda r: repo[r].files(),
1752 1752 'parents': lambda r: repo[r].parents(),
1753 1753 'phase': lambda r: repo[r].phase(),
1754 1754 'substate': lambda r: repo[r].substate,
1755 1755 'summary': lambda r: repo[r].description().splitlines()[0],
1756 1756         'diff': lambda r: list(repo[r].diff(git=True)),
1757 1757 }
1758 1758 for info in fields:
1759 1759 getfield = _funcs.get(info, None)
1760 1760 if getfield is None:
1761 1761 raise error.ParseError(
1762 1762 # i18n: "matching" is a keyword
1763 1763 _("unexpected field name passed to matching: %s") % info)
1764 1764 getfieldfuncs.append(getfield)
1765 1765     # convert the getfield array of functions into a "getinfo" function
1766 1766     # which returns a list of field values for a given revision
1767 1767     # (one value per selected field, in matching-cost order)
1768 1768 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1769 1769
1770 1770 def matches(x):
1771 1771 for rev in revs:
1772 1772 target = getinfo(rev)
1773 1773 match = True
1774 1774 for n, f in enumerate(getfieldfuncs):
1775 1775 if target[n] != f(x):
1776 1776 match = False
1777 1777 if match:
1778 1778 return True
1779 1779 return False
1780 1780
1781 1781 return subset.filter(matches)
1782 1782
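# Editor's sketch, not part of the original module: typical uses of the
# matching() predicate, assuming a loaded 'repo':
#
#   repo.revs('matching(tip)')                # same user, description and date
#   repo.revs('matching(42, "author date")')  # same author and date as rev 42
#   repo.revs('matching(., "diff")')          # identical patch content
#
# The selected fields are sorted by expected matching cost (see fieldorder
# above) before the comparison functions are built.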
1783 1783 def reverse(repo, subset, x):
1784 1784 """``reverse(set)``
1785 1785 Reverse order of set.
1786 1786 """
1787 1787 l = getset(repo, subset, x)
1788 1788 l.reverse()
1789 1789 return l
1790 1790
1791 1791 def roots(repo, subset, x):
1792 1792 """``roots(set)``
1793 1793 Changesets in set with no parent changeset in set.
1794 1794 """
1795 1795 s = getset(repo, fullreposet(repo), x)
1796 1796 parents = repo.changelog.parentrevs
1797 1797 def filter(r):
1798 1798 for p in parents(r):
1799 1799 if 0 <= p and p in s:
1800 1800 return False
1801 1801 return True
1802 1802 return subset & s.filter(filter)
1803 1803
1804 1804 def sort(repo, subset, x):
1805 1805 """``sort(set[, [-]key...])``
1806 1806     Sort set by keys. The default sort order is ascending; specify a key
1807 1807 as ``-key`` to sort in descending order.
1808 1808
1809 1809 The keys can be:
1810 1810
1811 1811 - ``rev`` for the revision number,
1812 1812 - ``branch`` for the branch name,
1813 1813 - ``desc`` for the commit message (description),
1814 1814 - ``user`` for user name (``author`` can be used as an alias),
1815 1815 - ``date`` for the commit date
1816 1816 """
1817 1817 # i18n: "sort" is a keyword
1818 1818 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1819 1819 keys = "rev"
1820 1820 if len(l) == 2:
1821 1821 # i18n: "sort" is a keyword
1822 1822 keys = getstring(l[1], _("sort spec must be a string"))
1823 1823
1824 1824 s = l[0]
1825 1825 keys = keys.split()
1826 1826 l = []
1827 1827 def invert(s):
1828 1828 return "".join(chr(255 - ord(c)) for c in s)
1829 1829 revs = getset(repo, subset, s)
1830 1830 if keys == ["rev"]:
1831 1831 revs.sort()
1832 1832 return revs
1833 1833 elif keys == ["-rev"]:
1834 1834 revs.sort(reverse=True)
1835 1835 return revs
1836 1836 for r in revs:
1837 1837 c = repo[r]
1838 1838 e = []
1839 1839 for k in keys:
1840 1840 if k == 'rev':
1841 1841 e.append(r)
1842 1842 elif k == '-rev':
1843 1843 e.append(-r)
1844 1844 elif k == 'branch':
1845 1845 e.append(c.branch())
1846 1846 elif k == '-branch':
1847 1847 e.append(invert(c.branch()))
1848 1848 elif k == 'desc':
1849 1849 e.append(c.description())
1850 1850 elif k == '-desc':
1851 1851 e.append(invert(c.description()))
1852 1852 elif k in 'user author':
1853 1853 e.append(c.user())
1854 1854 elif k in '-user -author':
1855 1855 e.append(invert(c.user()))
1856 1856 elif k == 'date':
1857 1857 e.append(c.date()[0])
1858 1858 elif k == '-date':
1859 1859 e.append(-c.date()[0])
1860 1860 else:
1861 1861 raise error.ParseError(_("unknown sort key %r") % k)
1862 1862 e.append(r)
1863 1863 l.append(e)
1864 1864 l.sort()
1865 1865 return baseset([e[-1] for e in l])
1866 1866
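# Editor's sketch, not part of the original module: sort() accepts several keys
# at once, and a leading '-' reverses an individual key. Assuming a loaded
# 'repo':
#
#   repo.revs('sort(all(), date)')           # oldest commit date first
#   repo.revs('sort(all(), "-date")')        # newest commit date first
#   repo.revs('sort(all(), "branch -rev")')  # by branch name, then newest rev
#
# The pure 'rev'/'-rev' cases take the fast path that merely reorders the
# smartset; any other key decorates every revision and sorts the list.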
1867 1867 def subrepo(repo, subset, x):
1868 1868 """``subrepo([pattern])``
1869 1869 Changesets that add, modify or remove the given subrepo. If no subrepo
1870 1870 pattern is named, any subrepo changes are returned.
1871 1871 """
1872 1872 # i18n: "subrepo" is a keyword
1873 1873 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1874 1874 if len(args) != 0:
1875 1875 pat = getstring(args[0], _("subrepo requires a pattern"))
1876 1876
1877 1877 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1878 1878
1879 1879 def submatches(names):
1880 1880 k, p, m = _stringmatcher(pat)
1881 1881 for name in names:
1882 1882 if m(name):
1883 1883 yield name
1884 1884
1885 1885 def matches(x):
1886 1886 c = repo[x]
1887 1887 s = repo.status(c.p1().node(), c.node(), match=m)
1888 1888
1889 1889 if len(args) == 0:
1890 1890 return s.added or s.modified or s.removed
1891 1891
1892 1892 if s.added:
1893 1893 return any(submatches(c.substate.keys()))
1894 1894
1895 1895 if s.modified:
1896 1896 subs = set(c.p1().substate.keys())
1897 1897 subs.update(c.substate.keys())
1898 1898
1899 1899 for path in submatches(subs):
1900 1900 if c.p1().substate.get(path) != c.substate.get(path):
1901 1901 return True
1902 1902
1903 1903 if s.removed:
1904 1904 return any(submatches(c.p1().substate.keys()))
1905 1905
1906 1906 return False
1907 1907
1908 1908 return subset.filter(matches)
1909 1909
1910 1910 def _stringmatcher(pattern):
1911 1911 """
1912 1912 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1913 1913 returns the matcher name, pattern, and matcher function.
1914 1914 missing or unknown prefixes are treated as literal matches.
1915 1915
1916 1916 helper for tests:
1917 1917 >>> def test(pattern, *tests):
1918 1918 ... kind, pattern, matcher = _stringmatcher(pattern)
1919 1919 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1920 1920
1921 1921 exact matching (no prefix):
1922 1922 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1923 1923 ('literal', 'abcdefg', [False, False, True])
1924 1924
1925 1925 regex matching ('re:' prefix)
1926 1926 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1927 1927 ('re', 'a.+b', [False, False, True])
1928 1928
1929 1929 force exact matches ('literal:' prefix)
1930 1930 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1931 1931 ('literal', 're:foobar', [False, True])
1932 1932
1933 1933 unknown prefixes are ignored and treated as literals
1934 1934 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1935 1935 ('literal', 'foo:bar', [False, False, True])
1936 1936 """
1937 1937 if pattern.startswith('re:'):
1938 1938 pattern = pattern[3:]
1939 1939 try:
1940 1940 regex = re.compile(pattern)
1941 1941 except re.error as e:
1942 1942 raise error.ParseError(_('invalid regular expression: %s')
1943 1943 % e)
1944 1944 return 're', pattern, regex.search
1945 1945 elif pattern.startswith('literal:'):
1946 1946 pattern = pattern[8:]
1947 1947 return 'literal', pattern, pattern.__eq__
1948 1948
1949 1949 def _substringmatcher(pattern):
1950 1950 kind, pattern, matcher = _stringmatcher(pattern)
1951 1951 if kind == 'literal':
1952 1952 matcher = lambda s: pattern in s
1953 1953 return kind, pattern, matcher
1954 1954
1955 1955 def tag(repo, subset, x):
1956 1956 """``tag([name])``
1957 1957 The specified tag by name, or all tagged revisions if no name is given.
1958 1958
1959 1959 If `name` starts with `re:`, the remainder of the name is treated as
1960 1960 a regular expression. To match a tag that actually starts with `re:`,
1961 1961 use the prefix `literal:`.
1962 1962 """
1963 1963 # i18n: "tag" is a keyword
1964 1964 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1965 1965 cl = repo.changelog
1966 1966 if args:
1967 1967 pattern = getstring(args[0],
1968 1968 # i18n: "tag" is a keyword
1969 1969 _('the argument to tag must be a string'))
1970 1970 kind, pattern, matcher = _stringmatcher(pattern)
1971 1971 if kind == 'literal':
1972 1972 # avoid resolving all tags
1973 1973 tn = repo._tagscache.tags.get(pattern, None)
1974 1974 if tn is None:
1975 1975 raise error.RepoLookupError(_("tag '%s' does not exist")
1976 1976 % pattern)
1977 1977 s = set([repo[tn].rev()])
1978 1978 else:
1979 1979 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1980 1980 else:
1981 1981 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1982 1982 return subset & s
1983 1983
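# Editor's sketch, not part of the original module: the pattern prefixes
# recognized by _stringmatcher() as used through tag(), assuming a loaded
# 'repo':
#
#   repo.revs('tag()')                # every tagged revision except 'tip'
#   repo.revs('tag("1.0")')           # exact tag name (fast path, no regex)
#   repo.revs('tag("re:^release-")')  # tags matching a regular expression
#   repo.revs('tag("literal:re:x")')  # a tag literally named "re:x"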
1984 1984 def tagged(repo, subset, x):
1985 1985 return tag(repo, subset, x)
1986 1986
1987 1987 def unstable(repo, subset, x):
1988 1988 """``unstable()``
1989 1989 Non-obsolete changesets with obsolete ancestors.
1990 1990 """
1991 1991 # i18n: "unstable" is a keyword
1992 1992 getargs(x, 0, 0, _("unstable takes no arguments"))
1993 1993 unstables = obsmod.getrevs(repo, 'unstable')
1994 1994 return subset & unstables
1995 1995
1996 1996
1997 1997 def user(repo, subset, x):
1998 1998 """``user(string)``
1999 1999 User name contains string. The match is case-insensitive.
2000 2000
2001 2001 If `string` starts with `re:`, the remainder of the string is treated as
2002 2002 a regular expression. To match a user that actually contains `re:`, use
2003 2003 the prefix `literal:`.
2004 2004 """
2005 2005 return author(repo, subset, x)
2006 2006
2007 2007 # experimental
2008 2008 def wdir(repo, subset, x):
2009 2009 # i18n: "wdir" is a keyword
2010 2010 getargs(x, 0, 0, _("wdir takes no arguments"))
2011 2011 if node.wdirrev in subset or isinstance(subset, fullreposet):
2012 2012 return baseset([node.wdirrev])
2013 2013 return baseset()
2014 2014
2015 2015 # for internal use
2016 2016 def _list(repo, subset, x):
2017 2017 s = getstring(x, "internal error")
2018 2018 if not s:
2019 2019 return baseset()
2020 2020     # remove duplicates here; it's difficult for the caller to deduplicate
2021 2021     # because different symbols can point to the same rev.
2022 2022 cl = repo.changelog
2023 2023 ls = []
2024 2024 seen = set()
2025 2025 for t in s.split('\0'):
2026 2026 try:
2027 2027 # fast path for integer revision
2028 2028 r = int(t)
2029 2029 if str(r) != t or r not in cl:
2030 2030 raise ValueError
2031 2031 except ValueError:
2032 2032 r = repo[t].rev()
2033 2033 if r in seen:
2034 2034 continue
2035 2035 if (r in subset
2036 2036 or r == node.nullrev and isinstance(subset, fullreposet)):
2037 2037 ls.append(r)
2038 2038 seen.add(r)
2039 2039 return baseset(ls)
2040 2040
2041 2041 # for internal use
2042 2042 def _intlist(repo, subset, x):
2043 2043 s = getstring(x, "internal error")
2044 2044 if not s:
2045 2045 return baseset()
2046 2046 ls = [int(r) for r in s.split('\0')]
2047 2047 s = subset
2048 2048 return baseset([r for r in ls if r in s])
2049 2049
2050 2050 # for internal use
2051 2051 def _hexlist(repo, subset, x):
2052 2052 s = getstring(x, "internal error")
2053 2053 if not s:
2054 2054 return baseset()
2055 2055 cl = repo.changelog
2056 2056 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2057 2057 s = subset
2058 2058 return baseset([r for r in ls if r in s])
2059 2059
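# Editor's note, not part of the original module: the three internal predicates
# above receive a single '\0'-joined string built by formatspec() further down.
# A rough sketch of the correspondence:
#
#   formatspec('%ld', [10, 11, 12])  ->  "_intlist('10\x0011\x0012')"
#   formatspec('%ls', ['a', 'b'])    ->  "_list('a\x00b')"
#   formatspec('%ln', nodes)         ->  "_hexlist('<hex>\x00<hex>...')"
#
# Duplicate elimination happens inside _list() because different symbols (for
# example a bookmark and a hash) can resolve to the same revision.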
2060 2060 symbols = {
2061 2061 "adds": adds,
2062 2062 "all": getall,
2063 2063 "ancestor": ancestor,
2064 2064 "ancestors": ancestors,
2065 2065 "_firstancestors": _firstancestors,
2066 2066 "author": author,
2067 2067 "bisect": bisect,
2068 2068 "bisected": bisected,
2069 2069 "bookmark": bookmark,
2070 2070 "branch": branch,
2071 2071 "branchpoint": branchpoint,
2072 2072 "bumped": bumped,
2073 2073 "bundle": bundle,
2074 2074 "children": children,
2075 2075 "closed": closed,
2076 2076 "contains": contains,
2077 2077 "converted": converted,
2078 2078 "date": date,
2079 2079 "desc": desc,
2080 2080 "descendants": descendants,
2081 2081 "_firstdescendants": _firstdescendants,
2082 2082 "destination": destination,
2083 2083 "divergent": divergent,
2084 2084 "draft": draft,
2085 2085 "extinct": extinct,
2086 2086 "extra": extra,
2087 2087 "file": hasfile,
2088 2088 "filelog": filelog,
2089 2089 "first": first,
2090 2090 "follow": follow,
2091 2091 "_followfirst": _followfirst,
2092 2092 "grep": grep,
2093 2093 "head": head,
2094 2094 "heads": heads,
2095 2095 "hidden": hidden,
2096 2096 "id": node_,
2097 2097 "keyword": keyword,
2098 2098 "last": last,
2099 2099 "limit": limit,
2100 2100 "_matchfiles": _matchfiles,
2101 2101 "max": maxrev,
2102 2102 "merge": merge,
2103 2103 "min": minrev,
2104 2104 "modifies": modifies,
2105 2105 "named": named,
2106 2106 "obsolete": obsolete,
2107 2107 "only": only,
2108 2108 "origin": origin,
2109 2109 "outgoing": outgoing,
2110 2110 "p1": p1,
2111 2111 "p2": p2,
2112 2112 "parents": parents,
2113 2113 "present": present,
2114 2114 "public": public,
2115 2115 "_notpublic": _notpublic,
2116 2116 "remote": remote,
2117 2117 "removes": removes,
2118 2118 "rev": rev,
2119 2119 "reverse": reverse,
2120 2120 "roots": roots,
2121 2121 "sort": sort,
2122 2122 "secret": secret,
2123 2123 "subrepo": subrepo,
2124 2124 "matching": matching,
2125 2125 "tag": tag,
2126 2126 "tagged": tagged,
2127 2127 "user": user,
2128 2128 "unstable": unstable,
2129 2129 "wdir": wdir,
2130 2130 "_list": _list,
2131 2131 "_intlist": _intlist,
2132 2132 "_hexlist": _hexlist,
2133 2133 }
2134 2134
2135 2135 # symbols which can't be used for a DoS attack for any given input
2136 2136 # (e.g. those which accept regexes as plain strings shouldn't be included)
2137 2137 # functions that just return a lot of changesets (like all) don't count here
2138 2138 safesymbols = set([
2139 2139 "adds",
2140 2140 "all",
2141 2141 "ancestor",
2142 2142 "ancestors",
2143 2143 "_firstancestors",
2144 2144 "author",
2145 2145 "bisect",
2146 2146 "bisected",
2147 2147 "bookmark",
2148 2148 "branch",
2149 2149 "branchpoint",
2150 2150 "bumped",
2151 2151 "bundle",
2152 2152 "children",
2153 2153 "closed",
2154 2154 "converted",
2155 2155 "date",
2156 2156 "desc",
2157 2157 "descendants",
2158 2158 "_firstdescendants",
2159 2159 "destination",
2160 2160 "divergent",
2161 2161 "draft",
2162 2162 "extinct",
2163 2163 "extra",
2164 2164 "file",
2165 2165 "filelog",
2166 2166 "first",
2167 2167 "follow",
2168 2168 "_followfirst",
2169 2169 "head",
2170 2170 "heads",
2171 2171 "hidden",
2172 2172 "id",
2173 2173 "keyword",
2174 2174 "last",
2175 2175 "limit",
2176 2176 "_matchfiles",
2177 2177 "max",
2178 2178 "merge",
2179 2179 "min",
2180 2180 "modifies",
2181 2181 "obsolete",
2182 2182 "only",
2183 2183 "origin",
2184 2184 "outgoing",
2185 2185 "p1",
2186 2186 "p2",
2187 2187 "parents",
2188 2188 "present",
2189 2189 "public",
2190 2190 "_notpublic",
2191 2191 "remote",
2192 2192 "removes",
2193 2193 "rev",
2194 2194 "reverse",
2195 2195 "roots",
2196 2196 "sort",
2197 2197 "secret",
2198 2198 "matching",
2199 2199 "tag",
2200 2200 "tagged",
2201 2201 "user",
2202 2202 "unstable",
2203 2203 "wdir",
2204 2204 "_list",
2205 2205 "_intlist",
2206 2206 "_hexlist",
2207 2207 ])
2208 2208
2209 2209 methods = {
2210 2210 "range": rangeset,
2211 2211 "dagrange": dagrange,
2212 2212 "string": stringset,
2213 2213 "symbol": stringset,
2214 2214 "and": andset,
2215 2215 "or": orset,
2216 2216 "not": notset,
2217 2217 "list": listset,
2218 2218 "keyvalue": keyvaluepair,
2219 2219 "func": func,
2220 2220 "ancestor": ancestorspec,
2221 2221 "parent": parentspec,
2222 2222 "parentpost": p1,
2223 2223 }
2224 2224
2225 2225 def optimize(x, small):
2226 2226 if x is None:
2227 2227 return 0, x
2228 2228
2229 2229 smallbonus = 1
2230 2230 if small:
2231 2231 smallbonus = .5
2232 2232
2233 2233 op = x[0]
2234 2234 if op == 'minus':
2235 2235 return optimize(('and', x[1], ('not', x[2])), small)
2236 2236 elif op == 'only':
2237 2237 return optimize(('func', ('symbol', 'only'),
2238 2238 ('list', x[1], x[2])), small)
2239 2239 elif op == 'onlypost':
2240 2240 return optimize(('func', ('symbol', 'only'), x[1]), small)
2241 2241 elif op == 'dagrangepre':
2242 2242 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2243 2243 elif op == 'dagrangepost':
2244 2244 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2245 2245 elif op == 'rangeall':
2246 2246 return optimize(('range', ('string', '0'), ('string', 'tip')), small)
2247 2247 elif op == 'rangepre':
2248 2248 return optimize(('range', ('string', '0'), x[1]), small)
2249 2249 elif op == 'rangepost':
2250 2250 return optimize(('range', x[1], ('string', 'tip')), small)
2251 2251 elif op == 'negate':
2252 2252 return optimize(('string',
2253 2253 '-' + getstring(x[1], _("can't negate that"))), small)
2254 2254 elif op in 'string symbol negate':
2255 2255 return smallbonus, x # single revisions are small
2256 2256 elif op == 'and':
2257 2257 wa, ta = optimize(x[1], True)
2258 2258 wb, tb = optimize(x[2], True)
2259 2259
2260 2260 # (::x and not ::y)/(not ::y and ::x) have a fast path
2261 2261 def isonly(revs, bases):
2262 2262 return (
2263 2263 revs[0] == 'func'
2264 2264 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2265 2265 and bases[0] == 'not'
2266 2266 and bases[1][0] == 'func'
2267 2267 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2268 2268
2269 2269 w = min(wa, wb)
2270 2270 if isonly(ta, tb):
2271 2271 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2272 2272 if isonly(tb, ta):
2273 2273 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2274 2274
2275 2275 if wa > wb:
2276 2276 return w, (op, tb, ta)
2277 2277 return w, (op, ta, tb)
2278 2278 elif op == 'or':
2279 2279         # fast path for machine-generated expressions that are likely to have
2280 2280 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2281 2281 ws, ts, ss = [], [], []
2282 2282 def flushss():
2283 2283 if not ss:
2284 2284 return
2285 2285 if len(ss) == 1:
2286 2286 w, t = ss[0]
2287 2287 else:
2288 2288 s = '\0'.join(t[1] for w, t in ss)
2289 2289 y = ('func', ('symbol', '_list'), ('string', s))
2290 2290 w, t = optimize(y, False)
2291 2291 ws.append(w)
2292 2292 ts.append(t)
2293 2293 del ss[:]
2294 2294 for y in x[1:]:
2295 2295 w, t = optimize(y, False)
2296 2296 if t[0] == 'string' or t[0] == 'symbol':
2297 2297 ss.append((w, t))
2298 2298 continue
2299 2299 flushss()
2300 2300 ws.append(w)
2301 2301 ts.append(t)
2302 2302 flushss()
2303 2303 if len(ts) == 1:
2304 2304 return ws[0], ts[0] # 'or' operation is fully optimized out
2305 2305 # we can't reorder trees by weight because it would change the order.
2306 2306 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2307 2307 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2308 2308 return max(ws), (op,) + tuple(ts)
2309 2309 elif op == 'not':
2310 2310 # Optimize not public() to _notpublic() because we have a fast version
2311 2311 if x[1] == ('func', ('symbol', 'public'), None):
2312 2312 newsym = ('func', ('symbol', '_notpublic'), None)
2313 2313 o = optimize(newsym, not small)
2314 2314 return o[0], o[1]
2315 2315 else:
2316 2316 o = optimize(x[1], not small)
2317 2317 return o[0], (op, o[1])
2318 2318 elif op == 'parentpost':
2319 2319 o = optimize(x[1], small)
2320 2320 return o[0], (op, o[1])
2321 2321 elif op == 'group':
2322 2322 return optimize(x[1], small)
2323 2323 elif op in 'dagrange range list parent ancestorspec':
2324 2324 if op == 'parent':
2325 2325 # x^:y means (x^) : y, not x ^ (:y)
2326 2326 post = ('parentpost', x[1])
2327 2327 if x[2][0] == 'dagrangepre':
2328 2328 return optimize(('dagrange', post, x[2][1]), small)
2329 2329 elif x[2][0] == 'rangepre':
2330 2330 return optimize(('range', post, x[2][1]), small)
2331 2331
2332 2332 wa, ta = optimize(x[1], small)
2333 2333 wb, tb = optimize(x[2], small)
2334 2334 return wa + wb, (op, ta, tb)
2335 2335 elif op == 'func':
2336 2336 f = getstring(x[1], _("not a symbol"))
2337 2337 wa, ta = optimize(x[2], small)
2338 2338 if f in ("author branch closed date desc file grep keyword "
2339 2339 "outgoing user"):
2340 2340 w = 10 # slow
2341 2341 elif f in "modifies adds removes":
2342 2342 w = 30 # slower
2343 2343 elif f == "contains":
2344 2344 w = 100 # very slow
2345 2345 elif f == "ancestor":
2346 2346 w = 1 * smallbonus
2347 2347 elif f in "reverse limit first _intlist":
2348 2348 w = 0
2349 2349 elif f in "sort":
2350 2350 w = 10 # assume most sorts look at changelog
2351 2351 else:
2352 2352 w = 1
2353 2353 return w + wa, (op, x[1], ta)
2354 2354 return 1, x
2355 2355
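# Editor's note, not part of the original module: a few concrete rewrites
# performed by optimize() above, using the same shorthand as the comments in
# the function:
#
#   'not public()'     ->  ('func', ('symbol', '_notpublic'), None)
#   '::x and not ::y'  ->  ('func', ('symbol', 'only'), ('list', <x>, <y>))
#   '1 + 2 + head()'   ->  _list('1 2') + head()
#
# The returned weight is essentially used to place the cheaper operand of an
# 'and' first; 'or' operands keep their order because it is user-visible.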
2356 2356 _aliasarg = ('func', ('symbol', '_aliasarg'))
2357 2357 def _getaliasarg(tree):
2358 2358     """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X)),
2359 2359     return X; otherwise return None.
2360 2360 """
2361 2361 if (len(tree) == 3 and tree[:2] == _aliasarg
2362 2362 and tree[2][0] == 'string'):
2363 2363 return tree[2][1]
2364 2364 return None
2365 2365
2366 2366 def _checkaliasarg(tree, known=None):
2367 2367     """Check that tree contains no _aliasarg construct, or only ones whose
2368 2368     value is in known. Used to avoid alias placeholder injection.
2369 2369 """
2370 2370 if isinstance(tree, tuple):
2371 2371 arg = _getaliasarg(tree)
2372 2372 if arg is not None and (not known or arg not in known):
2373 2373 raise error.UnknownIdentifier('_aliasarg', [])
2374 2374 for t in tree:
2375 2375 _checkaliasarg(t, known)
2376 2376
2377 2377 # the set of valid characters for the initial letter of symbols in
2378 2378 # alias declarations and definitions
2379 2379 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2380 2380 if c.isalnum() or c in '._@$' or ord(c) > 127)
2381 2381
2382 2382 def _tokenizealias(program, lookup=None):
2383 2383 """Parse alias declaration/definition into a stream of tokens
2384 2384
2385 2385     This also allows symbol names to use ``$`` as an initial letter
2386 2386     (for backward compatibility), and callers of this function should
2387 2387     examine whether ``$`` is also used for unexpected symbols.
2388 2388 """
2389 2389 return tokenize(program, lookup=lookup,
2390 2390 syminitletters=_aliassyminitletters)
2391 2391
2392 2392 def _parsealiasdecl(decl):
2393 2393 """Parse alias declaration ``decl``
2394 2394
2395 2395 This returns ``(name, tree, args, errorstr)`` tuple:
2396 2396
2397 2397     - ``name``: name of the declared alias (may be ``decl`` itself on error)
2398 2398     - ``tree``: parse result (or ``None`` on error)
2399 2399     - ``args``: list of alias argument names (or None for a symbol declaration)
2400 2400     - ``errorstr``: detail about the detected error (or None)
2401 2401
2402 2402 >>> _parsealiasdecl('foo')
2403 2403 ('foo', ('symbol', 'foo'), None, None)
2404 2404 >>> _parsealiasdecl('$foo')
2405 2405 ('$foo', None, None, "'$' not for alias arguments")
2406 2406 >>> _parsealiasdecl('foo::bar')
2407 2407 ('foo::bar', None, None, 'invalid format')
2408 2408 >>> _parsealiasdecl('foo bar')
2409 2409 ('foo bar', None, None, 'at 4: invalid token')
2410 2410 >>> _parsealiasdecl('foo()')
2411 2411 ('foo', ('func', ('symbol', 'foo')), [], None)
2412 2412 >>> _parsealiasdecl('$foo()')
2413 2413 ('$foo()', None, None, "'$' not for alias arguments")
2414 2414 >>> _parsealiasdecl('foo($1, $2)')
2415 2415 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2416 2416 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2417 2417 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2418 2418 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2419 2419 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2420 2420 >>> _parsealiasdecl('foo(bar($1, $2))')
2421 2421 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2422 2422 >>> _parsealiasdecl('foo("string")')
2423 2423 ('foo("string")', None, None, 'invalid argument list')
2424 2424 >>> _parsealiasdecl('foo($1, $2')
2425 2425 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2426 2426 >>> _parsealiasdecl('foo("string')
2427 2427 ('foo("string', None, None, 'at 5: unterminated string')
2428 2428 >>> _parsealiasdecl('foo($1, $2, $1)')
2429 2429 ('foo', None, None, 'argument names collide with each other')
2430 2430 """
2431 2431 p = parser.parser(elements)
2432 2432 try:
2433 2433 tree, pos = p.parse(_tokenizealias(decl))
2434 2434 if (pos != len(decl)):
2435 2435 raise error.ParseError(_('invalid token'), pos)
2436 2436
2437 2437 if isvalidsymbol(tree):
2438 2438 # "name = ...." style
2439 2439 name = getsymbol(tree)
2440 2440 if name.startswith('$'):
2441 2441 return (decl, None, None, _("'$' not for alias arguments"))
2442 2442 return (name, ('symbol', name), None, None)
2443 2443
2444 2444 if isvalidfunc(tree):
2445 2445 # "name(arg, ....) = ...." style
2446 2446 name = getfuncname(tree)
2447 2447 if name.startswith('$'):
2448 2448 return (decl, None, None, _("'$' not for alias arguments"))
2449 2449 args = []
2450 2450 for arg in getfuncargs(tree):
2451 2451 if not isvalidsymbol(arg):
2452 2452 return (decl, None, None, _("invalid argument list"))
2453 2453 args.append(getsymbol(arg))
2454 2454 if len(args) != len(set(args)):
2455 2455 return (name, None, None,
2456 2456 _("argument names collide with each other"))
2457 2457 return (name, ('func', ('symbol', name)), args, None)
2458 2458
2459 2459 return (decl, None, None, _("invalid format"))
2460 2460 except error.ParseError as inst:
2461 2461 return (decl, None, None, parseerrordetail(inst))
2462 2462
2463 2463 def _parsealiasdefn(defn, args):
2464 2464 """Parse alias definition ``defn``
2465 2465
2466 2466 This function also replaces alias argument references in the
2467 2467 specified definition by ``_aliasarg(ARGNAME)``.
2468 2468
2469 2469 ``args`` is a list of alias argument names, or None if the alias
2470 2470 is declared as a symbol.
2471 2471
2472 2472     This returns the parsed tree.
2473 2473
2474 2474 >>> args = ['$1', '$2', 'foo']
2475 2475 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2476 2476 (or
2477 2477 (func
2478 2478 ('symbol', '_aliasarg')
2479 2479 ('string', '$1'))
2480 2480 (func
2481 2481 ('symbol', '_aliasarg')
2482 2482 ('string', 'foo')))
2483 2483 >>> try:
2484 2484 ... _parsealiasdefn('$1 or $bar', args)
2485 2485 ... except error.ParseError, inst:
2486 2486 ... print parseerrordetail(inst)
2487 2487 at 6: '$' not for alias arguments
2488 2488 >>> args = ['$1', '$10', 'foo']
2489 2489 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2490 2490 (or
2491 2491 (func
2492 2492 ('symbol', '_aliasarg')
2493 2493 ('string', '$10'))
2494 2494 ('symbol', 'foobar'))
2495 2495 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2496 2496 (or
2497 2497 ('string', '$1')
2498 2498 ('string', 'foo'))
2499 2499 """
2500 2500 def tokenizedefn(program, lookup=None):
2501 2501 if args:
2502 2502 argset = set(args)
2503 2503 else:
2504 2504 argset = set()
2505 2505
2506 2506 for t, value, pos in _tokenizealias(program, lookup=lookup):
2507 2507 if t == 'symbol':
2508 2508 if value in argset:
2509 2509 # emulate tokenization of "_aliasarg('ARGNAME')":
2510 2510                     # "_aliasarg()" is an unknown symbol only used to separate
2511 2511 # alias argument placeholders from regular strings.
2512 2512 yield ('symbol', '_aliasarg', pos)
2513 2513 yield ('(', None, pos)
2514 2514 yield ('string', value, pos)
2515 2515 yield (')', None, pos)
2516 2516 continue
2517 2517 elif value.startswith('$'):
2518 2518 raise error.ParseError(_("'$' not for alias arguments"),
2519 2519 pos)
2520 2520 yield (t, value, pos)
2521 2521
2522 2522 p = parser.parser(elements)
2523 2523 tree, pos = p.parse(tokenizedefn(defn))
2524 2524 if pos != len(defn):
2525 2525 raise error.ParseError(_('invalid token'), pos)
2526 2526 return parser.simplifyinfixops(tree, ('or',))
2527 2527
2528 2528 class revsetalias(object):
2529 2529     # whether this alias's `error` information has already been shown or not.
2530 2530     # this avoids showing the same warning multiple times at each `findaliases`.
2531 2531 warned = False
2532 2532
2533 2533 def __init__(self, name, value):
2534 2534 '''Aliases like:
2535 2535
2536 2536 h = heads(default)
2537 2537 b($1) = ancestors($1) - ancestors(default)
2538 2538 '''
2539 2539 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2540 2540 if self.error:
2541 2541 self.error = _('failed to parse the declaration of revset alias'
2542 2542 ' "%s": %s') % (self.name, self.error)
2543 2543 return
2544 2544
2545 2545 try:
2546 2546 self.replacement = _parsealiasdefn(value, self.args)
2547 2547 # Check for placeholder injection
2548 2548 _checkaliasarg(self.replacement, self.args)
2549 2549 except error.ParseError as inst:
2550 2550 self.error = _('failed to parse the definition of revset alias'
2551 2551 ' "%s": %s') % (self.name, parseerrordetail(inst))
2552 2552
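# Editor's note, not part of the original module: revsetalias objects are built
# from the [revsetalias] config section read by findaliases() below. A
# hypothetical hgrc fragment matching the docstring above:
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# After expansion, 'b(1.5)' behaves exactly like
# 'ancestors(1.5) - ancestors(default)'.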
2553 2553 def _getalias(aliases, tree):
2554 2554 """If tree looks like an unexpanded alias, return it. Return None
2555 2555 otherwise.
2556 2556 """
2557 2557 if isinstance(tree, tuple) and tree:
2558 2558 if tree[0] == 'symbol' and len(tree) == 2:
2559 2559 name = tree[1]
2560 2560 alias = aliases.get(name)
2561 2561 if alias and alias.args is None and alias.tree == tree:
2562 2562 return alias
2563 2563 if tree[0] == 'func' and len(tree) > 1:
2564 2564 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2565 2565 name = tree[1][1]
2566 2566 alias = aliases.get(name)
2567 2567 if alias and alias.args is not None and alias.tree == tree[:2]:
2568 2568 return alias
2569 2569 return None
2570 2570
2571 2571 def _expandargs(tree, args):
2572 2572 """Replace _aliasarg instances with the substitution value of the
2573 2573 same name in args, recursively.
2574 2574 """
2575 2575 if not tree or not isinstance(tree, tuple):
2576 2576 return tree
2577 2577 arg = _getaliasarg(tree)
2578 2578 if arg is not None:
2579 2579 return args[arg]
2580 2580 return tuple(_expandargs(t, args) for t in tree)
2581 2581
2582 2582 def _expandaliases(aliases, tree, expanding, cache):
2583 2583 """Expand aliases in tree, recursively.
2584 2584
2585 2585 'aliases' is a dictionary mapping user defined aliases to
2586 2586 revsetalias objects.
2587 2587 """
2588 2588 if not isinstance(tree, tuple):
2589 2589 # Do not expand raw strings
2590 2590 return tree
2591 2591 alias = _getalias(aliases, tree)
2592 2592 if alias is not None:
2593 2593 if alias.error:
2594 2594 raise util.Abort(alias.error)
2595 2595 if alias in expanding:
2596 2596 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2597 2597 'detected') % alias.name)
2598 2598 expanding.append(alias)
2599 2599 if alias.name not in cache:
2600 2600 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2601 2601 expanding, cache)
2602 2602 result = cache[alias.name]
2603 2603 expanding.pop()
2604 2604 if alias.args is not None:
2605 2605 l = getlist(tree[2])
2606 2606 if len(l) != len(alias.args):
2607 2607 raise error.ParseError(
2608 2608 _('invalid number of arguments: %s') % len(l))
2609 2609 l = [_expandaliases(aliases, a, [], cache) for a in l]
2610 2610 result = _expandargs(result, dict(zip(alias.args, l)))
2611 2611 else:
2612 2612 result = tuple(_expandaliases(aliases, t, expanding, cache)
2613 2613 for t in tree)
2614 2614 return result
2615 2615
2616 2616 def findaliases(ui, tree, showwarning=None):
2617 2617 _checkaliasarg(tree)
2618 2618 aliases = {}
2619 2619 for k, v in ui.configitems('revsetalias'):
2620 2620 alias = revsetalias(k, v)
2621 2621 aliases[alias.name] = alias
2622 2622 tree = _expandaliases(aliases, tree, [], {})
2623 2623 if showwarning:
2624 2624         # warn about problematic (but not referenced) aliases
2625 2625 for name, alias in sorted(aliases.iteritems()):
2626 2626 if alias.error and not alias.warned:
2627 2627 showwarning(_('warning: %s\n') % (alias.error))
2628 2628 alias.warned = True
2629 2629 return tree
2630 2630
2631 2631 def foldconcat(tree):
2632 2632 """Fold elements to be concatenated by `##`
2633 2633 """
2634 2634 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2635 2635 return tree
2636 2636 if tree[0] == '_concat':
2637 2637 pending = [tree]
2638 2638 l = []
2639 2639 while pending:
2640 2640 e = pending.pop()
2641 2641 if e[0] == '_concat':
2642 2642 pending.extend(reversed(e[1:]))
2643 2643 elif e[0] in ('string', 'symbol'):
2644 2644 l.append(e[1])
2645 2645 else:
2646 2646 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2647 2647 raise error.ParseError(msg)
2648 2648 return ('string', ''.join(l))
2649 2649 else:
2650 2650 return tuple(foldconcat(t) for t in tree)
2651 2651
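# Editor's note, not part of the original module: a small sketch of what
# foldconcat() does with the '##' concatenation operator:
#
#   parse('"release-" ## version')  ->  ('_concat', ('string', 'release-'),
#                                        ('symbol', 'version'))
#   foldconcat(...)                 ->  ('string', 'release-version')
#
# Only string and symbol elements may appear under '##'; anything else raises
# a ParseError.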
2652 2652 def parse(spec, lookup=None):
2653 2653 p = parser.parser(elements)
2654 2654 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2655 2655 if pos != len(spec):
2656 2656 raise error.ParseError(_("invalid token"), pos)
2657 2657 return parser.simplifyinfixops(tree, ('or',))
2658 2658
2659 2659 def posttreebuilthook(tree, repo):
2660 2660 # hook for extensions to execute code on the optimized tree
2661 2661 pass
2662 2662
2663 2663 def match(ui, spec, repo=None):
2664 2664 if not spec:
2665 2665 raise error.ParseError(_("empty query"))
2666 2666 lookup = None
2667 2667 if repo:
2668 2668 lookup = repo.__contains__
2669 2669 tree = parse(spec, lookup)
2670 return _makematcher(ui, tree, repo)
2671
2672 def _makematcher(ui, tree, repo):
2670 2673 if ui:
2671 2674 tree = findaliases(ui, tree, showwarning=ui.warn)
2672 2675 tree = foldconcat(tree)
2673 2676 weight, tree = optimize(tree, True)
2674 2677 posttreebuilthook(tree, repo)
2675 2678 def mfunc(repo, subset=None):
2676 2679 if subset is None:
2677 2680 subset = fullreposet(repo)
2678 2681 if util.safehasattr(subset, 'isascending'):
2679 2682 result = getset(repo, subset, tree)
2680 2683 else:
2681 2684 result = getset(repo, baseset(subset), tree)
2682 2685 return result
2683 2686 return mfunc
2684 2687
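# Editor's note, not part of the original module: with parsing split out,
# match() is a thin wrapper around parse() plus _makematcher(). A minimal usage
# sketch, assuming a 'ui' object and a loaded 'repo':
#
#   m = match(ui, 'head() and not closed()', repo)  # parse and optimize once
#   revs = m(repo)                                  # full repo by default
#   revs = m(repo, subset=baseset([5, 7, 11]))      # or restrict to a subset
#
# mfunc wraps a plain sequence in a baseset so that getset() always receives a
# smartset.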
2685 2688 def formatspec(expr, *args):
2686 2689 '''
2687 2690 This is a convenience function for using revsets internally, and
2688 2691 escapes arguments appropriately. Aliases are intentionally ignored
2689 2692 so that intended expression behavior isn't accidentally subverted.
2690 2693
2691 2694 Supported arguments:
2692 2695
2693 2696 %r = revset expression, parenthesized
2694 2697 %d = int(arg), no quoting
2695 2698 %s = string(arg), escaped and single-quoted
2696 2699 %b = arg.branch(), escaped and single-quoted
2697 2700 %n = hex(arg), single-quoted
2698 2701 %% = a literal '%'
2699 2702
2700 2703 Prefixing the type with 'l' specifies a parenthesized list of that type.
2701 2704
2702 2705 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2703 2706 '(10 or 11):: and ((this()) or (that()))'
2704 2707 >>> formatspec('%d:: and not %d::', 10, 20)
2705 2708 '10:: and not 20::'
2706 2709 >>> formatspec('%ld or %ld', [], [1])
2707 2710 "_list('') or 1"
2708 2711 >>> formatspec('keyword(%s)', 'foo\\xe9')
2709 2712 "keyword('foo\\\\xe9')"
2710 2713 >>> b = lambda: 'default'
2711 2714 >>> b.branch = b
2712 2715 >>> formatspec('branch(%b)', b)
2713 2716 "branch('default')"
2714 2717 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2715 2718 "root(_list('a\\x00b\\x00c\\x00d'))"
2716 2719 '''
2717 2720
2718 2721 def quote(s):
2719 2722 return repr(str(s))
2720 2723
2721 2724 def argtype(c, arg):
2722 2725 if c == 'd':
2723 2726 return str(int(arg))
2724 2727 elif c == 's':
2725 2728 return quote(arg)
2726 2729 elif c == 'r':
2727 2730 parse(arg) # make sure syntax errors are confined
2728 2731 return '(%s)' % arg
2729 2732 elif c == 'n':
2730 2733 return quote(node.hex(arg))
2731 2734 elif c == 'b':
2732 2735 return quote(arg.branch())
2733 2736
2734 2737 def listexp(s, t):
2735 2738 l = len(s)
2736 2739 if l == 0:
2737 2740 return "_list('')"
2738 2741 elif l == 1:
2739 2742 return argtype(t, s[0])
2740 2743 elif t == 'd':
2741 2744 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2742 2745 elif t == 's':
2743 2746 return "_list('%s')" % "\0".join(s)
2744 2747 elif t == 'n':
2745 2748 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2746 2749 elif t == 'b':
2747 2750 return "_list('%s')" % "\0".join(a.branch() for a in s)
2748 2751
2749 2752 m = l // 2
2750 2753 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2751 2754
2752 2755 ret = ''
2753 2756 pos = 0
2754 2757 arg = 0
2755 2758 while pos < len(expr):
2756 2759 c = expr[pos]
2757 2760 if c == '%':
2758 2761 pos += 1
2759 2762 d = expr[pos]
2760 2763 if d == '%':
2761 2764 ret += d
2762 2765 elif d in 'dsnbr':
2763 2766 ret += argtype(d, args[arg])
2764 2767 arg += 1
2765 2768 elif d == 'l':
2766 2769 # a list of some type
2767 2770 pos += 1
2768 2771 d = expr[pos]
2769 2772 ret += listexp(list(args[arg]), d)
2770 2773 arg += 1
2771 2774 else:
2772 2775 raise util.Abort('unexpected revspec format character %s' % d)
2773 2776 else:
2774 2777 ret += c
2775 2778 pos += 1
2776 2779
2777 2780 return ret
2778 2781
2779 2782 def prettyformat(tree):
2780 2783 return parser.prettyformat(tree, ('string', 'symbol'))
2781 2784
2782 2785 def depth(tree):
2783 2786 if isinstance(tree, tuple):
2784 2787 return max(map(depth, tree)) + 1
2785 2788 else:
2786 2789 return 0
2787 2790
2788 2791 def funcsused(tree):
2789 2792 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2790 2793 return set()
2791 2794 else:
2792 2795 funcs = set()
2793 2796 for s in tree[1:]:
2794 2797 funcs |= funcsused(s)
2795 2798 if tree[0] == 'func':
2796 2799 funcs.add(tree[1][1])
2797 2800 return funcs
2798 2801
2799 2802 class abstractsmartset(object):
2800 2803
2801 2804 def __nonzero__(self):
2802 2805 """True if the smartset is not empty"""
2803 2806 raise NotImplementedError()
2804 2807
2805 2808 def __contains__(self, rev):
2806 2809 """provide fast membership testing"""
2807 2810 raise NotImplementedError()
2808 2811
2809 2812 def __iter__(self):
2810 2813 """iterate the set in the order it is supposed to be iterated"""
2811 2814 raise NotImplementedError()
2812 2815
2813 2816 # Attributes containing a function to perform a fast iteration in a given
2814 2817 # direction. A smartset can have none, one, or both defined.
2815 2818 #
2816 2819 # Default value is None instead of a function returning None to avoid
2817 2820 # initializing an iterator just for testing if a fast method exists.
2818 2821 fastasc = None
2819 2822 fastdesc = None
2820 2823
2821 2824 def isascending(self):
2822 2825 """True if the set will iterate in ascending order"""
2823 2826 raise NotImplementedError()
2824 2827
2825 2828 def isdescending(self):
2826 2829 """True if the set will iterate in descending order"""
2827 2830 raise NotImplementedError()
2828 2831
2829 2832 def min(self):
2830 2833 """return the minimum element in the set"""
2831 2834 if self.fastasc is not None:
2832 2835 for r in self.fastasc():
2833 2836 return r
2834 2837 raise ValueError('arg is an empty sequence')
2835 2838 return min(self)
2836 2839
2837 2840 def max(self):
2838 2841 """return the maximum element in the set"""
2839 2842 if self.fastdesc is not None:
2840 2843 for r in self.fastdesc():
2841 2844 return r
2842 2845 raise ValueError('arg is an empty sequence')
2843 2846 return max(self)
2844 2847
2845 2848 def first(self):
2846 2849 """return the first element in the set (user iteration perspective)
2847 2850
2848 2851 Return None if the set is empty"""
2849 2852 raise NotImplementedError()
2850 2853
2851 2854 def last(self):
2852 2855 """return the last element in the set (user iteration perspective)
2853 2856
2854 2857 Return None if the set is empty"""
2855 2858 raise NotImplementedError()
2856 2859
2857 2860 def __len__(self):
2858 2861         """return the length of the smartset
2859 2862 
2860 2863         This can be expensive on a smartset that could otherwise be lazy."""
2861 2864 raise NotImplementedError()
2862 2865
2863 2866 def reverse(self):
2864 2867 """reverse the expected iteration order"""
2865 2868 raise NotImplementedError()
2866 2869
2867 2870 def sort(self, reverse=True):
2868 2871 """get the set to iterate in an ascending or descending order"""
2869 2872 raise NotImplementedError()
2870 2873
2871 2874 def __and__(self, other):
2872 2875 """Returns a new object with the intersection of the two collections.
2873 2876
2874 2877 This is part of the mandatory API for smartset."""
2875 2878 if isinstance(other, fullreposet):
2876 2879 return self
2877 2880 return self.filter(other.__contains__, cache=False)
2878 2881
2879 2882 def __add__(self, other):
2880 2883 """Returns a new object with the union of the two collections.
2881 2884
2882 2885 This is part of the mandatory API for smartset."""
2883 2886 return addset(self, other)
2884 2887
2885 2888 def __sub__(self, other):
2886 2889         """Returns a new object with the subtraction of the two collections.
2887 2890
2888 2891 This is part of the mandatory API for smartset."""
2889 2892 c = other.__contains__
2890 2893 return self.filter(lambda r: not c(r), cache=False)
2891 2894
2892 2895 def filter(self, condition, cache=True):
2893 2896 """Returns this smartset filtered by condition as a new smartset.
2894 2897
2895 2898 `condition` is a callable which takes a revision number and returns a
2896 2899 boolean.
2897 2900
2898 2901 This is part of the mandatory API for smartset."""
2899 2902         # builtins cannot be cached, but they do not need to be
2900 2903 if cache and util.safehasattr(condition, 'func_code'):
2901 2904 condition = util.cachefunc(condition)
2902 2905 return filteredset(self, condition)
2903 2906
2904 2907 class baseset(abstractsmartset):
2905 2908     """Basic data structure that represents a revset and contains the basic
2906 2909     operations that it should be able to perform.
2907 2910
2908 2911 Every method in this class should be implemented by any smartset class.
2909 2912 """
2910 2913 def __init__(self, data=()):
2911 2914 if not isinstance(data, list):
2912 2915 data = list(data)
2913 2916 self._list = data
2914 2917 self._ascending = None
2915 2918
2916 2919 @util.propertycache
2917 2920 def _set(self):
2918 2921 return set(self._list)
2919 2922
2920 2923 @util.propertycache
2921 2924 def _asclist(self):
2922 2925 asclist = self._list[:]
2923 2926 asclist.sort()
2924 2927 return asclist
2925 2928
2926 2929 def __iter__(self):
2927 2930 if self._ascending is None:
2928 2931 return iter(self._list)
2929 2932 elif self._ascending:
2930 2933 return iter(self._asclist)
2931 2934 else:
2932 2935 return reversed(self._asclist)
2933 2936
2934 2937 def fastasc(self):
2935 2938 return iter(self._asclist)
2936 2939
2937 2940 def fastdesc(self):
2938 2941 return reversed(self._asclist)
2939 2942
2940 2943 @util.propertycache
2941 2944 def __contains__(self):
2942 2945 return self._set.__contains__
2943 2946
2944 2947 def __nonzero__(self):
2945 2948 return bool(self._list)
2946 2949
2947 2950 def sort(self, reverse=False):
2948 2951 self._ascending = not bool(reverse)
2949 2952
2950 2953 def reverse(self):
2951 2954 if self._ascending is None:
2952 2955 self._list.reverse()
2953 2956 else:
2954 2957 self._ascending = not self._ascending
2955 2958
2956 2959 def __len__(self):
2957 2960 return len(self._list)
2958 2961
2959 2962 def isascending(self):
2960 2963         """Returns True if the collection is in ascending order, False if not.
2961 2964
2962 2965 This is part of the mandatory API for smartset."""
2963 2966 if len(self) <= 1:
2964 2967 return True
2965 2968 return self._ascending is not None and self._ascending
2966 2969
2967 2970 def isdescending(self):
2968 2971         """Returns True if the collection is in descending order, False if not.
2969 2972
2970 2973 This is part of the mandatory API for smartset."""
2971 2974 if len(self) <= 1:
2972 2975 return True
2973 2976 return self._ascending is not None and not self._ascending
2974 2977
2975 2978 def first(self):
2976 2979 if self:
2977 2980 if self._ascending is None:
2978 2981 return self._list[0]
2979 2982 elif self._ascending:
2980 2983 return self._asclist[0]
2981 2984 else:
2982 2985 return self._asclist[-1]
2983 2986 return None
2984 2987
2985 2988 def last(self):
2986 2989 if self:
2987 2990 if self._ascending is None:
2988 2991 return self._list[-1]
2989 2992 elif self._ascending:
2990 2993 return self._asclist[-1]
2991 2994 else:
2992 2995 return self._asclist[0]
2993 2996 return None
2994 2997
2995 2998 def __repr__(self):
2996 2999 d = {None: '', False: '-', True: '+'}[self._ascending]
2997 3000 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2998 3001
2999 3002 class filteredset(abstractsmartset):
3000 3003 """Duck type for baseset class which iterates lazily over the revisions in
3001 3004 the subset and contains a function which tests for membership in the
3002 3005 revset
3003 3006 """
3004 3007 def __init__(self, subset, condition=lambda x: True):
3005 3008 """
3006 3009         condition: a function that decides whether a revision in the subset
3007 3010 belongs to the revset or not.
3008 3011 """
3009 3012 self._subset = subset
3010 3013 self._condition = condition
3011 3014 self._cache = {}
3012 3015
3013 3016 def __contains__(self, x):
3014 3017 c = self._cache
3015 3018 if x not in c:
3016 3019 v = c[x] = x in self._subset and self._condition(x)
3017 3020 return v
3018 3021 return c[x]
3019 3022
3020 3023 def __iter__(self):
3021 3024 return self._iterfilter(self._subset)
3022 3025
3023 3026 def _iterfilter(self, it):
3024 3027 cond = self._condition
3025 3028 for x in it:
3026 3029 if cond(x):
3027 3030 yield x
3028 3031
3029 3032 @property
3030 3033 def fastasc(self):
3031 3034 it = self._subset.fastasc
3032 3035 if it is None:
3033 3036 return None
3034 3037 return lambda: self._iterfilter(it())
3035 3038
3036 3039 @property
3037 3040 def fastdesc(self):
3038 3041 it = self._subset.fastdesc
3039 3042 if it is None:
3040 3043 return None
3041 3044 return lambda: self._iterfilter(it())
3042 3045
3043 3046 def __nonzero__(self):
3044 3047 for r in self:
3045 3048 return True
3046 3049 return False
3047 3050
3048 3051 def __len__(self):
3049 3052 # Basic implementation to be changed in future patches.
3050 3053 l = baseset([r for r in self])
3051 3054 return len(l)
3052 3055
3053 3056 def sort(self, reverse=False):
3054 3057 self._subset.sort(reverse=reverse)
3055 3058
3056 3059 def reverse(self):
3057 3060 self._subset.reverse()
3058 3061
3059 3062 def isascending(self):
3060 3063 return self._subset.isascending()
3061 3064
3062 3065 def isdescending(self):
3063 3066 return self._subset.isdescending()
3064 3067
3065 3068 def first(self):
3066 3069 for x in self:
3067 3070 return x
3068 3071 return None
3069 3072
3070 3073 def last(self):
3071 3074 it = None
3072 3075 if self.isascending():
3073 3076 it = self.fastdesc
3074 3077 elif self.isdescending():
3075 3078 it = self.fastasc
3076 3079 if it is not None:
3077 3080 for x in it():
3078 3081 return x
3079 3082 return None #empty case
3080 3083 else:
3081 3084 x = None
3082 3085 for x in self:
3083 3086 pass
3084 3087 return x
3085 3088
3086 3089 def __repr__(self):
3087 3090 return '<%s %r>' % (type(self).__name__, self._subset)
3088 3091
3089 3092 # this function will be removed, or merged to addset or orset, when
3090 3093 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3091 3094 # - or addset can handle more than two sets without balanced tree
3092 3095 def _combinesets(subsets):
3093 3096 """Create balanced tree of addsets representing union of given sets"""
3094 3097 if not subsets:
3095 3098 return baseset()
3096 3099 if len(subsets) == 1:
3097 3100 return subsets[0]
3098 3101 p = len(subsets) // 2
3099 3102 xs = _combinesets(subsets[:p])
3100 3103 ys = _combinesets(subsets[p:])
3101 3104 return addset(xs, ys)
3102 3105
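# Editor's note, not part of the original module: _combinesets() builds a
# balanced tree of addsets rather than a left-leaning chain, keeping the
# nesting depth at O(log n) for many input sets:
#
#   _combinesets([a, b, c, d])  ->  addset(addset(a, b), addset(c, d))
#
# instead of addset(addset(addset(a, b), c), d) from naive repeated addition.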
3103 3106 def _iterordered(ascending, iter1, iter2):
3104 3107 """produce an ordered iteration from two iterators with the same order
3105 3108
3106 3109     The ascending parameter is used to indicate the iteration direction.
3107 3110 """
3108 3111 choice = max
3109 3112 if ascending:
3110 3113 choice = min
3111 3114
3112 3115 val1 = None
3113 3116 val2 = None
3114 3117 try:
3115 3118 # Consume both iterators in an ordered way until one is empty
3116 3119 while True:
3117 3120 if val1 is None:
3118 3121 val1 = iter1.next()
3119 3122 if val2 is None:
3120 3123 val2 = iter2.next()
3121 3124 next = choice(val1, val2)
3122 3125 yield next
3123 3126 if val1 == next:
3124 3127 val1 = None
3125 3128 if val2 == next:
3126 3129 val2 = None
3127 3130 except StopIteration:
3128 3131 # Flush any remaining values and consume the other one
3129 3132 it = iter2
3130 3133 if val1 is not None:
3131 3134 yield val1
3132 3135 it = iter1
3133 3136 elif val2 is not None:
3134 3137 # might have been equality and both are empty
3135 3138 yield val2
3136 3139 for val in it:
3137 3140 yield val
3138 3141
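# Editor's note, not part of the original module: a small trace of
# _iterordered() merging two ascending iterators, with duplicates collapsed:
#
#   list(_iterordered(True, iter([0, 2, 3]), iter([2, 4])))  ->  [0, 2, 3, 4]
#
# With ascending=False the same logic uses max() instead of min(), so both
# inputs must already be sorted in the requested direction.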
3139 3142 class addset(abstractsmartset):
3140 3143 """Represent the addition of two sets
3141 3144
3142 3145 Wrapper structure for lazily adding two structures without losing much
3143 3146 performance on the __contains__ method
3144 3147     performance on the __contains__ method.
3145 3148 If the ascending attribute is set, that means the two structures are
3146 3149 ordered in either an ascending or descending way. Therefore, we can add
3147 3150 them maintaining the order by iterating over both at the same time
3148 3151     them while maintaining the order by iterating over both at the same time.
3149 3152 >>> xs = baseset([0, 3, 2])
3150 3153 >>> ys = baseset([5, 2, 4])
3151 3154
3152 3155 >>> rs = addset(xs, ys)
3153 3156 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3154 3157 (True, True, False, True, 0, 4)
3155 3158 >>> rs = addset(xs, baseset([]))
3156 3159 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3157 3160 (True, True, False, 0, 2)
3158 3161 >>> rs = addset(baseset([]), baseset([]))
3159 3162 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3160 3163 (False, False, None, None)
3161 3164
3162 3165 iterate unsorted:
3163 3166 >>> rs = addset(xs, ys)
3164 3167 >>> [x for x in rs] # without _genlist
3165 3168 [0, 3, 2, 5, 4]
3166 3169 >>> assert not rs._genlist
3167 3170 >>> len(rs)
3168 3171 5
3169 3172 >>> [x for x in rs] # with _genlist
3170 3173 [0, 3, 2, 5, 4]
3171 3174 >>> assert rs._genlist
3172 3175
3173 3176 iterate ascending:
3174 3177 >>> rs = addset(xs, ys, ascending=True)
3175 3178 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3176 3179 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3177 3180 >>> assert not rs._asclist
3178 3181 >>> len(rs)
3179 3182 5
3180 3183 >>> [x for x in rs], [x for x in rs.fastasc()]
3181 3184 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3182 3185 >>> assert rs._asclist
3183 3186
3184 3187 iterate descending:
3185 3188 >>> rs = addset(xs, ys, ascending=False)
3186 3189 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3187 3190 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3188 3191 >>> assert not rs._asclist
3189 3192 >>> len(rs)
3190 3193 5
3191 3194 >>> [x for x in rs], [x for x in rs.fastdesc()]
3192 3195 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3193 3196 >>> assert rs._asclist
3194 3197
3195 3198 iterate ascending without fastasc:
3196 3199 >>> rs = addset(xs, generatorset(ys), ascending=True)
3197 3200 >>> assert rs.fastasc is None
3198 3201 >>> [x for x in rs]
3199 3202 [0, 2, 3, 4, 5]
3200 3203
3201 3204 iterate descending without fastdesc:
3202 3205 >>> rs = addset(generatorset(xs), ys, ascending=False)
3203 3206 >>> assert rs.fastdesc is None
3204 3207 >>> [x for x in rs]
3205 3208 [5, 4, 3, 2, 0]
3206 3209 """
3207 3210 def __init__(self, revs1, revs2, ascending=None):
3208 3211 self._r1 = revs1
3209 3212 self._r2 = revs2
3210 3213 self._iter = None
3211 3214 self._ascending = ascending
3212 3215 self._genlist = None
3213 3216 self._asclist = None
3214 3217
3215 3218 def __len__(self):
3216 3219 return len(self._list)
3217 3220
3218 3221 def __nonzero__(self):
3219 3222 return bool(self._r1) or bool(self._r2)
3220 3223
3221 3224 @util.propertycache
3222 3225 def _list(self):
3223 3226 if not self._genlist:
3224 3227 self._genlist = baseset(iter(self))
3225 3228 return self._genlist
3226 3229
3227 3230 def __iter__(self):
3228 3231 """Iterate over both collections without repeating elements
3229 3232
3230 3233 If the ascending attribute is not set, iterate over the first one and
3231 3234 then over the second one checking for membership on the first one so we
3232 3235 don't yield any duplicates.
3233 3236
3234 3237 If the ascending attribute is set, iterate over both collections at the
3235 3238 same time, yielding only one value at a time in the given order.
3236 3239 """
3237 3240 if self._ascending is None:
3238 3241 if self._genlist:
3239 3242 return iter(self._genlist)
3240 3243 def arbitraryordergen():
3241 3244 for r in self._r1:
3242 3245 yield r
3243 3246 inr1 = self._r1.__contains__
3244 3247 for r in self._r2:
3245 3248 if not inr1(r):
3246 3249 yield r
3247 3250 return arbitraryordergen()
3248 3251 # try to use our own fast iterator if it exists
3249 3252 self._trysetasclist()
3250 3253 if self._ascending:
3251 3254 attr = 'fastasc'
3252 3255 else:
3253 3256 attr = 'fastdesc'
3254 3257 it = getattr(self, attr)
3255 3258 if it is not None:
3256 3259 return it()
3257 3260 # maybe only one of the two components supports fast iteration
3258 3261 # get iterator for _r1
3259 3262 iter1 = getattr(self._r1, attr)
3260 3263 if iter1 is None:
3261 3264 # let's avoid side effect (not sure it matters)
3262 3265 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3263 3266 else:
3264 3267 iter1 = iter1()
3265 3268 # get iterator for _r2
3266 3269 iter2 = getattr(self._r2, attr)
3267 3270 if iter2 is None:
3268 3271 # let's avoid side effect (not sure it matters)
3269 3272 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3270 3273 else:
3271 3274 iter2 = iter2()
3272 3275 return _iterordered(self._ascending, iter1, iter2)
3273 3276
3274 3277 def _trysetasclist(self):
3275 3278 """populate the _asclist attribute if possible and necessary"""
3276 3279 if self._genlist is not None and self._asclist is None:
3277 3280 self._asclist = sorted(self._genlist)
3278 3281
3279 3282 @property
3280 3283 def fastasc(self):
3281 3284 self._trysetasclist()
3282 3285 if self._asclist is not None:
3283 3286 return self._asclist.__iter__
3284 3287 iter1 = self._r1.fastasc
3285 3288 iter2 = self._r2.fastasc
3286 3289 if None in (iter1, iter2):
3287 3290 return None
3288 3291 return lambda: _iterordered(True, iter1(), iter2())
3289 3292
3290 3293 @property
3291 3294 def fastdesc(self):
3292 3295 self._trysetasclist()
3293 3296 if self._asclist is not None:
3294 3297 return self._asclist.__reversed__
3295 3298 iter1 = self._r1.fastdesc
3296 3299 iter2 = self._r2.fastdesc
3297 3300 if None in (iter1, iter2):
3298 3301 return None
3299 3302 return lambda: _iterordered(False, iter1(), iter2())
3300 3303
3301 3304 def __contains__(self, x):
3302 3305 return x in self._r1 or x in self._r2
3303 3306
3304 3307 def sort(self, reverse=False):
3305 3308 """Sort the added set
3306 3309
3307 3310 For this we use the cached list with all the generated values and if we
3308 3311 know they are ascending or descending we can sort them in a smart way.
3309 3312 """
3310 3313 self._ascending = not reverse
3311 3314
3312 3315 def isascending(self):
3313 3316 return self._ascending is not None and self._ascending
3314 3317
3315 3318 def isdescending(self):
3316 3319 return self._ascending is not None and not self._ascending
3317 3320
3318 3321 def reverse(self):
3319 3322 if self._ascending is None:
3320 3323 self._list.reverse()
3321 3324 else:
3322 3325 self._ascending = not self._ascending
3323 3326
3324 3327 def first(self):
3325 3328 for x in self:
3326 3329 return x
3327 3330 return None
3328 3331
3329 3332 def last(self):
3330 3333 self.reverse()
3331 3334 val = self.first()
3332 3335 self.reverse()
3333 3336 return val
3334 3337
3335 3338 def __repr__(self):
3336 3339 d = {None: '', False: '-', True: '+'}[self._ascending]
3337 3340 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3338 3341
3339 3342 class generatorset(abstractsmartset):
3340 3343 """Wrap a generator for lazy iteration
3341 3344
3342 3345 Wrapper structure for generators that provides lazy membership and can
3343 3346 be iterated more than once.
3344 3347 When asked for membership it generates values until either it finds the
3345 3348 requested one or has gone through all the elements in the generator
3346 3349 """
3347 3350 def __init__(self, gen, iterasc=None):
3348 3351 """
3349 3352 gen: a generator producing the values for the generatorset.
3350 3353 """
3351 3354 self._gen = gen
3352 3355 self._asclist = None
3353 3356 self._cache = {}
3354 3357 self._genlist = []
3355 3358 self._finished = False
3356 3359 self._ascending = True
3357 3360 if iterasc is not None:
3358 3361 if iterasc:
3359 3362 self.fastasc = self._iterator
3360 3363 self.__contains__ = self._asccontains
3361 3364 else:
3362 3365 self.fastdesc = self._iterator
3363 3366 self.__contains__ = self._desccontains
3364 3367
3365 3368 def __nonzero__(self):
3366 3369 # Do not use 'for r in self' because it will enforce the iteration
3367 3370 # order (default ascending), possibly unrolling a whole descending
3368 3371 # iterator.
3369 3372 if self._genlist:
3370 3373 return True
3371 3374 for r in self._consumegen():
3372 3375 return True
3373 3376 return False
3374 3377
3375 3378 def __contains__(self, x):
3376 3379 if x in self._cache:
3377 3380 return self._cache[x]
3378 3381
3379 3382 # Use new values only, as existing values would be cached.
3380 3383 for l in self._consumegen():
3381 3384 if l == x:
3382 3385 return True
3383 3386
3384 3387 self._cache[x] = False
3385 3388 return False
3386 3389
3387 3390 def _asccontains(self, x):
3388 3391 """version of contains optimised for ascending generator"""
3389 3392 if x in self._cache:
3390 3393 return self._cache[x]
3391 3394
3392 3395 # Use new values only, as existing values would be cached.
3393 3396 for l in self._consumegen():
3394 3397 if l == x:
3395 3398 return True
3396 3399 if l > x:
3397 3400 break
3398 3401
3399 3402 self._cache[x] = False
3400 3403 return False
3401 3404
3402 3405 def _desccontains(self, x):
3403 3406 """version of contains optimised for descending generator"""
3404 3407 if x in self._cache:
3405 3408 return self._cache[x]
3406 3409
3407 3410 # Use new values only, as existing values would be cached.
3408 3411 for l in self._consumegen():
3409 3412 if l == x:
3410 3413 return True
3411 3414 if l < x:
3412 3415 break
3413 3416
3414 3417 self._cache[x] = False
3415 3418 return False
3416 3419
3417 3420 def __iter__(self):
3418 3421 if self._ascending:
3419 3422 it = self.fastasc
3420 3423 else:
3421 3424 it = self.fastdesc
3422 3425 if it is not None:
3423 3426 return it()
3424 3427 # we need to consume the iterator
3425 3428 for x in self._consumegen():
3426 3429 pass
3427 3430 # recall the same code
3428 3431 return iter(self)
3429 3432
3430 3433 def _iterator(self):
3431 3434 if self._finished:
3432 3435 return iter(self._genlist)
3433 3436
3434 3437 # We have to use this complex iteration strategy to allow multiple
3435 3438 # iterations at the same time. We need to be able to catch revisions
3436 3439 # removed from _consumegen and added to genlist by another instance.
3437 3440 #
3438 3441 # Getting rid of it would provide about a 15% speed up on this
3439 3442 # iteration.
3440 3443 genlist = self._genlist
3441 3444 nextrev = self._consumegen().next
3442 3445 _len = len # cache global lookup
3443 3446 def gen():
3444 3447 i = 0
3445 3448 while True:
3446 3449 if i < _len(genlist):
3447 3450 yield genlist[i]
3448 3451 else:
3449 3452 yield nextrev()
3450 3453 i += 1
3451 3454 return gen()
3452 3455
3453 3456 def _consumegen(self):
3454 3457 cache = self._cache
3455 3458 genlist = self._genlist.append
3456 3459 for item in self._gen:
3457 3460 cache[item] = True
3458 3461 genlist(item)
3459 3462 yield item
3460 3463 if not self._finished:
3461 3464 self._finished = True
3462 3465 asc = self._genlist[:]
3463 3466 asc.sort()
3464 3467 self._asclist = asc
3465 3468 self.fastasc = asc.__iter__
3466 3469 self.fastdesc = asc.__reversed__
3467 3470
3468 3471 def __len__(self):
3469 3472 for x in self._consumegen():
3470 3473 pass
3471 3474 return len(self._genlist)
3472 3475
3473 3476 def sort(self, reverse=False):
3474 3477 self._ascending = not reverse
3475 3478
3476 3479 def reverse(self):
3477 3480 self._ascending = not self._ascending
3478 3481
3479 3482 def isascending(self):
3480 3483 return self._ascending
3481 3484
3482 3485 def isdescending(self):
3483 3486 return not self._ascending
3484 3487
3485 3488 def first(self):
3486 3489 if self._ascending:
3487 3490 it = self.fastasc
3488 3491 else:
3489 3492 it = self.fastdesc
3490 3493 if it is None:
3491 3494 # we need to consume all and try again
3492 3495 for x in self._consumegen():
3493 3496 pass
3494 3497 return self.first()
3495 3498 return next(it(), None)
3496 3499
3497 3500 def last(self):
3498 3501 if self._ascending:
3499 3502 it = self.fastdesc
3500 3503 else:
3501 3504 it = self.fastasc
3502 3505 if it is None:
3503 3506 # we need to consume all and try again
3504 3507 for x in self._consumegen():
3505 3508 pass
3506 3509 return self.last()
3507 3510 return next(it(), None)
3508 3511
3509 3512 def __repr__(self):
3510 3513 d = {False: '-', True: '+'}[self._ascending]
3511 3514 return '<%s%s>' % (type(self).__name__, d)
3512 3515
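# Illustration only (not part of the module): generatorset makes a
# one-shot generator behave like a reusable set.  Membership tests
# consume the underlying generator only as far as needed, and consumed
# values are cached so the set can be iterated again afterwards.
#
#   >>> gs = generatorset(iter([0, 1, 2, 3]), iterasc=True)
#   >>> 1 in gs             # stops consuming once 1 is produced
#   True
#   >>> list(gs)            # cached values replayed, remainder consumed
#   [0, 1, 2, 3]
#   >>> gs.last()
#   3
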
3513 3516 class spanset(abstractsmartset):
3514 3517 """Duck type for baseset class which represents a range of revisions and
3515 3518 can work lazily and without having all the range in memory
3516 3519
3517 3520 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3518 3521 notable points:
3519 3522 - when x > y it will be automatically descending,
3520 3523 - revisions filtered with this repoview will be skipped.
3521 3524
3522 3525 """
3523 3526 def __init__(self, repo, start=0, end=None):
3524 3527 """
3525 3528 start: first revision included in the set
3526 3529 (defaults to 0)
3527 3530 end: first revision excluded (last+1)
3528 3531 (defaults to len(repo))
3529 3532
3530 3533 Spanset will be descending if `end` < `start`.
3531 3534 """
3532 3535 if end is None:
3533 3536 end = len(repo)
3534 3537 self._ascending = start <= end
3535 3538 if not self._ascending:
3536 3539 start, end = end + 1, start + 1
3537 3540 self._start = start
3538 3541 self._end = end
3539 3542 self._hiddenrevs = repo.changelog.filteredrevs
3540 3543
3541 3544 def sort(self, reverse=False):
3542 3545 self._ascending = not reverse
3543 3546
3544 3547 def reverse(self):
3545 3548 self._ascending = not self._ascending
3546 3549
3547 3550 def _iterfilter(self, iterrange):
3548 3551 s = self._hiddenrevs
3549 3552 for r in iterrange:
3550 3553 if r not in s:
3551 3554 yield r
3552 3555
3553 3556 def __iter__(self):
3554 3557 if self._ascending:
3555 3558 return self.fastasc()
3556 3559 else:
3557 3560 return self.fastdesc()
3558 3561
3559 3562 def fastasc(self):
3560 3563 iterrange = xrange(self._start, self._end)
3561 3564 if self._hiddenrevs:
3562 3565 return self._iterfilter(iterrange)
3563 3566 return iter(iterrange)
3564 3567
3565 3568 def fastdesc(self):
3566 3569 iterrange = xrange(self._end - 1, self._start - 1, -1)
3567 3570 if self._hiddenrevs:
3568 3571 return self._iterfilter(iterrange)
3569 3572 return iter(iterrange)
3570 3573
3571 3574 def __contains__(self, rev):
3572 3575 hidden = self._hiddenrevs
3573 3576 return ((self._start <= rev < self._end)
3574 3577 and not (hidden and rev in hidden))
3575 3578
3576 3579 def __nonzero__(self):
3577 3580 for r in self:
3578 3581 return True
3579 3582 return False
3580 3583
3581 3584 def __len__(self):
3582 3585 if not self._hiddenrevs:
3583 3586 return abs(self._end - self._start)
3584 3587 else:
3585 3588 count = 0
3586 3589 start = self._start
3587 3590 end = self._end
3588 3591 for rev in self._hiddenrevs:
3589 3592 if (end < rev <= start) or (start <= rev < end):
3590 3593 count += 1
3591 3594 return abs(self._end - self._start) - count
3592 3595
3593 3596 def isascending(self):
3594 3597 return self._ascending
3595 3598
3596 3599 def isdescending(self):
3597 3600 return not self._ascending
3598 3601
3599 3602 def first(self):
3600 3603 if self._ascending:
3601 3604 it = self.fastasc
3602 3605 else:
3603 3606 it = self.fastdesc
3604 3607 for x in it():
3605 3608 return x
3606 3609 return None
3607 3610
3608 3611 def last(self):
3609 3612 if self._ascending:
3610 3613 it = self.fastdesc
3611 3614 else:
3612 3615 it = self.fastasc
3613 3616 for x in it():
3614 3617 return x
3615 3618 return None
3616 3619
3617 3620 def __repr__(self):
3618 3621 d = {False: '-', True: '+'}[self._ascending]
3619 3622 return '<%s%s %d:%d>' % (type(self).__name__, d,
3620 3623 self._start, self._end - 1)
3621 3624
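# Illustration only (not part of the module): spanset only needs
# len(repo) and repo.changelog.filteredrevs, so a tiny stub repo is
# enough to show the lazy range behaviour (5 revisions, revision 2
# filtered out).  The stub class names below are hypothetical.
#
#   >>> class _stubchangelog(object):
#   ...     filteredrevs = frozenset([2])
#   >>> class _stubrepo(object):
#   ...     changelog = _stubchangelog()
#   ...     def __len__(self):
#   ...         return 5
#   >>> list(spanset(_stubrepo()))           # ascending by default
#   [0, 1, 3, 4]
#   >>> list(spanset(_stubrepo(), 4, 0))     # start > end: descending
#   [4, 3, 1]
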
3622 3625 class fullreposet(spanset):
3623 3626 """a set containing all revisions in the repo
3624 3627
3625 3628 This class exists to host special optimization and magic to handle virtual
3626 3629 revisions such as "null".
3627 3630 """
3628 3631
3629 3632 def __init__(self, repo):
3630 3633 super(fullreposet, self).__init__(repo)
3631 3634
3632 3635 def __and__(self, other):
3633 3636 """As self contains the whole repo, all of the other set should also be
3634 3637 in self. Therefore `self & other = other`.
3635 3638
3636 3639 This boldly assumes the other contains valid revs only.
3637 3640 """
3638 3641 # other is not a smartset, make it so
3639 3642 if not util.safehasattr(other, 'isascending'):
3640 3643 # filter out hidden revisions
3641 3644 # (this boldly assumes all smartsets are pure)
3642 3645 #
3643 3646 # `other` was used with "&", let's assume this is a set like
3644 3647 # object.
3645 3648 other = baseset(other - self._hiddenrevs)
3646 3649
3647 3650 # XXX As fullreposet is also used as bootstrap, this is wrong.
3648 3651 #
3649 3652 # With a giveme312() revset returning [3,1,2], this makes
3650 3653 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3651 3654 # We cannot just drop it because other usages still need to sort it:
3652 3655 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3653 3656 #
3654 3657 # There are also some faulty revset implementations that rely on it
3655 3658 # (eg: children as of its state in e8075329c5fb)
3656 3659 #
3657 3660 # When we fix the two points above we can move this into the if clause
3658 3661 other.sort(reverse=self.isdescending())
3659 3662 return other
3660 3663
3661 3664 def prettyformatset(revs):
3662 3665 lines = []
3663 3666 rs = repr(revs)
3664 3667 p = 0
3665 3668 while p < len(rs):
3666 3669 q = rs.find('<', p + 1)
3667 3670 if q < 0:
3668 3671 q = len(rs)
3669 3672 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3670 3673 assert l >= 0
3671 3674 lines.append((l, rs[p:q].rstrip()))
3672 3675 p = q
3673 3676 return '\n'.join(' ' * l + s for l, s in lines)
3674 3677
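# Illustration only (not part of the module): prettyformatset() splits
# the repr of a nested smartset at each '<' and indents every fragment
# by its nesting depth (one space per level).  For a repr such as
# '<addset <spanset+ 0:4>, <baseset [5]>>' it would produce:
#
#   <addset
#    <spanset+ 0:4>,
#    <baseset [5]>>
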
3675 3678 # tell hggettext to extract docstrings from these functions:
3676 3679 i18nfunctions = symbols.values()