revset: rework 'filteredset.last'...
Pierre-Yves David
r25648:9b9877d2 default
@@ -1,3632 +1,3635 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revs.sort(reverse=True)
29 29 irevs = iter(revs)
30 30 h = []
31 31
32 32 inputrev = next(irevs, None)
33 33 if inputrev is not None:
34 34 heapq.heappush(h, -inputrev)
35 35
36 36 seen = set()
37 37 while h:
38 38 current = -heapq.heappop(h)
39 39 if current == inputrev:
40 40 inputrev = next(irevs, None)
41 41 if inputrev is not None:
42 42 heapq.heappush(h, -inputrev)
43 43 if current not in seen:
44 44 seen.add(current)
45 45 yield current
46 46 for parent in cl.parentrevs(current)[:cut]:
47 47 if parent != node.nullrev:
48 48 heapq.heappush(h, -parent)
49 49
50 50 return generatorset(iterate(), iterasc=False)
51 51
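# --- Editor's illustration (not part of upstream revset.py) ---
# _revancestors() above walks ancestors lazily in descending revision order by
# pushing *negated* revision numbers onto heapq's min-heap, so the largest
# pending rev always pops first. A minimal standalone sketch of the same walk,
# assuming a toy dict-based parent map instead of the changelog:
import heapq

def _toy_ancestors(parents, revs):
    """Yield revs and all their ancestors, highest revision first."""
    h = [-r for r in revs]
    heapq.heapify(h)
    seen = set()
    while h:
        cur = -heapq.heappop(h)
        if cur in seen:
            continue
        seen.add(cur)
        yield cur
        for p in parents.get(cur, ()):
            if p >= 0:  # skip nullrev (-1)
                heapq.heappush(h, -p)

# toy DAG: 0 <- 1 <- 2 and 0 <- 3
# list(_toy_ancestors({1: [0], 2: [1], 3: [0]}, [2, 3])) == [3, 2, 1, 0]
# --- end editor's illustration ---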
52 52 def _revdescendants(repo, revs, followfirst):
53 53 """Like revlog.descendants() but supports followfirst."""
54 54 if followfirst:
55 55 cut = 1
56 56 else:
57 57 cut = None
58 58
59 59 def iterate():
60 60 cl = repo.changelog
61 61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
62 62 # smartset (and if it is not, it should.)
63 63 first = min(revs)
64 64 nullrev = node.nullrev
65 65 if first == nullrev:
66 66 # Are there nodes with a null first parent and a non-null
67 67 # second one? Maybe. Do we care? Probably not.
68 68 for i in cl:
69 69 yield i
70 70 else:
71 71 seen = set(revs)
72 72 for i in cl.revs(first + 1):
73 73 for x in cl.parentrevs(i)[:cut]:
74 74 if x != nullrev and x in seen:
75 75 seen.add(i)
76 76 yield i
77 77 break
78 78
79 79 return generatorset(iterate(), iterasc=True)
80 80
81 81 def _revsbetween(repo, roots, heads):
82 82 """Return all paths between roots and heads, inclusive of both endpoint
83 83 sets."""
84 84 if not roots:
85 85 return baseset()
86 86 parentrevs = repo.changelog.parentrevs
87 87 visit = list(heads)
88 88 reachable = set()
89 89 seen = {}
90 90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
91 91 # (and if it is not, it should.)
92 92 minroot = min(roots)
93 93 roots = set(roots)
94 94 # prefetch all the things! (because python is slow)
95 95 reached = reachable.add
96 96 dovisit = visit.append
97 97 nextvisit = visit.pop
98 98 # open-code the post-order traversal due to the tiny size of
99 99 # sys.getrecursionlimit()
100 100 while visit:
101 101 rev = nextvisit()
102 102 if rev in roots:
103 103 reached(rev)
104 104 parents = parentrevs(rev)
105 105 seen[rev] = parents
106 106 for parent in parents:
107 107 if parent >= minroot and parent not in seen:
108 108 dovisit(parent)
109 109 if not reachable:
110 110 return baseset()
111 111 for rev in sorted(seen):
112 112 for parent in seen[rev]:
113 113 if parent in reachable:
114 114 reached(rev)
115 115 return baseset(sorted(reachable))
116 116
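# --- Editor's illustration (not part of upstream revset.py) ---
# _revsbetween() above makes two passes: walk up from the heads recording the
# parents of every visited rev, then keep each visited rev that is a root or
# has a parent already known to reach a root. The same idea on a toy parent
# map (the minroot pruning is omitted for brevity):
def _toy_revsbetween(parents, roots, heads):
    seen = {}
    visit = list(heads)
    while visit:
        rev = visit.pop()
        ps = parents.get(rev, ())
        seen[rev] = ps
        for p in ps:
            if p not in seen:
                visit.append(p)
    reachable = set(r for r in seen if r in roots)
    for rev in sorted(seen):
        if any(p in reachable for p in seen[rev]):
            reachable.add(rev)
    return sorted(reachable)

# toy DAG: 0 <- 1 <- 2 and 0 <- 3; between root {0} and head [2]:
# _toy_revsbetween({1: [0], 2: [1], 3: [0]}, {0}, [2]) == [0, 1, 2]
# --- end editor's illustration ---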
117 117 elements = {
118 118 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
119 119 "##": (20, None, ("_concat", 20)),
120 120 "~": (18, None, ("ancestor", 18)),
121 121 "^": (18, None, ("parent", 18), ("parentpost", 18)),
122 122 "-": (5, ("negate", 19), ("minus", 5)),
123 123 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
124 124 ("dagrangepost", 17)),
125 125 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
126 126 ("dagrangepost", 17)),
127 127 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
128 128 "not": (10, ("not", 10)),
129 129 "!": (10, ("not", 10)),
130 130 "and": (5, None, ("and", 5)),
131 131 "&": (5, None, ("and", 5)),
132 132 "%": (5, None, ("only", 5), ("onlypost", 5)),
133 133 "or": (4, None, ("or", 4)),
134 134 "|": (4, None, ("or", 4)),
135 135 "+": (4, None, ("or", 4)),
136 136 ",": (2, None, ("list", 2)),
137 137 ")": (0, None, None),
138 138 "symbol": (0, ("symbol",), None),
139 139 "string": (0, ("string",), None),
140 140 "end": (0, None, None),
141 141 }
142 142
143 143 keywords = set(['and', 'or', 'not'])
144 144
145 145 # default set of valid characters for the initial letter of symbols
146 146 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
147 147 if c.isalnum() or c in '._@' or ord(c) > 127)
148 148
149 149 # default set of valid characters for non-initial letters of symbols
150 150 _symletters = set(c for c in [chr(i) for i in xrange(256)]
151 151 if c.isalnum() or c in '-._/@' or ord(c) > 127)
152 152
153 153 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
154 154 '''
155 155 Parse a revset statement into a stream of tokens
156 156
157 157 ``syminitletters`` is the set of valid characters for the initial
158 158 letter of symbols.
159 159
160 160 By default, character ``c`` is recognized as valid for initial
161 161 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
162 162
163 163 ``symletters`` is the set of valid characters for non-initial
164 164 letters of symbols.
165 165
166 166 By default, character ``c`` is recognized as valid for non-initial
167 167 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
168 168
169 169 Check that @ is a valid unquoted token character (issue3686):
170 170 >>> list(tokenize("@::"))
171 171 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
172 172
173 173 '''
174 174 if syminitletters is None:
175 175 syminitletters = _syminitletters
176 176 if symletters is None:
177 177 symletters = _symletters
178 178
179 179 pos, l = 0, len(program)
180 180 while pos < l:
181 181 c = program[pos]
182 182 if c.isspace(): # skip inter-token whitespace
183 183 pass
184 184 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
185 185 yield ('::', None, pos)
186 186 pos += 1 # skip ahead
187 187 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
188 188 yield ('..', None, pos)
189 189 pos += 1 # skip ahead
190 190 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
191 191 yield ('##', None, pos)
192 192 pos += 1 # skip ahead
193 193 elif c in "():,-|&+!~^%": # handle simple operators
194 194 yield (c, None, pos)
195 195 elif (c in '"\'' or c == 'r' and
196 196 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
197 197 if c == 'r':
198 198 pos += 1
199 199 c = program[pos]
200 200 decode = lambda x: x
201 201 else:
202 202 decode = lambda x: x.decode('string-escape')
203 203 pos += 1
204 204 s = pos
205 205 while pos < l: # find closing quote
206 206 d = program[pos]
207 207 if d == '\\': # skip over escaped characters
208 208 pos += 2
209 209 continue
210 210 if d == c:
211 211 yield ('string', decode(program[s:pos]), s)
212 212 break
213 213 pos += 1
214 214 else:
215 215 raise error.ParseError(_("unterminated string"), s)
216 216 # gather up a symbol/keyword
217 217 elif c in syminitletters:
218 218 s = pos
219 219 pos += 1
220 220 while pos < l: # find end of symbol
221 221 d = program[pos]
222 222 if d not in symletters:
223 223 break
224 224 if d == '.' and program[pos - 1] == '.': # special case for ..
225 225 pos -= 1
226 226 break
227 227 pos += 1
228 228 sym = program[s:pos]
229 229 if sym in keywords: # operator keywords
230 230 yield (sym, None, s)
231 231 elif '-' in sym:
232 232 # some jerk gave us foo-bar-baz, try to check if it's a symbol
233 233 if lookup and lookup(sym):
234 234 # looks like a real symbol
235 235 yield ('symbol', sym, s)
236 236 else:
237 237 # looks like an expression
238 238 parts = sym.split('-')
239 239 for p in parts[:-1]:
240 240 if p: # possible consecutive -
241 241 yield ('symbol', p, s)
242 242 s += len(p)
243 243 yield ('-', None, pos)
244 244 s += 1
245 245 if parts[-1]: # possible trailing -
246 246 yield ('symbol', parts[-1], s)
247 247 else:
248 248 yield ('symbol', sym, s)
249 249 pos -= 1
250 250 else:
251 251 raise error.ParseError(_("syntax error in revset '%s'") %
252 252 program, pos)
253 253 pos += 1
254 254 yield ('end', None, pos)
255 255
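# --- Editor's note (not part of upstream revset.py) ---
# tokenize() yields (type, value, position) tuples; for a slightly richer
# expression than the doctest above, the expected stream looks like:
#
#   >>> list(tokenize("tip~2 and not merge()"))
#   [('symbol', 'tip', 0), ('~', None, 3), ('symbol', '2', 4),
#    ('and', None, 6), ('not', None, 10), ('symbol', 'merge', 14),
#    ('(', None, 19), (')', None, 20), ('end', None, 21)]
# --- end editor's note ---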
256 256 def parseerrordetail(inst):
257 257 """Compose error message from specified ParseError object
258 258 """
259 259 if len(inst.args) > 1:
260 260 return _('at %s: %s') % (inst.args[1], inst.args[0])
261 261 else:
262 262 return inst.args[0]
263 263
264 264 # helpers
265 265
266 266 def getstring(x, err):
267 267 if x and (x[0] == 'string' or x[0] == 'symbol'):
268 268 return x[1]
269 269 raise error.ParseError(err)
270 270
271 271 def getlist(x):
272 272 if not x:
273 273 return []
274 274 if x[0] == 'list':
275 275 return getlist(x[1]) + [x[2]]
276 276 return [x]
277 277
278 278 def getargs(x, min, max, err):
279 279 l = getlist(x)
280 280 if len(l) < min or (max >= 0 and len(l) > max):
281 281 raise error.ParseError(err)
282 282 return l
283 283
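# --- Editor's note (not part of upstream revset.py) ---
# The parser nests comma-separated arguments into left-leaning 'list' nodes,
# so for an expression such as f(a, b, c) the argument tree is (assuming the
# usual ('symbol', name) leaves):
#
#   ('list', ('list', ('symbol', 'a'), ('symbol', 'b')), ('symbol', 'c'))
#
# getlist() above flattens that back into
#   [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]
# and getargs() additionally enforces the expected argument count.
# --- end editor's note ---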
284 284 def isvalidsymbol(tree):
285 285 """Examine whether specified ``tree`` is valid ``symbol`` or not
286 286 """
287 287 return tree[0] == 'symbol' and len(tree) > 1
288 288
289 289 def getsymbol(tree):
290 290 """Get symbol name from valid ``symbol`` in ``tree``
291 291
292 292 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
293 293 """
294 294 return tree[1]
295 295
296 296 def isvalidfunc(tree):
297 297 """Examine whether specified ``tree`` is valid ``func`` or not
298 298 """
299 299 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
300 300
301 301 def getfuncname(tree):
302 302 """Get function name from valid ``func`` in ``tree``
303 303
304 304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 305 """
306 306 return getsymbol(tree[1])
307 307
308 308 def getfuncargs(tree):
309 309 """Get list of function arguments from valid ``func`` in ``tree``
310 310
311 311 This assumes that ``tree`` is already examined by ``isvalidfunc``.
312 312 """
313 313 if len(tree) > 2:
314 314 return getlist(tree[2])
315 315 else:
316 316 return []
317 317
318 318 def getset(repo, subset, x):
319 319 if not x:
320 320 raise error.ParseError(_("missing argument"))
321 321 s = methods[x[0]](repo, subset, *x[1:])
322 322 if util.safehasattr(s, 'isascending'):
323 323 return s
324 324 if (repo.ui.configbool('devel', 'all-warnings')
325 325 or repo.ui.configbool('devel', 'old-revset')):
326 326 # else case should not happen, because all non-func are internal,
327 327 # ignoring for now.
328 328 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
329 329 repo.ui.develwarn('revset "%s" use list instead of smartset, '
330 330 '(upgrade your code)' % x[1][1])
331 331 return baseset(s)
332 332
333 333 def _getrevsource(repo, r):
334 334 extra = repo[r].extra()
335 335 for label in ('source', 'transplant_source', 'rebase_source'):
336 336 if label in extra:
337 337 try:
338 338 return repo[extra[label]].rev()
339 339 except error.RepoLookupError:
340 340 pass
341 341 return None
342 342
343 343 # operator methods
344 344
345 345 def stringset(repo, subset, x):
346 346 x = repo[x].rev()
347 347 if (x in subset
348 348 or x == node.nullrev and isinstance(subset, fullreposet)):
349 349 return baseset([x])
350 350 return baseset()
351 351
352 352 def rangeset(repo, subset, x, y):
353 353 m = getset(repo, fullreposet(repo), x)
354 354 n = getset(repo, fullreposet(repo), y)
355 355
356 356 if not m or not n:
357 357 return baseset()
358 358 m, n = m.first(), n.last()
359 359
360 360 if m < n:
361 361 r = spanset(repo, m, n + 1)
362 362 else:
363 363 r = spanset(repo, m, n - 1)
364 364 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
365 365 # necessary to ensure we preserve the order in subset.
366 366 #
367 367 # This has performance implication, carrying the sorting over when possible
368 368 # would be more efficient.
369 369 return r & subset
370 370
371 371 def dagrange(repo, subset, x, y):
372 372 r = fullreposet(repo)
373 373 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
374 374 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
375 375 # necessary to ensure we preserve the order in subset.
376 376 return xs & subset
377 377
378 378 def andset(repo, subset, x, y):
379 379 return getset(repo, getset(repo, subset, x), y)
380 380
381 381 def orset(repo, subset, *xs):
382 382 rs = [getset(repo, subset, x) for x in xs]
383 383 return _combinesets(rs)
384 384
385 385 def notset(repo, subset, x):
386 386 return subset - getset(repo, subset, x)
387 387
388 388 def listset(repo, subset, a, b):
389 389 raise error.ParseError(_("can't use a list in this context"))
390 390
391 391 def func(repo, subset, a, b):
392 392 if a[0] == 'symbol' and a[1] in symbols:
393 393 return symbols[a[1]](repo, subset, b)
394 394
395 395 keep = lambda fn: getattr(fn, '__doc__', None) is not None
396 396
397 397 syms = [s for (s, fn) in symbols.items() if keep(fn)]
398 398 raise error.UnknownIdentifier(a[1], syms)
399 399
400 400 # functions
401 401
402 402 def adds(repo, subset, x):
403 403 """``adds(pattern)``
404 404 Changesets that add a file matching pattern.
405 405
406 406 The pattern without explicit kind like ``glob:`` is expected to be
407 407 relative to the current directory and match against a file or a
408 408 directory.
409 409 """
410 410 # i18n: "adds" is a keyword
411 411 pat = getstring(x, _("adds requires a pattern"))
412 412 return checkstatus(repo, subset, pat, 1)
413 413
414 414 def ancestor(repo, subset, x):
415 415 """``ancestor(*changeset)``
416 416 A greatest common ancestor of the changesets.
417 417
418 418 Accepts 0 or more changesets.
419 419 Will return empty list when passed no args.
420 420 Greatest common ancestor of a single changeset is that changeset.
421 421 """
422 422 # i18n: "ancestor" is a keyword
423 423 l = getlist(x)
424 424 rl = fullreposet(repo)
425 425 anc = None
426 426
427 427 # (getset(repo, rl, i) for i in l) generates a list of lists
428 428 for revs in (getset(repo, rl, i) for i in l):
429 429 for r in revs:
430 430 if anc is None:
431 431 anc = repo[r]
432 432 else:
433 433 anc = anc.ancestor(repo[r])
434 434
435 435 if anc is not None and anc.rev() in subset:
436 436 return baseset([anc.rev()])
437 437 return baseset()
438 438
439 439 def _ancestors(repo, subset, x, followfirst=False):
440 440 heads = getset(repo, fullreposet(repo), x)
441 441 if not heads:
442 442 return baseset()
443 443 s = _revancestors(repo, heads, followfirst)
444 444 return subset & s
445 445
446 446 def ancestors(repo, subset, x):
447 447 """``ancestors(set)``
448 448 Changesets that are ancestors of a changeset in set.
449 449 """
450 450 return _ancestors(repo, subset, x)
451 451
452 452 def _firstancestors(repo, subset, x):
453 453 # ``_firstancestors(set)``
454 454 # Like ``ancestors(set)`` but follows only the first parents.
455 455 return _ancestors(repo, subset, x, followfirst=True)
456 456
457 457 def ancestorspec(repo, subset, x, n):
458 458 """``set~n``
459 459 Changesets that are the Nth ancestor (first parents only) of a changeset
460 460 in set.
461 461 """
462 462 try:
463 463 n = int(n[1])
464 464 except (TypeError, ValueError):
465 465 raise error.ParseError(_("~ expects a number"))
466 466 ps = set()
467 467 cl = repo.changelog
468 468 for r in getset(repo, fullreposet(repo), x):
469 469 for i in range(n):
470 470 r = cl.parentrevs(r)[0]
471 471 ps.add(r)
472 472 return subset & ps
473 473
474 474 def author(repo, subset, x):
475 475 """``author(string)``
476 476 Alias for ``user(string)``.
477 477 """
478 478 # i18n: "author" is a keyword
479 479 n = encoding.lower(getstring(x, _("author requires a string")))
480 480 kind, pattern, matcher = _substringmatcher(n)
481 481 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
482 482
483 483 def bisect(repo, subset, x):
484 484 """``bisect(string)``
485 485 Changesets marked in the specified bisect status:
486 486
487 487 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
488 488 - ``goods``, ``bads`` : csets topologically good/bad
489 489 - ``range`` : csets taking part in the bisection
490 490 - ``pruned`` : csets that are goods, bads or skipped
491 491 - ``untested`` : csets whose fate is yet unknown
492 492 - ``ignored`` : csets ignored due to DAG topology
493 493 - ``current`` : the cset currently being bisected
494 494 """
495 495 # i18n: "bisect" is a keyword
496 496 status = getstring(x, _("bisect requires a string")).lower()
497 497 state = set(hbisect.get(repo, status))
498 498 return subset & state
499 499
500 500 # Backward-compatibility
501 501 # - no help entry so that we do not advertise it any more
502 502 def bisected(repo, subset, x):
503 503 return bisect(repo, subset, x)
504 504
505 505 def bookmark(repo, subset, x):
506 506 """``bookmark([name])``
507 507 The named bookmark or all bookmarks.
508 508
509 509 If `name` starts with `re:`, the remainder of the name is treated as
510 510 a regular expression. To match a bookmark that actually starts with `re:`,
511 511 use the prefix `literal:`.
512 512 """
513 513 # i18n: "bookmark" is a keyword
514 514 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
515 515 if args:
516 516 bm = getstring(args[0],
517 517 # i18n: "bookmark" is a keyword
518 518 _('the argument to bookmark must be a string'))
519 519 kind, pattern, matcher = _stringmatcher(bm)
520 520 bms = set()
521 521 if kind == 'literal':
522 522 bmrev = repo._bookmarks.get(pattern, None)
523 523 if not bmrev:
524 524 raise error.RepoLookupError(_("bookmark '%s' does not exist")
525 525 % bm)
526 526 bms.add(repo[bmrev].rev())
527 527 else:
528 528 matchrevs = set()
529 529 for name, bmrev in repo._bookmarks.iteritems():
530 530 if matcher(name):
531 531 matchrevs.add(bmrev)
532 532 if not matchrevs:
533 533 raise error.RepoLookupError(_("no bookmarks exist"
534 534 " that match '%s'") % pattern)
535 535 for bmrev in matchrevs:
536 536 bms.add(repo[bmrev].rev())
537 537 else:
538 538 bms = set([repo[r].rev()
539 539 for r in repo._bookmarks.values()])
540 540 bms -= set([node.nullrev])
541 541 return subset & bms
542 542
543 543 def branch(repo, subset, x):
544 544 """``branch(string or set)``
545 545 All changesets belonging to the given branch or the branches of the given
546 546 changesets.
547 547
548 548 If `string` starts with `re:`, the remainder of the name is treated as
549 549 a regular expression. To match a branch that actually starts with `re:`,
550 550 use the prefix `literal:`.
551 551 """
552 552 getbi = repo.revbranchcache().branchinfo
553 553
554 554 try:
555 555 b = getstring(x, '')
556 556 except error.ParseError:
557 557 # not a string, but another revspec, e.g. tip()
558 558 pass
559 559 else:
560 560 kind, pattern, matcher = _stringmatcher(b)
561 561 if kind == 'literal':
562 562 # note: falls through to the revspec case if no branch with
563 563 # this name exists
564 564 if pattern in repo.branchmap():
565 565 return subset.filter(lambda r: matcher(getbi(r)[0]))
566 566 else:
567 567 return subset.filter(lambda r: matcher(getbi(r)[0]))
568 568
569 569 s = getset(repo, fullreposet(repo), x)
570 570 b = set()
571 571 for r in s:
572 572 b.add(getbi(r)[0])
573 573 c = s.__contains__
574 574 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
575 575
576 576 def bumped(repo, subset, x):
577 577 """``bumped()``
578 578 Mutable changesets marked as successors of public changesets.
579 579
580 580 Only non-public and non-obsolete changesets can be `bumped`.
581 581 """
582 582 # i18n: "bumped" is a keyword
583 583 getargs(x, 0, 0, _("bumped takes no arguments"))
584 584 bumped = obsmod.getrevs(repo, 'bumped')
585 585 return subset & bumped
586 586
587 587 def bundle(repo, subset, x):
588 588 """``bundle()``
589 589 Changesets in the bundle.
590 590
591 591 Bundle must be specified by the -R option."""
592 592
593 593 try:
594 594 bundlerevs = repo.changelog.bundlerevs
595 595 except AttributeError:
596 596 raise util.Abort(_("no bundle provided - specify with -R"))
597 597 return subset & bundlerevs
598 598
599 599 def checkstatus(repo, subset, pat, field):
600 600 hasset = matchmod.patkind(pat) == 'set'
601 601
602 602 mcache = [None]
603 603 def matches(x):
604 604 c = repo[x]
605 605 if not mcache[0] or hasset:
606 606 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
607 607 m = mcache[0]
608 608 fname = None
609 609 if not m.anypats() and len(m.files()) == 1:
610 610 fname = m.files()[0]
611 611 if fname is not None:
612 612 if fname not in c.files():
613 613 return False
614 614 else:
615 615 for f in c.files():
616 616 if m(f):
617 617 break
618 618 else:
619 619 return False
620 620 files = repo.status(c.p1().node(), c.node())[field]
621 621 if fname is not None:
622 622 if fname in files:
623 623 return True
624 624 else:
625 625 for f in files:
626 626 if m(f):
627 627 return True
628 628
629 629 return subset.filter(matches)
630 630
631 631 def _children(repo, narrow, parentset):
632 632 if not parentset:
633 633 return baseset()
634 634 cs = set()
635 635 pr = repo.changelog.parentrevs
636 636 minrev = parentset.min()
637 637 for r in narrow:
638 638 if r <= minrev:
639 639 continue
640 640 for p in pr(r):
641 641 if p in parentset:
642 642 cs.add(r)
643 643 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
644 644 # This does not break because of other fullreposet misbehavior.
645 645 return baseset(cs)
646 646
647 647 def children(repo, subset, x):
648 648 """``children(set)``
649 649 Child changesets of changesets in set.
650 650 """
651 651 s = getset(repo, fullreposet(repo), x)
652 652 cs = _children(repo, subset, s)
653 653 return subset & cs
654 654
655 655 def closed(repo, subset, x):
656 656 """``closed()``
657 657 Changeset is closed.
658 658 """
659 659 # i18n: "closed" is a keyword
660 660 getargs(x, 0, 0, _("closed takes no arguments"))
661 661 return subset.filter(lambda r: repo[r].closesbranch())
662 662
663 663 def contains(repo, subset, x):
664 664 """``contains(pattern)``
665 665 The revision's manifest contains a file matching pattern (but might not
666 666 modify it). See :hg:`help patterns` for information about file patterns.
667 667
668 668 The pattern without explicit kind like ``glob:`` is expected to be
669 669 relative to the current directory and match against a file exactly
670 670 for efficiency.
671 671 """
672 672 # i18n: "contains" is a keyword
673 673 pat = getstring(x, _("contains requires a pattern"))
674 674
675 675 def matches(x):
676 676 if not matchmod.patkind(pat):
677 677 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
678 678 if pats in repo[x]:
679 679 return True
680 680 else:
681 681 c = repo[x]
682 682 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
683 683 for f in c.manifest():
684 684 if m(f):
685 685 return True
686 686 return False
687 687
688 688 return subset.filter(matches)
689 689
690 690 def converted(repo, subset, x):
691 691 """``converted([id])``
692 692 Changesets converted from the given identifier in the old repository if
693 693 present, or all converted changesets if no identifier is specified.
694 694 """
695 695
696 696 # There is exactly no chance of resolving the revision, so do a simple
697 697 # string compare and hope for the best
698 698
699 699 rev = None
700 700 # i18n: "converted" is a keyword
701 701 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
702 702 if l:
703 703 # i18n: "converted" is a keyword
704 704 rev = getstring(l[0], _('converted requires a revision'))
705 705
706 706 def _matchvalue(r):
707 707 source = repo[r].extra().get('convert_revision', None)
708 708 return source is not None and (rev is None or source.startswith(rev))
709 709
710 710 return subset.filter(lambda r: _matchvalue(r))
711 711
712 712 def date(repo, subset, x):
713 713 """``date(interval)``
714 714 Changesets within the interval, see :hg:`help dates`.
715 715 """
716 716 # i18n: "date" is a keyword
717 717 ds = getstring(x, _("date requires a string"))
718 718 dm = util.matchdate(ds)
719 719 return subset.filter(lambda x: dm(repo[x].date()[0]))
720 720
721 721 def desc(repo, subset, x):
722 722 """``desc(string)``
723 723 Search commit message for string. The match is case-insensitive.
724 724 """
725 725 # i18n: "desc" is a keyword
726 726 ds = encoding.lower(getstring(x, _("desc requires a string")))
727 727
728 728 def matches(x):
729 729 c = repo[x]
730 730 return ds in encoding.lower(c.description())
731 731
732 732 return subset.filter(matches)
733 733
734 734 def _descendants(repo, subset, x, followfirst=False):
735 735 roots = getset(repo, fullreposet(repo), x)
736 736 if not roots:
737 737 return baseset()
738 738 s = _revdescendants(repo, roots, followfirst)
739 739
740 740 # Both sets need to be ascending in order to lazily return the union
741 741 # in the correct order.
742 742 base = subset & roots
743 743 desc = subset & s
744 744 result = base + desc
745 745 if subset.isascending():
746 746 result.sort()
747 747 elif subset.isdescending():
748 748 result.sort(reverse=True)
749 749 else:
750 750 result = subset & result
751 751 return result
752 752
753 753 def descendants(repo, subset, x):
754 754 """``descendants(set)``
755 755 Changesets which are descendants of changesets in set.
756 756 """
757 757 return _descendants(repo, subset, x)
758 758
759 759 def _firstdescendants(repo, subset, x):
760 760 # ``_firstdescendants(set)``
761 761 # Like ``descendants(set)`` but follows only the first parents.
762 762 return _descendants(repo, subset, x, followfirst=True)
763 763
764 764 def destination(repo, subset, x):
765 765 """``destination([set])``
766 766 Changesets that were created by a graft, transplant or rebase operation,
767 767 with the given revisions specified as the source. Omitting the optional set
768 768 is the same as passing all().
769 769 """
770 770 if x is not None:
771 771 sources = getset(repo, fullreposet(repo), x)
772 772 else:
773 773 sources = fullreposet(repo)
774 774
775 775 dests = set()
776 776
777 777 # subset contains all of the possible destinations that can be returned, so
778 778 # iterate over them and see if their source(s) were provided in the arg set.
779 779 # Even if the immediate src of r is not in the arg set, src's source (or
780 780 # further back) may be. Scanning back further than the immediate src allows
781 781 # transitive transplants and rebases to yield the same results as transitive
782 782 # grafts.
783 783 for r in subset:
784 784 src = _getrevsource(repo, r)
785 785 lineage = None
786 786
787 787 while src is not None:
788 788 if lineage is None:
789 789 lineage = list()
790 790
791 791 lineage.append(r)
792 792
793 793 # The visited lineage is a match if the current source is in the arg
794 794 # set. Since every candidate dest is visited by way of iterating
795 795 # subset, any dests further back in the lineage will be tested by a
796 796 # different iteration over subset. Likewise, if the src was already
797 797 # selected, the current lineage can be selected without going back
798 798 # further.
799 799 if src in sources or src in dests:
800 800 dests.update(lineage)
801 801 break
802 802
803 803 r = src
804 804 src = _getrevsource(repo, r)
805 805
806 806 return subset.filter(dests.__contains__)
807 807
808 808 def divergent(repo, subset, x):
809 809 """``divergent()``
810 810 Final successors of changesets with an alternative set of final successors.
811 811 """
812 812 # i18n: "divergent" is a keyword
813 813 getargs(x, 0, 0, _("divergent takes no arguments"))
814 814 divergent = obsmod.getrevs(repo, 'divergent')
815 815 return subset & divergent
816 816
817 817 def extinct(repo, subset, x):
818 818 """``extinct()``
819 819 Obsolete changesets with obsolete descendants only.
820 820 """
821 821 # i18n: "extinct" is a keyword
822 822 getargs(x, 0, 0, _("extinct takes no arguments"))
823 823 extincts = obsmod.getrevs(repo, 'extinct')
824 824 return subset & extincts
825 825
826 826 def extra(repo, subset, x):
827 827 """``extra(label, [value])``
828 828 Changesets with the given label in the extra metadata, with the given
829 829 optional value.
830 830
831 831 If `value` starts with `re:`, the remainder of the value is treated as
832 832 a regular expression. To match a value that actually starts with `re:`,
833 833 use the prefix `literal:`.
834 834 """
835 835
836 836 # i18n: "extra" is a keyword
837 837 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
838 838 # i18n: "extra" is a keyword
839 839 label = getstring(l[0], _('first argument to extra must be a string'))
840 840 value = None
841 841
842 842 if len(l) > 1:
843 843 # i18n: "extra" is a keyword
844 844 value = getstring(l[1], _('second argument to extra must be a string'))
845 845 kind, value, matcher = _stringmatcher(value)
846 846
847 847 def _matchvalue(r):
848 848 extra = repo[r].extra()
849 849 return label in extra and (value is None or matcher(extra[label]))
850 850
851 851 return subset.filter(lambda r: _matchvalue(r))
852 852
853 853 def filelog(repo, subset, x):
854 854 """``filelog(pattern)``
855 855 Changesets connected to the specified filelog.
856 856
857 857 For performance reasons, visits only revisions mentioned in the file-level
858 858 filelog, rather than filtering through all changesets (much faster, but
859 859 doesn't include deletes or duplicate changes). For a slower, more accurate
860 860 result, use ``file()``.
861 861
862 862 The pattern without explicit kind like ``glob:`` is expected to be
863 863 relative to the current directory and match against a file exactly
864 864 for efficiency.
865 865
866 866 If some linkrev points to revisions filtered by the current repoview, we'll
867 867 work around it to return a non-filtered value.
868 868 """
869 869
870 870 # i18n: "filelog" is a keyword
871 871 pat = getstring(x, _("filelog requires a pattern"))
872 872 s = set()
873 873 cl = repo.changelog
874 874
875 875 if not matchmod.patkind(pat):
876 876 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
877 877 files = [f]
878 878 else:
879 879 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
880 880 files = (f for f in repo[None] if m(f))
881 881
882 882 for f in files:
883 883 backrevref = {} # final value for: filerev -> changerev
884 884 lowestchild = {} # lowest known filerev child of a filerev
885 885 delayed = [] # filerev with filtered linkrev, for post-processing
886 886 lowesthead = None # cache for manifest content of all head revisions
887 887 fl = repo.file(f)
888 888 for fr in list(fl):
889 889 rev = fl.linkrev(fr)
890 890 if rev not in cl:
891 891 # changerev pointed in linkrev is filtered
892 892 # record it for post processing.
893 893 delayed.append((fr, rev))
894 894 continue
895 895 for p in fl.parentrevs(fr):
896 896 if 0 <= p and p not in lowestchild:
897 897 lowestchild[p] = fr
898 898 backrevref[fr] = rev
899 899 s.add(rev)
900 900
901 901 # Post-processing of all filerevs we skipped because they were
902 902 # filtered. If such filerevs have known and unfiltered children, this
903 903 # means they have an unfiltered appearance out there. We'll use linkrev
904 904 # adjustment to find one of these appearances. The lowest known child
905 905 # will be used as a starting point because it is the best upper-bound we
906 906 # have.
907 907 #
908 908 # This approach will fail when an unfiltered but linkrev-shadowed
909 909 # appearance exists in a head changeset without unfiltered filerev
910 910 # children anywhere.
911 911 while delayed:
912 912 # must be a descending iteration. To slowly fill lowest child
913 913 # information that is of potential use by the next item.
914 914 fr, rev = delayed.pop()
915 915 lkr = rev
916 916
917 917 child = lowestchild.get(fr)
918 918
919 919 if child is None:
920 920 # search for existence of this file revision in a head revision.
921 921 # There are three possibilities:
922 922 # - the revision exists in a head and we can find an
923 923 # introduction from there,
924 924 # - the revision does not exist in a head because it has been
925 925 # changed since its introduction: we would have found a child
926 926 # and be in the other 'else' clause,
927 927 # - all versions of the revision are hidden.
928 928 if lowesthead is None:
929 929 lowesthead = {}
930 930 for h in repo.heads():
931 931 fnode = repo[h].manifest().get(f)
932 932 if fnode is not None:
933 933 lowesthead[fl.rev(fnode)] = h
934 934 headrev = lowesthead.get(fr)
935 935 if headrev is None:
936 936 # content is nowhere unfiltered
937 937 continue
938 938 rev = repo[headrev][f].introrev()
939 939 else:
940 940 # the lowest known child is a good upper bound
941 941 childcrev = backrevref[child]
942 942 # XXX this does not guarantee returning the lowest
943 943 # introduction of this revision, but this gives a
944 944 # result which is a good start and will fit in most
945 945 # cases. We probably need to fix the multiple
946 946 # introductions case properly (report each
947 947 # introduction, even for identical file revisions)
948 948 # once and for all at some point anyway.
949 949 for p in repo[childcrev][f].parents():
950 950 if p.filerev() == fr:
951 951 rev = p.rev()
952 952 break
953 953 if rev == lkr: # no shadowed entry found
954 954 # XXX This should never happen unless some manifest points
955 955 # to biggish file revisions (like a revision that uses a
956 956 # parent that never appears in the manifest ancestors)
957 957 continue
958 958
959 959 # Fill the data for the next iteration.
960 960 for p in fl.parentrevs(fr):
961 961 if 0 <= p and p not in lowestchild:
962 962 lowestchild[p] = fr
963 963 backrevref[fr] = rev
964 964 s.add(rev)
965 965
966 966 return subset & s
967 967
968 968 def first(repo, subset, x):
969 969 """``first(set, [n])``
970 970 An alias for limit().
971 971 """
972 972 return limit(repo, subset, x)
973 973
974 974 def _follow(repo, subset, x, name, followfirst=False):
975 975 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
976 976 c = repo['.']
977 977 if l:
978 978 x = getstring(l[0], _("%s expected a filename") % name)
979 979 if x in c:
980 980 cx = c[x]
981 981 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
982 982 # include the revision responsible for the most recent version
983 983 s.add(cx.introrev())
984 984 else:
985 985 return baseset()
986 986 else:
987 987 s = _revancestors(repo, baseset([c.rev()]), followfirst)
988 988
989 989 return subset & s
990 990
991 991 def follow(repo, subset, x):
992 992 """``follow([file])``
993 993 An alias for ``::.`` (ancestors of the working directory's first parent).
994 994 If a filename is specified, the history of the given file is followed,
995 995 including copies.
996 996 """
997 997 return _follow(repo, subset, x, 'follow')
998 998
999 999 def _followfirst(repo, subset, x):
1000 1000 # ``followfirst([file])``
1001 1001 # Like ``follow([file])`` but follows only the first parent of
1002 1002 # every revision or file revision.
1003 1003 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1004 1004
1005 1005 def getall(repo, subset, x):
1006 1006 """``all()``
1007 1007 All changesets, the same as ``0:tip``.
1008 1008 """
1009 1009 # i18n: "all" is a keyword
1010 1010 getargs(x, 0, 0, _("all takes no arguments"))
1011 1011 return subset & spanset(repo) # drop "null" if any
1012 1012
1013 1013 def grep(repo, subset, x):
1014 1014 """``grep(regex)``
1015 1015 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1016 1016 to ensure special escape characters are handled correctly. Unlike
1017 1017 ``keyword(string)``, the match is case-sensitive.
1018 1018 """
1019 1019 try:
1020 1020 # i18n: "grep" is a keyword
1021 1021 gr = re.compile(getstring(x, _("grep requires a string")))
1022 1022 except re.error, e:
1023 1023 raise error.ParseError(_('invalid match pattern: %s') % e)
1024 1024
1025 1025 def matches(x):
1026 1026 c = repo[x]
1027 1027 for e in c.files() + [c.user(), c.description()]:
1028 1028 if gr.search(e):
1029 1029 return True
1030 1030 return False
1031 1031
1032 1032 return subset.filter(matches)
1033 1033
1034 1034 def _matchfiles(repo, subset, x):
1035 1035 # _matchfiles takes a revset list of prefixed arguments:
1036 1036 #
1037 1037 # [p:foo, i:bar, x:baz]
1038 1038 #
1039 1039 # builds a match object from them and filters subset. Allowed
1040 1040 # prefixes are 'p:' for regular patterns, 'i:' for include
1041 1041 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1042 1042 # a revision identifier, or the empty string to reference the
1043 1043 # working directory, from which the match object is
1044 1044 # initialized. Use 'd:' to set the default matching mode, default
1045 1045 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1046 1046
1047 1047 # i18n: "_matchfiles" is a keyword
1048 1048 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1049 1049 pats, inc, exc = [], [], []
1050 1050 rev, default = None, None
1051 1051 for arg in l:
1052 1052 # i18n: "_matchfiles" is a keyword
1053 1053 s = getstring(arg, _("_matchfiles requires string arguments"))
1054 1054 prefix, value = s[:2], s[2:]
1055 1055 if prefix == 'p:':
1056 1056 pats.append(value)
1057 1057 elif prefix == 'i:':
1058 1058 inc.append(value)
1059 1059 elif prefix == 'x:':
1060 1060 exc.append(value)
1061 1061 elif prefix == 'r:':
1062 1062 if rev is not None:
1063 1063 # i18n: "_matchfiles" is a keyword
1064 1064 raise error.ParseError(_('_matchfiles expected at most one '
1065 1065 'revision'))
1066 1066 if value != '': # empty means working directory; leave rev as None
1067 1067 rev = value
1068 1068 elif prefix == 'd:':
1069 1069 if default is not None:
1070 1070 # i18n: "_matchfiles" is a keyword
1071 1071 raise error.ParseError(_('_matchfiles expected at most one '
1072 1072 'default mode'))
1073 1073 default = value
1074 1074 else:
1075 1075 # i18n: "_matchfiles" is a keyword
1076 1076 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1077 1077 if not default:
1078 1078 default = 'glob'
1079 1079
1080 1080 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1081 1081 exclude=exc, ctx=repo[rev], default=default)
1082 1082
1083 1083 def matches(x):
1084 1084 for f in repo[x].files():
1085 1085 if m(f):
1086 1086 return True
1087 1087 return False
1088 1088
1089 1089 return subset.filter(matches)
1090 1090
1091 1091 def hasfile(repo, subset, x):
1092 1092 """``file(pattern)``
1093 1093 Changesets affecting files matched by pattern.
1094 1094
1095 1095 For a faster but less accurate result, consider using ``filelog()``
1096 1096 instead.
1097 1097
1098 1098 This predicate uses ``glob:`` as the default kind of pattern.
1099 1099 """
1100 1100 # i18n: "file" is a keyword
1101 1101 pat = getstring(x, _("file requires a pattern"))
1102 1102 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1103 1103
1104 1104 def head(repo, subset, x):
1105 1105 """``head()``
1106 1106 Changeset is a named branch head.
1107 1107 """
1108 1108 # i18n: "head" is a keyword
1109 1109 getargs(x, 0, 0, _("head takes no arguments"))
1110 1110 hs = set()
1111 1111 cl = repo.changelog
1112 1112 for b, ls in repo.branchmap().iteritems():
1113 1113 hs.update(cl.rev(h) for h in ls)
1114 1114 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1115 1115 # This does not break because of other fullreposet misbehavior.
1116 1116 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1117 1117 # necessary to ensure we preserve the order in subset.
1118 1118 return baseset(hs) & subset
1119 1119
1120 1120 def heads(repo, subset, x):
1121 1121 """``heads(set)``
1122 1122 Members of set with no children in set.
1123 1123 """
1124 1124 s = getset(repo, subset, x)
1125 1125 ps = parents(repo, subset, x)
1126 1126 return s - ps
1127 1127
1128 1128 def hidden(repo, subset, x):
1129 1129 """``hidden()``
1130 1130 Hidden changesets.
1131 1131 """
1132 1132 # i18n: "hidden" is a keyword
1133 1133 getargs(x, 0, 0, _("hidden takes no arguments"))
1134 1134 hiddenrevs = repoview.filterrevs(repo, 'visible')
1135 1135 return subset & hiddenrevs
1136 1136
1137 1137 def keyword(repo, subset, x):
1138 1138 """``keyword(string)``
1139 1139 Search commit message, user name, and names of changed files for
1140 1140 string. The match is case-insensitive.
1141 1141 """
1142 1142 # i18n: "keyword" is a keyword
1143 1143 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1144 1144
1145 1145 def matches(r):
1146 1146 c = repo[r]
1147 1147 return any(kw in encoding.lower(t)
1148 1148 for t in c.files() + [c.user(), c.description()])
1149 1149
1150 1150 return subset.filter(matches)
1151 1151
1152 1152 def limit(repo, subset, x):
1153 1153 """``limit(set, [n])``
1154 1154 First n members of set, defaulting to 1.
1155 1155 """
1156 1156 # i18n: "limit" is a keyword
1157 1157 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1158 1158 try:
1159 1159 lim = 1
1160 1160 if len(l) == 2:
1161 1161 # i18n: "limit" is a keyword
1162 1162 lim = int(getstring(l[1], _("limit requires a number")))
1163 1163 except (TypeError, ValueError):
1164 1164 # i18n: "limit" is a keyword
1165 1165 raise error.ParseError(_("limit expects a number"))
1166 1166 ss = subset
1167 1167 os = getset(repo, fullreposet(repo), l[0])
1168 1168 result = []
1169 1169 it = iter(os)
1170 1170 for x in xrange(lim):
1171 1171 y = next(it, None)
1172 1172 if y is None:
1173 1173 break
1174 1174 elif y in ss:
1175 1175 result.append(y)
1176 1176 return baseset(result)
1177 1177
1178 1178 def last(repo, subset, x):
1179 1179 """``last(set, [n])``
1180 1180 Last n members of set, defaulting to 1.
1181 1181 """
1182 1182 # i18n: "last" is a keyword
1183 1183 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1184 1184 try:
1185 1185 lim = 1
1186 1186 if len(l) == 2:
1187 1187 # i18n: "last" is a keyword
1188 1188 lim = int(getstring(l[1], _("last requires a number")))
1189 1189 except (TypeError, ValueError):
1190 1190 # i18n: "last" is a keyword
1191 1191 raise error.ParseError(_("last expects a number"))
1192 1192 ss = subset
1193 1193 os = getset(repo, fullreposet(repo), l[0])
1194 1194 os.reverse()
1195 1195 result = []
1196 1196 it = iter(os)
1197 1197 for x in xrange(lim):
1198 1198 y = next(it, None)
1199 1199 if y is None:
1200 1200 break
1201 1201 elif y in ss:
1202 1202 result.append(y)
1203 1203 return baseset(result)
1204 1204
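# --- Editor's note (not part of upstream revset.py) ---
# last() above mirrors limit() on the reversed input set: it looks at the
# first n members of the reversed candidate set and keeps those that are also
# in the incoming subset, so results come back highest-rev first. With plain
# Python lists the same idea reads:
#
#   def _toy_last(revs, subset, n=1):
#       return [r for r in list(reversed(revs))[:n] if r in subset]
#
#   _toy_last([0, 1, 2, 3], {1, 2, 3}, 2) == [3, 2]
# --- end editor's note ---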
1205 1205 def maxrev(repo, subset, x):
1206 1206 """``max(set)``
1207 1207 Changeset with highest revision number in set.
1208 1208 """
1209 1209 os = getset(repo, fullreposet(repo), x)
1210 1210 if os:
1211 1211 m = os.max()
1212 1212 if m in subset:
1213 1213 return baseset([m])
1214 1214 return baseset()
1215 1215
1216 1216 def merge(repo, subset, x):
1217 1217 """``merge()``
1218 1218 Changeset is a merge changeset.
1219 1219 """
1220 1220 # i18n: "merge" is a keyword
1221 1221 getargs(x, 0, 0, _("merge takes no arguments"))
1222 1222 cl = repo.changelog
1223 1223 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1224 1224
1225 1225 def branchpoint(repo, subset, x):
1226 1226 """``branchpoint()``
1227 1227 Changesets with more than one child.
1228 1228 """
1229 1229 # i18n: "branchpoint" is a keyword
1230 1230 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1231 1231 cl = repo.changelog
1232 1232 if not subset:
1233 1233 return baseset()
1234 1234 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1235 1235 # (and if it is not, it should.)
1236 1236 baserev = min(subset)
1237 1237 parentscount = [0]*(len(repo) - baserev)
1238 1238 for r in cl.revs(start=baserev + 1):
1239 1239 for p in cl.parentrevs(r):
1240 1240 if p >= baserev:
1241 1241 parentscount[p - baserev] += 1
1242 1242 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1243 1243
1244 1244 def minrev(repo, subset, x):
1245 1245 """``min(set)``
1246 1246 Changeset with lowest revision number in set.
1247 1247 """
1248 1248 os = getset(repo, fullreposet(repo), x)
1249 1249 if os:
1250 1250 m = os.min()
1251 1251 if m in subset:
1252 1252 return baseset([m])
1253 1253 return baseset()
1254 1254
1255 1255 def modifies(repo, subset, x):
1256 1256 """``modifies(pattern)``
1257 1257 Changesets modifying files matched by pattern.
1258 1258
1259 1259 The pattern without explicit kind like ``glob:`` is expected to be
1260 1260 relative to the current directory and match against a file or a
1261 1261 directory.
1262 1262 """
1263 1263 # i18n: "modifies" is a keyword
1264 1264 pat = getstring(x, _("modifies requires a pattern"))
1265 1265 return checkstatus(repo, subset, pat, 0)
1266 1266
1267 1267 def named(repo, subset, x):
1268 1268 """``named(namespace)``
1269 1269 The changesets in a given namespace.
1270 1270
1271 1271 If `namespace` starts with `re:`, the remainder of the string is treated as
1272 1272 a regular expression. To match a namespace that actually starts with `re:`,
1273 1273 use the prefix `literal:`.
1274 1274 """
1275 1275 # i18n: "named" is a keyword
1276 1276 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1277 1277
1278 1278 ns = getstring(args[0],
1279 1279 # i18n: "named" is a keyword
1280 1280 _('the argument to named must be a string'))
1281 1281 kind, pattern, matcher = _stringmatcher(ns)
1282 1282 namespaces = set()
1283 1283 if kind == 'literal':
1284 1284 if pattern not in repo.names:
1285 1285 raise error.RepoLookupError(_("namespace '%s' does not exist")
1286 1286 % ns)
1287 1287 namespaces.add(repo.names[pattern])
1288 1288 else:
1289 1289 for name, ns in repo.names.iteritems():
1290 1290 if matcher(name):
1291 1291 namespaces.add(ns)
1292 1292 if not namespaces:
1293 1293 raise error.RepoLookupError(_("no namespace exists"
1294 1294 " that match '%s'") % pattern)
1295 1295
1296 1296 names = set()
1297 1297 for ns in namespaces:
1298 1298 for name in ns.listnames(repo):
1299 1299 if name not in ns.deprecated:
1300 1300 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1301 1301
1302 1302 names -= set([node.nullrev])
1303 1303 return subset & names
1304 1304
1305 1305 def node_(repo, subset, x):
1306 1306 """``id(string)``
1307 1307 Revision non-ambiguously specified by the given hex string prefix.
1308 1308 """
1309 1309 # i18n: "id" is a keyword
1310 1310 l = getargs(x, 1, 1, _("id requires one argument"))
1311 1311 # i18n: "id" is a keyword
1312 1312 n = getstring(l[0], _("id requires a string"))
1313 1313 if len(n) == 40:
1314 1314 try:
1315 1315 rn = repo.changelog.rev(node.bin(n))
1316 1316 except (LookupError, TypeError):
1317 1317 rn = None
1318 1318 else:
1319 1319 rn = None
1320 1320 pm = repo.changelog._partialmatch(n)
1321 1321 if pm is not None:
1322 1322 rn = repo.changelog.rev(pm)
1323 1323
1324 1324 if rn is None:
1325 1325 return baseset()
1326 1326 result = baseset([rn])
1327 1327 return result & subset
1328 1328
1329 1329 def obsolete(repo, subset, x):
1330 1330 """``obsolete()``
1331 1331 Mutable changeset with a newer version."""
1332 1332 # i18n: "obsolete" is a keyword
1333 1333 getargs(x, 0, 0, _("obsolete takes no arguments"))
1334 1334 obsoletes = obsmod.getrevs(repo, 'obsolete')
1335 1335 return subset & obsoletes
1336 1336
1337 1337 def only(repo, subset, x):
1338 1338 """``only(set, [set])``
1339 1339 Changesets that are ancestors of the first set that are not ancestors
1340 1340 of any other head in the repo. If a second set is specified, the result
1341 1341 is ancestors of the first set that are not ancestors of the second set
1342 1342 (i.e. ::<set1> - ::<set2>).
1343 1343 """
1344 1344 cl = repo.changelog
1345 1345 # i18n: "only" is a keyword
1346 1346 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1347 1347 include = getset(repo, fullreposet(repo), args[0])
1348 1348 if len(args) == 1:
1349 1349 if not include:
1350 1350 return baseset()
1351 1351
1352 1352 descendants = set(_revdescendants(repo, include, False))
1353 1353 exclude = [rev for rev in cl.headrevs()
1354 1354 if not rev in descendants and not rev in include]
1355 1355 else:
1356 1356 exclude = getset(repo, fullreposet(repo), args[1])
1357 1357
1358 1358 results = set(cl.findmissingrevs(common=exclude, heads=include))
1359 1359 # XXX we should turn this into a baseset instead of a set, smartset may do
1360 1360 # some optimisations from the fact this is a baseset.
1361 1361 return subset & results
1362 1362
1363 1363 def origin(repo, subset, x):
1364 1364 """``origin([set])``
1365 1365 Changesets that were specified as a source for the grafts, transplants or
1366 1366 rebases that created the given revisions. Omitting the optional set is the
1367 1367 same as passing all(). If a changeset created by these operations is itself
1368 1368 specified as a source for one of these operations, only the source changeset
1369 1369 for the first operation is selected.
1370 1370 """
1371 1371 if x is not None:
1372 1372 dests = getset(repo, fullreposet(repo), x)
1373 1373 else:
1374 1374 dests = fullreposet(repo)
1375 1375
1376 1376 def _firstsrc(rev):
1377 1377 src = _getrevsource(repo, rev)
1378 1378 if src is None:
1379 1379 return None
1380 1380
1381 1381 while True:
1382 1382 prev = _getrevsource(repo, src)
1383 1383
1384 1384 if prev is None:
1385 1385 return src
1386 1386 src = prev
1387 1387
1388 1388 o = set([_firstsrc(r) for r in dests])
1389 1389 o -= set([None])
1390 1390 # XXX we should turn this into a baseset instead of a set, smartset may do
1391 1391 # some optimisations from the fact this is a baseset.
1392 1392 return subset & o
1393 1393
1394 1394 def outgoing(repo, subset, x):
1395 1395 """``outgoing([path])``
1396 1396 Changesets not found in the specified destination repository, or the
1397 1397 default push location.
1398 1398 """
1399 1399 # Avoid cycles.
1400 1400 import discovery
1401 1401 import hg
1402 1402 # i18n: "outgoing" is a keyword
1403 1403 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1404 1404 # i18n: "outgoing" is a keyword
1405 1405 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1406 1406 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1407 1407 dest, branches = hg.parseurl(dest)
1408 1408 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1409 1409 if revs:
1410 1410 revs = [repo.lookup(rev) for rev in revs]
1411 1411 other = hg.peer(repo, {}, dest)
1412 1412 repo.ui.pushbuffer()
1413 1413 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1414 1414 repo.ui.popbuffer()
1415 1415 cl = repo.changelog
1416 1416 o = set([cl.rev(r) for r in outgoing.missing])
1417 1417 return subset & o
1418 1418
1419 1419 def p1(repo, subset, x):
1420 1420 """``p1([set])``
1421 1421 First parent of changesets in set, or the working directory.
1422 1422 """
1423 1423 if x is None:
1424 1424 p = repo[x].p1().rev()
1425 1425 if p >= 0:
1426 1426 return subset & baseset([p])
1427 1427 return baseset()
1428 1428
1429 1429 ps = set()
1430 1430 cl = repo.changelog
1431 1431 for r in getset(repo, fullreposet(repo), x):
1432 1432 ps.add(cl.parentrevs(r)[0])
1433 1433 ps -= set([node.nullrev])
1434 1434 # XXX we should turn this into a baseset instead of a set, smartset may do
1435 1435 # some optimisations from the fact this is a baseset.
1436 1436 return subset & ps
1437 1437
1438 1438 def p2(repo, subset, x):
1439 1439 """``p2([set])``
1440 1440 Second parent of changesets in set, or the working directory.
1441 1441 """
1442 1442 if x is None:
1443 1443 ps = repo[x].parents()
1444 1444 try:
1445 1445 p = ps[1].rev()
1446 1446 if p >= 0:
1447 1447 return subset & baseset([p])
1448 1448 return baseset()
1449 1449 except IndexError:
1450 1450 return baseset()
1451 1451
1452 1452 ps = set()
1453 1453 cl = repo.changelog
1454 1454 for r in getset(repo, fullreposet(repo), x):
1455 1455 ps.add(cl.parentrevs(r)[1])
1456 1456 ps -= set([node.nullrev])
1457 1457 # XXX we should turn this into a baseset instead of a set, smartset may do
1458 1458 # some optimisations from the fact this is a baseset.
1459 1459 return subset & ps
1460 1460
1461 1461 def parents(repo, subset, x):
1462 1462 """``parents([set])``
1463 1463 The set of all parents for all changesets in set, or the working directory.
1464 1464 """
1465 1465 if x is None:
1466 1466 ps = set(p.rev() for p in repo[x].parents())
1467 1467 else:
1468 1468 ps = set()
1469 1469 cl = repo.changelog
1470 1470 for r in getset(repo, fullreposet(repo), x):
1471 1471 ps.update(cl.parentrevs(r))
1472 1472 ps -= set([node.nullrev])
1473 1473 return subset & ps
1474 1474
1475 1475 def _phase(repo, subset, target):
1476 1476 """helper to select all rev in phase <target>"""
1477 1477 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1478 1478 if repo._phasecache._phasesets:
1479 1479 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1480 1480 s = baseset(s)
1481 1481 s.sort() # set are non ordered, so we enforce ascending
1482 1482 return subset & s
1483 1483 else:
1484 1484 phase = repo._phasecache.phase
1485 1485 condition = lambda r: phase(repo, r) == target
1486 1486 return subset.filter(condition, cache=False)
1487 1487
1488 1488 def draft(repo, subset, x):
1489 1489 """``draft()``
1490 1490 Changeset in draft phase."""
1491 1491 # i18n: "draft" is a keyword
1492 1492 getargs(x, 0, 0, _("draft takes no arguments"))
1493 1493 target = phases.draft
1494 1494 return _phase(repo, subset, target)
1495 1495
1496 1496 def secret(repo, subset, x):
1497 1497 """``secret()``
1498 1498 Changeset in secret phase."""
1499 1499 # i18n: "secret" is a keyword
1500 1500 getargs(x, 0, 0, _("secret takes no arguments"))
1501 1501 target = phases.secret
1502 1502 return _phase(repo, subset, target)
1503 1503
1504 1504 def parentspec(repo, subset, x, n):
1505 1505 """``set^0``
1506 1506 The set.
1507 1507 ``set^1`` (or ``set^``), ``set^2``
1508 1508 First or second parent, respectively, of all changesets in set.
1509 1509 """
1510 1510 try:
1511 1511 n = int(n[1])
1512 1512 if n not in (0, 1, 2):
1513 1513 raise ValueError
1514 1514 except (TypeError, ValueError):
1515 1515 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1516 1516 ps = set()
1517 1517 cl = repo.changelog
1518 1518 for r in getset(repo, fullreposet(repo), x):
1519 1519 if n == 0:
1520 1520 ps.add(r)
1521 1521 elif n == 1:
1522 1522 ps.add(cl.parentrevs(r)[0])
1523 1523 elif n == 2:
1524 1524 parents = cl.parentrevs(r)
1525 1525 if len(parents) > 1:
1526 1526 ps.add(parents[1])
1527 1527 return subset & ps
1528 1528
1529 1529 def present(repo, subset, x):
1530 1530 """``present(set)``
1531 1531 An empty set, if any revision in set isn't found; otherwise,
1532 1532 all revisions in set.
1533 1533
1534 1534 If any of specified revisions is not present in the local repository,
1535 1535 the query is normally aborted. But this predicate allows the query
1536 1536 to continue even in such cases.
1537 1537 """
1538 1538 try:
1539 1539 return getset(repo, subset, x)
1540 1540 except error.RepoLookupError:
1541 1541 return baseset()
1542 1542
1543 1543 # for internal use
1544 1544 def _notpublic(repo, subset, x):
1545 1545 getargs(x, 0, 0, "_notpublic takes no arguments")
1546 1546 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1547 1547 if repo._phasecache._phasesets:
1548 1548 s = set()
1549 1549 for u in repo._phasecache._phasesets[1:]:
1550 1550 s.update(u)
1551 1551 s = baseset(s - repo.changelog.filteredrevs)
1552 1552 s.sort()
1553 1553 return subset & s
1554 1554 else:
1555 1555 phase = repo._phasecache.phase
1556 1556 target = phases.public
1557 1557 condition = lambda r: phase(repo, r) != target
1558 1558 return subset.filter(condition, cache=False)
1559 1559
1560 1560 def public(repo, subset, x):
1561 1561 """``public()``
1562 1562 Changeset in public phase."""
1563 1563 # i18n: "public" is a keyword
1564 1564 getargs(x, 0, 0, _("public takes no arguments"))
1565 1565 phase = repo._phasecache.phase
1566 1566 target = phases.public
1567 1567 condition = lambda r: phase(repo, r) == target
1568 1568 return subset.filter(condition, cache=False)
1569 1569
1570 1570 def remote(repo, subset, x):
1571 1571 """``remote([id [,path]])``
1572 1572 Local revision that corresponds to the given identifier in a
1573 1573 remote repository, if present. Here, the '.' identifier is a
1574 1574 synonym for the current local branch.
1575 1575 """
1576 1576
1577 1577 import hg # avoid start-up nasties
1578 1578 # i18n: "remote" is a keyword
1579 1579 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1580 1580
1581 1581 q = '.'
1582 1582 if len(l) > 0:
1583 1583 # i18n: "remote" is a keyword
1584 1584 q = getstring(l[0], _("remote requires a string id"))
1585 1585 if q == '.':
1586 1586 q = repo['.'].branch()
1587 1587
1588 1588 dest = ''
1589 1589 if len(l) > 1:
1590 1590 # i18n: "remote" is a keyword
1591 1591 dest = getstring(l[1], _("remote requires a repository path"))
1592 1592 dest = repo.ui.expandpath(dest or 'default')
1593 1593 dest, branches = hg.parseurl(dest)
1594 1594 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1595 1595 if revs:
1596 1596 revs = [repo.lookup(rev) for rev in revs]
1597 1597 other = hg.peer(repo, {}, dest)
1598 1598 n = other.lookup(q)
1599 1599 if n in repo:
1600 1600 r = repo[n].rev()
1601 1601 if r in subset:
1602 1602 return baseset([r])
1603 1603 return baseset()
1604 1604
1605 1605 def removes(repo, subset, x):
1606 1606 """``removes(pattern)``
1607 1607 Changesets which remove files matching pattern.
1608 1608
1609 1609 The pattern without explicit kind like ``glob:`` is expected to be
1610 1610 relative to the current directory and match against a file or a
1611 1611 directory.
1612 1612 """
1613 1613 # i18n: "removes" is a keyword
1614 1614 pat = getstring(x, _("removes requires a pattern"))
1615 1615 return checkstatus(repo, subset, pat, 2)
1616 1616
1617 1617 def rev(repo, subset, x):
1618 1618 """``rev(number)``
1619 1619 Revision with the given numeric identifier.
1620 1620 """
1621 1621 # i18n: "rev" is a keyword
1622 1622 l = getargs(x, 1, 1, _("rev requires one argument"))
1623 1623 try:
1624 1624 # i18n: "rev" is a keyword
1625 1625 l = int(getstring(l[0], _("rev requires a number")))
1626 1626 except (TypeError, ValueError):
1627 1627 # i18n: "rev" is a keyword
1628 1628 raise error.ParseError(_("rev expects a number"))
1629 1629 if l not in repo.changelog and l != node.nullrev:
1630 1630 return baseset()
1631 1631 return subset & baseset([l])
1632 1632
1633 1633 def matching(repo, subset, x):
1634 1634 """``matching(revision [, field])``
1635 1635 Changesets in which a given set of fields match the set of fields in the
1636 1636 selected revision or set.
1637 1637
1638 1638 To match more than one field pass the list of fields to match separated
1639 1639 by spaces (e.g. ``author description``).
1640 1640
1641 1641 Valid fields are most regular revision fields and some special fields.
1642 1642
1643 1643 Regular revision fields are ``description``, ``author``, ``branch``,
1644 1644 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1645 1645 and ``diff``.
1646 1646 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1647 1647 contents of the revision. Two revisions matching their ``diff`` will
1648 1648 also match their ``files``.
1649 1649
1650 1650 Special fields are ``summary`` and ``metadata``:
1651 1651 ``summary`` matches the first line of the description.
1652 1652 ``metadata`` is equivalent to matching ``description user date``
1653 1653 (i.e. it matches the main metadata fields).
1654 1654
1655 1655 ``metadata`` is the default field which is used when no fields are
1656 1656 specified. You can match more than one field at a time.
1657 1657 """
1658 1658 # i18n: "matching" is a keyword
1659 1659 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1660 1660
1661 1661 revs = getset(repo, fullreposet(repo), l[0])
1662 1662
1663 1663 fieldlist = ['metadata']
1664 1664 if len(l) > 1:
1665 1665 fieldlist = getstring(l[1],
1666 1666 # i18n: "matching" is a keyword
1667 1667 _("matching requires a string "
1668 1668 "as its second argument")).split()
1669 1669
1670 1670 # Make sure that there are no repeated fields,
1671 1671 # expand the 'special' 'metadata' field type
1672 1672 # and check the 'files' whenever we check the 'diff'
1673 1673 fields = []
1674 1674 for field in fieldlist:
1675 1675 if field == 'metadata':
1676 1676 fields += ['user', 'description', 'date']
1677 1677 elif field == 'diff':
1678 1678 # a revision matching the diff must also match the files
1679 1679 # since matching the diff is very costly, make sure to
1680 1680 # also match the files first
1681 1681 fields += ['files', 'diff']
1682 1682 else:
1683 1683 if field == 'author':
1684 1684 field = 'user'
1685 1685 fields.append(field)
1686 1686 fields = set(fields)
1687 1687 if 'summary' in fields and 'description' in fields:
1688 1688 # If a revision matches its description it also matches its summary
1689 1689 fields.discard('summary')
1690 1690
1691 1691 # We may want to match more than one field
1692 1692 # Not all fields take the same amount of time to be matched
1693 1693 # Sort the selected fields in order of increasing matching cost
1694 1694 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1695 1695 'files', 'description', 'substate', 'diff']
1696 1696 def fieldkeyfunc(f):
1697 1697 try:
1698 1698 return fieldorder.index(f)
1699 1699 except ValueError:
1700 1700 # assume an unknown field is very costly
1701 1701 return len(fieldorder)
1702 1702 fields = list(fields)
1703 1703 fields.sort(key=fieldkeyfunc)
1704 1704
1705 1705 # Each field will be matched with its own "getfield" function
1706 1706 # which will be added to the getfieldfuncs array of functions
1707 1707 getfieldfuncs = []
1708 1708 _funcs = {
1709 1709 'user': lambda r: repo[r].user(),
1710 1710 'branch': lambda r: repo[r].branch(),
1711 1711 'date': lambda r: repo[r].date(),
1712 1712 'description': lambda r: repo[r].description(),
1713 1713 'files': lambda r: repo[r].files(),
1714 1714 'parents': lambda r: repo[r].parents(),
1715 1715 'phase': lambda r: repo[r].phase(),
1716 1716 'substate': lambda r: repo[r].substate,
1717 1717 'summary': lambda r: repo[r].description().splitlines()[0],
1718 1718 'diff': lambda r: list(repo[r].diff(git=True)),
1719 1719 }
1720 1720 for info in fields:
1721 1721 getfield = _funcs.get(info, None)
1722 1722 if getfield is None:
1723 1723 raise error.ParseError(
1724 1724 # i18n: "matching" is a keyword
1725 1725 _("unexpected field name passed to matching: %s") % info)
1726 1726 getfieldfuncs.append(getfield)
1727 1727 # convert the getfield array of functions into a "getinfo" function
1728 1728 # which returns an array of field values (or a single value if there
1729 1729 # is only one field to match)
1730 1730 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1731 1731
1732 1732 def matches(x):
1733 1733 for rev in revs:
1734 1734 target = getinfo(rev)
1735 1735 match = True
1736 1736 for n, f in enumerate(getfieldfuncs):
1737 1737 if target[n] != f(x):
1738 1738 match = False
1739 1739 if match:
1740 1740 return True
1741 1741 return False
1742 1742
1743 1743 return subset.filter(matches)
1744 1744
1745 1745 def reverse(repo, subset, x):
1746 1746 """``reverse(set)``
1747 1747 Reverse order of set.
1748 1748 """
1749 1749 l = getset(repo, subset, x)
1750 1750 l.reverse()
1751 1751 return l
1752 1752
1753 1753 def roots(repo, subset, x):
1754 1754 """``roots(set)``
1755 1755 Changesets in set with no parent changeset in set.
1756 1756 """
1757 1757 s = getset(repo, fullreposet(repo), x)
1758 1758 parents = repo.changelog.parentrevs
1759 1759 def filter(r):
1760 1760 for p in parents(r):
1761 1761 if 0 <= p and p in s:
1762 1762 return False
1763 1763 return True
1764 1764 return subset & s.filter(filter)
1765 1765
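# For example, in a strictly linear history 'roots(2:4)' evaluates to just
# revision 2: revisions 3 and 4 each have a parent (2 and 3 respectively)
# inside the set, so the filter above rejects them.
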
1766 1766 def sort(repo, subset, x):
1767 1767 """``sort(set[, [-]key...])``
1768 1768 Sort set by keys. The default sort order is ascending, specify a key
1769 1769 as ``-key`` to sort in descending order.
1770 1770
1771 1771 The keys can be:
1772 1772
1773 1773 - ``rev`` for the revision number,
1774 1774 - ``branch`` for the branch name,
1775 1775 - ``desc`` for the commit message (description),
1776 1776 - ``user`` for user name (``author`` can be used as an alias),
1777 1777 - ``date`` for the commit date
1778 1778 """
1779 1779 # i18n: "sort" is a keyword
1780 1780 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1781 1781 keys = "rev"
1782 1782 if len(l) == 2:
1783 1783 # i18n: "sort" is a keyword
1784 1784 keys = getstring(l[1], _("sort spec must be a string"))
1785 1785
1786 1786 s = l[0]
1787 1787 keys = keys.split()
1788 1788 l = []
1789 1789 def invert(s):
1790 1790 return "".join(chr(255 - ord(c)) for c in s)
1791 1791 revs = getset(repo, subset, s)
1792 1792 if keys == ["rev"]:
1793 1793 revs.sort()
1794 1794 return revs
1795 1795 elif keys == ["-rev"]:
1796 1796 revs.sort(reverse=True)
1797 1797 return revs
1798 1798 for r in revs:
1799 1799 c = repo[r]
1800 1800 e = []
1801 1801 for k in keys:
1802 1802 if k == 'rev':
1803 1803 e.append(r)
1804 1804 elif k == '-rev':
1805 1805 e.append(-r)
1806 1806 elif k == 'branch':
1807 1807 e.append(c.branch())
1808 1808 elif k == '-branch':
1809 1809 e.append(invert(c.branch()))
1810 1810 elif k == 'desc':
1811 1811 e.append(c.description())
1812 1812 elif k == '-desc':
1813 1813 e.append(invert(c.description()))
1814 1814 elif k in 'user author':
1815 1815 e.append(c.user())
1816 1816 elif k in '-user -author':
1817 1817 e.append(invert(c.user()))
1818 1818 elif k == 'date':
1819 1819 e.append(c.date()[0])
1820 1820 elif k == '-date':
1821 1821 e.append(-c.date()[0])
1822 1822 else:
1823 1823 raise error.ParseError(_("unknown sort key %r") % k)
1824 1824 e.append(r)
1825 1825 l.append(e)
1826 1826 l.sort()
1827 1827 return baseset([e[-1] for e in l])
1828 1828
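# A minimal sketch of why the `invert` helper above gives descending keys:
# complementing every byte reverses lexicographic comparison, so an ascending
# sort on the inverted string is a descending sort on the original one.
#
#   invert = lambda s: "".join(chr(255 - ord(c)) for c in s)
#   sorted(['bar', 'foo'], key=invert)   # -> ['foo', 'bar']
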
1829 1829 def subrepo(repo, subset, x):
1830 1830 """``subrepo([pattern])``
1831 1831 Changesets that add, modify or remove the given subrepo. If no subrepo
1832 1832 pattern is named, any subrepo changes are returned.
1833 1833 """
1834 1834 # i18n: "subrepo" is a keyword
1835 1835 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1836 1836 if len(args) != 0:
1837 1837 pat = getstring(args[0], _("subrepo requires a pattern"))
1838 1838
1839 1839 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1840 1840
1841 1841 def submatches(names):
1842 1842 k, p, m = _stringmatcher(pat)
1843 1843 for name in names:
1844 1844 if m(name):
1845 1845 yield name
1846 1846
1847 1847 def matches(x):
1848 1848 c = repo[x]
1849 1849 s = repo.status(c.p1().node(), c.node(), match=m)
1850 1850
1851 1851 if len(args) == 0:
1852 1852 return s.added or s.modified or s.removed
1853 1853
1854 1854 if s.added:
1855 1855 return any(submatches(c.substate.keys()))
1856 1856
1857 1857 if s.modified:
1858 1858 subs = set(c.p1().substate.keys())
1859 1859 subs.update(c.substate.keys())
1860 1860
1861 1861 for path in submatches(subs):
1862 1862 if c.p1().substate.get(path) != c.substate.get(path):
1863 1863 return True
1864 1864
1865 1865 if s.removed:
1866 1866 return any(submatches(c.p1().substate.keys()))
1867 1867
1868 1868 return False
1869 1869
1870 1870 return subset.filter(matches)
1871 1871
1872 1872 def _stringmatcher(pattern):
1873 1873 """
1874 1874 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1875 1875 returns the matcher name, pattern, and matcher function.
1876 1876 missing or unknown prefixes are treated as literal matches.
1877 1877
1878 1878 helper for tests:
1879 1879 >>> def test(pattern, *tests):
1880 1880 ... kind, pattern, matcher = _stringmatcher(pattern)
1881 1881 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1882 1882
1883 1883 exact matching (no prefix):
1884 1884 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1885 1885 ('literal', 'abcdefg', [False, False, True])
1886 1886
1887 1887 regex matching ('re:' prefix)
1888 1888 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1889 1889 ('re', 'a.+b', [False, False, True])
1890 1890
1891 1891 force exact matches ('literal:' prefix)
1892 1892 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1893 1893 ('literal', 're:foobar', [False, True])
1894 1894
1895 1895 unknown prefixes are ignored and treated as literals
1896 1896 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1897 1897 ('literal', 'foo:bar', [False, False, True])
1898 1898 """
1899 1899 if pattern.startswith('re:'):
1900 1900 pattern = pattern[3:]
1901 1901 try:
1902 1902 regex = re.compile(pattern)
1903 1903 except re.error, e:
1904 1904 raise error.ParseError(_('invalid regular expression: %s')
1905 1905 % e)
1906 1906 return 're', pattern, regex.search
1907 1907 elif pattern.startswith('literal:'):
1908 1908 pattern = pattern[8:]
1909 1909 return 'literal', pattern, pattern.__eq__
1910 1910
1911 1911 def _substringmatcher(pattern):
1912 1912 kind, pattern, matcher = _stringmatcher(pattern)
1913 1913 if kind == 'literal':
1914 1914 matcher = lambda s: pattern in s
1915 1915 return kind, pattern, matcher
1916 1916
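# Illustrative uses of the helper above (example values only):
#
#   _substringmatcher('oo')[2]('foo')              # True, literal containment
#   bool(_substringmatcher('re:^f.o$')[2]('foo'))  # True, regex search
#   _substringmatcher('literal:re:')[2]('re:foo')  # True, forced literal
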
1917 1917 def tag(repo, subset, x):
1918 1918 """``tag([name])``
1919 1919 The specified tag by name, or all tagged revisions if no name is given.
1920 1920
1921 1921 If `name` starts with `re:`, the remainder of the name is treated as
1922 1922 a regular expression. To match a tag that actually starts with `re:`,
1923 1923 use the prefix `literal:`.
1924 1924 """
1925 1925 # i18n: "tag" is a keyword
1926 1926 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1927 1927 cl = repo.changelog
1928 1928 if args:
1929 1929 pattern = getstring(args[0],
1930 1930 # i18n: "tag" is a keyword
1931 1931 _('the argument to tag must be a string'))
1932 1932 kind, pattern, matcher = _stringmatcher(pattern)
1933 1933 if kind == 'literal':
1934 1934 # avoid resolving all tags
1935 1935 tn = repo._tagscache.tags.get(pattern, None)
1936 1936 if tn is None:
1937 1937 raise error.RepoLookupError(_("tag '%s' does not exist")
1938 1938 % pattern)
1939 1939 s = set([repo[tn].rev()])
1940 1940 else:
1941 1941 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1942 1942 else:
1943 1943 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1944 1944 return subset & s
1945 1945
1946 1946 def tagged(repo, subset, x):
1947 1947 return tag(repo, subset, x)
1948 1948
1949 1949 def unstable(repo, subset, x):
1950 1950 """``unstable()``
1951 1951 Non-obsolete changesets with obsolete ancestors.
1952 1952 """
1953 1953 # i18n: "unstable" is a keyword
1954 1954 getargs(x, 0, 0, _("unstable takes no arguments"))
1955 1955 unstables = obsmod.getrevs(repo, 'unstable')
1956 1956 return subset & unstables
1957 1957
1958 1958
1959 1959 def user(repo, subset, x):
1960 1960 """``user(string)``
1961 1961 User name contains string. The match is case-insensitive.
1962 1962
1963 1963 If `string` starts with `re:`, the remainder of the string is treated as
1964 1964 a regular expression. To match a user that actually contains `re:`, use
1965 1965 the prefix `literal:`.
1966 1966 """
1967 1967 return author(repo, subset, x)
1968 1968
1969 1969 # experimental
1970 1970 def wdir(repo, subset, x):
1971 1971 # i18n: "wdir" is a keyword
1972 1972 getargs(x, 0, 0, _("wdir takes no arguments"))
1973 1973 if None in subset or isinstance(subset, fullreposet):
1974 1974 return baseset([None])
1975 1975 return baseset()
1976 1976
1977 1977 # for internal use
1978 1978 def _list(repo, subset, x):
1979 1979 s = getstring(x, "internal error")
1980 1980 if not s:
1981 1981 return baseset()
1982 1982 # remove duplicates here. it's difficult for caller to deduplicate sets
1983 1983 # because different symbols can point to the same rev.
1984 1984 cl = repo.changelog
1985 1985 ls = []
1986 1986 seen = set()
1987 1987 for t in s.split('\0'):
1988 1988 try:
1989 1989 # fast path for integer revision
1990 1990 r = int(t)
1991 1991 if str(r) != t or r not in cl:
1992 1992 raise ValueError
1993 1993 except ValueError:
1994 1994 r = repo[t].rev()
1995 1995 if r in seen:
1996 1996 continue
1997 1997 if (r in subset
1998 1998 or r == node.nullrev and isinstance(subset, fullreposet)):
1999 1999 ls.append(r)
2000 2000 seen.add(r)
2001 2001 return baseset(ls)
2002 2002
2003 2003 # for internal use
2004 2004 def _intlist(repo, subset, x):
2005 2005 s = getstring(x, "internal error")
2006 2006 if not s:
2007 2007 return baseset()
2008 2008 ls = [int(r) for r in s.split('\0')]
2009 2009 s = subset
2010 2010 return baseset([r for r in ls if r in s])
2011 2011
2012 2012 # for internal use
2013 2013 def _hexlist(repo, subset, x):
2014 2014 s = getstring(x, "internal error")
2015 2015 if not s:
2016 2016 return baseset()
2017 2017 cl = repo.changelog
2018 2018 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2019 2019 s = subset
2020 2020 return baseset([r for r in ls if r in s])
2021 2021
2022 2022 symbols = {
2023 2023 "adds": adds,
2024 2024 "all": getall,
2025 2025 "ancestor": ancestor,
2026 2026 "ancestors": ancestors,
2027 2027 "_firstancestors": _firstancestors,
2028 2028 "author": author,
2029 2029 "bisect": bisect,
2030 2030 "bisected": bisected,
2031 2031 "bookmark": bookmark,
2032 2032 "branch": branch,
2033 2033 "branchpoint": branchpoint,
2034 2034 "bumped": bumped,
2035 2035 "bundle": bundle,
2036 2036 "children": children,
2037 2037 "closed": closed,
2038 2038 "contains": contains,
2039 2039 "converted": converted,
2040 2040 "date": date,
2041 2041 "desc": desc,
2042 2042 "descendants": descendants,
2043 2043 "_firstdescendants": _firstdescendants,
2044 2044 "destination": destination,
2045 2045 "divergent": divergent,
2046 2046 "draft": draft,
2047 2047 "extinct": extinct,
2048 2048 "extra": extra,
2049 2049 "file": hasfile,
2050 2050 "filelog": filelog,
2051 2051 "first": first,
2052 2052 "follow": follow,
2053 2053 "_followfirst": _followfirst,
2054 2054 "grep": grep,
2055 2055 "head": head,
2056 2056 "heads": heads,
2057 2057 "hidden": hidden,
2058 2058 "id": node_,
2059 2059 "keyword": keyword,
2060 2060 "last": last,
2061 2061 "limit": limit,
2062 2062 "_matchfiles": _matchfiles,
2063 2063 "max": maxrev,
2064 2064 "merge": merge,
2065 2065 "min": minrev,
2066 2066 "modifies": modifies,
2067 2067 "named": named,
2068 2068 "obsolete": obsolete,
2069 2069 "only": only,
2070 2070 "origin": origin,
2071 2071 "outgoing": outgoing,
2072 2072 "p1": p1,
2073 2073 "p2": p2,
2074 2074 "parents": parents,
2075 2075 "present": present,
2076 2076 "public": public,
2077 2077 "_notpublic": _notpublic,
2078 2078 "remote": remote,
2079 2079 "removes": removes,
2080 2080 "rev": rev,
2081 2081 "reverse": reverse,
2082 2082 "roots": roots,
2083 2083 "sort": sort,
2084 2084 "secret": secret,
2085 2085 "subrepo": subrepo,
2086 2086 "matching": matching,
2087 2087 "tag": tag,
2088 2088 "tagged": tagged,
2089 2089 "user": user,
2090 2090 "unstable": unstable,
2091 2091 "wdir": wdir,
2092 2092 "_list": _list,
2093 2093 "_intlist": _intlist,
2094 2094 "_hexlist": _hexlist,
2095 2095 }
2096 2096
2097 2097 # symbols which can't be used for a DoS attack for any given input
2098 2098 # (e.g. those which accept regexes as plain strings shouldn't be included)
2099 2099 # functions that just return a lot of changesets (like all) don't count here
2100 2100 safesymbols = set([
2101 2101 "adds",
2102 2102 "all",
2103 2103 "ancestor",
2104 2104 "ancestors",
2105 2105 "_firstancestors",
2106 2106 "author",
2107 2107 "bisect",
2108 2108 "bisected",
2109 2109 "bookmark",
2110 2110 "branch",
2111 2111 "branchpoint",
2112 2112 "bumped",
2113 2113 "bundle",
2114 2114 "children",
2115 2115 "closed",
2116 2116 "converted",
2117 2117 "date",
2118 2118 "desc",
2119 2119 "descendants",
2120 2120 "_firstdescendants",
2121 2121 "destination",
2122 2122 "divergent",
2123 2123 "draft",
2124 2124 "extinct",
2125 2125 "extra",
2126 2126 "file",
2127 2127 "filelog",
2128 2128 "first",
2129 2129 "follow",
2130 2130 "_followfirst",
2131 2131 "head",
2132 2132 "heads",
2133 2133 "hidden",
2134 2134 "id",
2135 2135 "keyword",
2136 2136 "last",
2137 2137 "limit",
2138 2138 "_matchfiles",
2139 2139 "max",
2140 2140 "merge",
2141 2141 "min",
2142 2142 "modifies",
2143 2143 "obsolete",
2144 2144 "only",
2145 2145 "origin",
2146 2146 "outgoing",
2147 2147 "p1",
2148 2148 "p2",
2149 2149 "parents",
2150 2150 "present",
2151 2151 "public",
2152 2152 "_notpublic",
2153 2153 "remote",
2154 2154 "removes",
2155 2155 "rev",
2156 2156 "reverse",
2157 2157 "roots",
2158 2158 "sort",
2159 2159 "secret",
2160 2160 "matching",
2161 2161 "tag",
2162 2162 "tagged",
2163 2163 "user",
2164 2164 "unstable",
2165 2165 "wdir",
2166 2166 "_list",
2167 2167 "_intlist",
2168 2168 "_hexlist",
2169 2169 ])
2170 2170
2171 2171 methods = {
2172 2172 "range": rangeset,
2173 2173 "dagrange": dagrange,
2174 2174 "string": stringset,
2175 2175 "symbol": stringset,
2176 2176 "and": andset,
2177 2177 "or": orset,
2178 2178 "not": notset,
2179 2179 "list": listset,
2180 2180 "func": func,
2181 2181 "ancestor": ancestorspec,
2182 2182 "parent": parentspec,
2183 2183 "parentpost": p1,
2184 2184 }
2185 2185
2186 2186 def optimize(x, small):
2187 2187 if x is None:
2188 2188 return 0, x
2189 2189
2190 2190 smallbonus = 1
2191 2191 if small:
2192 2192 smallbonus = .5
2193 2193
2194 2194 op = x[0]
2195 2195 if op == 'minus':
2196 2196 return optimize(('and', x[1], ('not', x[2])), small)
2197 2197 elif op == 'only':
2198 2198 return optimize(('func', ('symbol', 'only'),
2199 2199 ('list', x[1], x[2])), small)
2200 2200 elif op == 'onlypost':
2201 2201 return optimize(('func', ('symbol', 'only'), x[1]), small)
2202 2202 elif op == 'dagrangepre':
2203 2203 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2204 2204 elif op == 'dagrangepost':
2205 2205 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2206 2206 elif op == 'rangepre':
2207 2207 return optimize(('range', ('string', '0'), x[1]), small)
2208 2208 elif op == 'rangepost':
2209 2209 return optimize(('range', x[1], ('string', 'tip')), small)
2210 2210 elif op == 'negate':
2211 2211 return optimize(('string',
2212 2212 '-' + getstring(x[1], _("can't negate that"))), small)
2213 2213 elif op in 'string symbol negate':
2214 2214 return smallbonus, x # single revisions are small
2215 2215 elif op == 'and':
2216 2216 wa, ta = optimize(x[1], True)
2217 2217 wb, tb = optimize(x[2], True)
2218 2218
2219 2219 # (::x and not ::y)/(not ::y and ::x) have a fast path
2220 2220 def isonly(revs, bases):
2221 2221 return (
2222 2222 revs[0] == 'func'
2223 2223 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2224 2224 and bases[0] == 'not'
2225 2225 and bases[1][0] == 'func'
2226 2226 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2227 2227
2228 2228 w = min(wa, wb)
2229 2229 if isonly(ta, tb):
2230 2230 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2231 2231 if isonly(tb, ta):
2232 2232 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2233 2233
2234 2234 if wa > wb:
2235 2235 return w, (op, tb, ta)
2236 2236 return w, (op, ta, tb)
2237 2237 elif op == 'or':
2238 2238 # fast path for machine-generated expressions, which are likely to have
2239 2239 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2240 2240 ws, ts, ss = [], [], []
2241 2241 def flushss():
2242 2242 if not ss:
2243 2243 return
2244 2244 if len(ss) == 1:
2245 2245 w, t = ss[0]
2246 2246 else:
2247 2247 s = '\0'.join(t[1] for w, t in ss)
2248 2248 y = ('func', ('symbol', '_list'), ('string', s))
2249 2249 w, t = optimize(y, False)
2250 2250 ws.append(w)
2251 2251 ts.append(t)
2252 2252 del ss[:]
2253 2253 for y in x[1:]:
2254 2254 w, t = optimize(y, False)
2255 2255 if t[0] == 'string' or t[0] == 'symbol':
2256 2256 ss.append((w, t))
2257 2257 continue
2258 2258 flushss()
2259 2259 ws.append(w)
2260 2260 ts.append(t)
2261 2261 flushss()
2262 2262 if len(ts) == 1:
2263 2263 return ws[0], ts[0] # 'or' operation is fully optimized out
2264 2264 # we can't reorder trees by weight because it would change the order.
2265 2265 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2266 2266 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2267 2267 return max(ws), (op,) + tuple(ts)
2268 2268 elif op == 'not':
2269 2269 # Optimize not public() to _notpublic() because we have a fast version
2270 2270 if x[1] == ('func', ('symbol', 'public'), None):
2271 2271 newsym = ('func', ('symbol', '_notpublic'), None)
2272 2272 o = optimize(newsym, not small)
2273 2273 return o[0], o[1]
2274 2274 else:
2275 2275 o = optimize(x[1], not small)
2276 2276 return o[0], (op, o[1])
2277 2277 elif op == 'parentpost':
2278 2278 o = optimize(x[1], small)
2279 2279 return o[0], (op, o[1])
2280 2280 elif op == 'group':
2281 2281 return optimize(x[1], small)
2282 2282 elif op in 'dagrange range list parent ancestorspec':
2283 2283 if op == 'parent':
2284 2284 # x^:y means (x^) : y, not x ^ (:y)
2285 2285 post = ('parentpost', x[1])
2286 2286 if x[2][0] == 'dagrangepre':
2287 2287 return optimize(('dagrange', post, x[2][1]), small)
2288 2288 elif x[2][0] == 'rangepre':
2289 2289 return optimize(('range', post, x[2][1]), small)
2290 2290
2291 2291 wa, ta = optimize(x[1], small)
2292 2292 wb, tb = optimize(x[2], small)
2293 2293 return wa + wb, (op, ta, tb)
2294 2294 elif op == 'func':
2295 2295 f = getstring(x[1], _("not a symbol"))
2296 2296 wa, ta = optimize(x[2], small)
2297 2297 if f in ("author branch closed date desc file grep keyword "
2298 2298 "outgoing user"):
2299 2299 w = 10 # slow
2300 2300 elif f in "modifies adds removes":
2301 2301 w = 30 # slower
2302 2302 elif f == "contains":
2303 2303 w = 100 # very slow
2304 2304 elif f == "ancestor":
2305 2305 w = 1 * smallbonus
2306 2306 elif f in "reverse limit first _intlist":
2307 2307 w = 0
2308 2308 elif f in "sort":
2309 2309 w = 10 # assume most sorts look at changelog
2310 2310 else:
2311 2311 w = 1
2312 2312 return w + wa, (op, x[1], ta)
2313 2313 return 1, x
2314 2314
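# Illustrative effect of the weights above: in 'contains(f) and branch(b)',
# contains() is weighted as very slow (100) while branch() is merely slow (10),
# so the 'and' case swaps the operands and the cheaper branch() filter runs
# first.
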
2315 2315 _aliasarg = ('func', ('symbol', '_aliasarg'))
2316 2316 def _getaliasarg(tree):
2317 2317 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2318 2318 return X, None otherwise.
2319 2319 """
2320 2320 if (len(tree) == 3 and tree[:2] == _aliasarg
2321 2321 and tree[2][0] == 'string'):
2322 2322 return tree[2][1]
2323 2323 return None
2324 2324
2325 2325 def _checkaliasarg(tree, known=None):
2326 2326 """Check tree contains no _aliasarg construct or only ones which
2327 2327 value is in known. Used to avoid alias placeholders injection.
2328 2328 """
2329 2329 if isinstance(tree, tuple):
2330 2330 arg = _getaliasarg(tree)
2331 2331 if arg is not None and (not known or arg not in known):
2332 2332 raise error.UnknownIdentifier('_aliasarg', [])
2333 2333 for t in tree:
2334 2334 _checkaliasarg(t, known)
2335 2335
2336 2336 # the set of valid characters for the initial letter of symbols in
2337 2337 # alias declarations and definitions
2338 2338 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2339 2339 if c.isalnum() or c in '._@$' or ord(c) > 127)
2340 2340
2341 2341 def _tokenizealias(program, lookup=None):
2342 2342 """Parse alias declaration/definition into a stream of tokens
2343 2343
2344 2344 This also allows symbol names to use ``$`` as an initial letter
2345 2345 (for backward compatibility), so callers of this function should
2346 2346 examine whether ``$`` is also used for unexpected symbols.
2347 2347 """
2348 2348 return tokenize(program, lookup=lookup,
2349 2349 syminitletters=_aliassyminitletters)
2350 2350
2351 2351 def _parsealiasdecl(decl):
2352 2352 """Parse alias declaration ``decl``
2353 2353
2354 2354 This returns ``(name, tree, args, errorstr)`` tuple:
2355 2355
2356 2356 - ``name``: of declared alias (may be ``decl`` itself at error)
2357 2357 - ``tree``: parse result (or ``None`` at error)
2358 2358 - ``args``: list of alias argument names (or None for symbol declaration)
2359 2359 - ``errorstr``: detail about detected error (or None)
2360 2360
2361 2361 >>> _parsealiasdecl('foo')
2362 2362 ('foo', ('symbol', 'foo'), None, None)
2363 2363 >>> _parsealiasdecl('$foo')
2364 2364 ('$foo', None, None, "'$' not for alias arguments")
2365 2365 >>> _parsealiasdecl('foo::bar')
2366 2366 ('foo::bar', None, None, 'invalid format')
2367 2367 >>> _parsealiasdecl('foo bar')
2368 2368 ('foo bar', None, None, 'at 4: invalid token')
2369 2369 >>> _parsealiasdecl('foo()')
2370 2370 ('foo', ('func', ('symbol', 'foo')), [], None)
2371 2371 >>> _parsealiasdecl('$foo()')
2372 2372 ('$foo()', None, None, "'$' not for alias arguments")
2373 2373 >>> _parsealiasdecl('foo($1, $2)')
2374 2374 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2375 2375 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2376 2376 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2377 2377 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2378 2378 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2379 2379 >>> _parsealiasdecl('foo(bar($1, $2))')
2380 2380 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2381 2381 >>> _parsealiasdecl('foo("string")')
2382 2382 ('foo("string")', None, None, 'invalid argument list')
2383 2383 >>> _parsealiasdecl('foo($1, $2')
2384 2384 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2385 2385 >>> _parsealiasdecl('foo("string')
2386 2386 ('foo("string', None, None, 'at 5: unterminated string')
2387 2387 >>> _parsealiasdecl('foo($1, $2, $1)')
2388 2388 ('foo', None, None, 'argument names collide with each other')
2389 2389 """
2390 2390 p = parser.parser(_tokenizealias, elements)
2391 2391 try:
2392 2392 tree, pos = p.parse(decl)
2393 2393 if (pos != len(decl)):
2394 2394 raise error.ParseError(_('invalid token'), pos)
2395 2395
2396 2396 if isvalidsymbol(tree):
2397 2397 # "name = ...." style
2398 2398 name = getsymbol(tree)
2399 2399 if name.startswith('$'):
2400 2400 return (decl, None, None, _("'$' not for alias arguments"))
2401 2401 return (name, ('symbol', name), None, None)
2402 2402
2403 2403 if isvalidfunc(tree):
2404 2404 # "name(arg, ....) = ...." style
2405 2405 name = getfuncname(tree)
2406 2406 if name.startswith('$'):
2407 2407 return (decl, None, None, _("'$' not for alias arguments"))
2408 2408 args = []
2409 2409 for arg in getfuncargs(tree):
2410 2410 if not isvalidsymbol(arg):
2411 2411 return (decl, None, None, _("invalid argument list"))
2412 2412 args.append(getsymbol(arg))
2413 2413 if len(args) != len(set(args)):
2414 2414 return (name, None, None,
2415 2415 _("argument names collide with each other"))
2416 2416 return (name, ('func', ('symbol', name)), args, None)
2417 2417
2418 2418 return (decl, None, None, _("invalid format"))
2419 2419 except error.ParseError, inst:
2420 2420 return (decl, None, None, parseerrordetail(inst))
2421 2421
2422 2422 def _parsealiasdefn(defn, args):
2423 2423 """Parse alias definition ``defn``
2424 2424
2425 2425 This function also replaces alias argument references in the
2426 2426 specified definition by ``_aliasarg(ARGNAME)``.
2427 2427
2428 2428 ``args`` is a list of alias argument names, or None if the alias
2429 2429 is declared as a symbol.
2430 2430
2431 2431 This returns "tree" as parsing result.
2432 2432
2433 2433 >>> args = ['$1', '$2', 'foo']
2434 2434 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2435 2435 (or
2436 2436 (func
2437 2437 ('symbol', '_aliasarg')
2438 2438 ('string', '$1'))
2439 2439 (func
2440 2440 ('symbol', '_aliasarg')
2441 2441 ('string', 'foo')))
2442 2442 >>> try:
2443 2443 ... _parsealiasdefn('$1 or $bar', args)
2444 2444 ... except error.ParseError, inst:
2445 2445 ... print parseerrordetail(inst)
2446 2446 at 6: '$' not for alias arguments
2447 2447 >>> args = ['$1', '$10', 'foo']
2448 2448 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2449 2449 (or
2450 2450 (func
2451 2451 ('symbol', '_aliasarg')
2452 2452 ('string', '$10'))
2453 2453 ('symbol', 'foobar'))
2454 2454 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2455 2455 (or
2456 2456 ('string', '$1')
2457 2457 ('string', 'foo'))
2458 2458 """
2459 2459 def tokenizedefn(program, lookup=None):
2460 2460 if args:
2461 2461 argset = set(args)
2462 2462 else:
2463 2463 argset = set()
2464 2464
2465 2465 for t, value, pos in _tokenizealias(program, lookup=lookup):
2466 2466 if t == 'symbol':
2467 2467 if value in argset:
2468 2468 # emulate tokenization of "_aliasarg('ARGNAME')":
2469 2469 # "_aliasarg()" is an unknown symbol only used separate
2470 2470 # alias argument placeholders from regular strings.
2471 2471 yield ('symbol', '_aliasarg', pos)
2472 2472 yield ('(', None, pos)
2473 2473 yield ('string', value, pos)
2474 2474 yield (')', None, pos)
2475 2475 continue
2476 2476 elif value.startswith('$'):
2477 2477 raise error.ParseError(_("'$' not for alias arguments"),
2478 2478 pos)
2479 2479 yield (t, value, pos)
2480 2480
2481 2481 p = parser.parser(tokenizedefn, elements)
2482 2482 tree, pos = p.parse(defn)
2483 2483 if pos != len(defn):
2484 2484 raise error.ParseError(_('invalid token'), pos)
2485 2485 return parser.simplifyinfixops(tree, ('or',))
2486 2486
2487 2487 class revsetalias(object):
2488 2488 # whether the alias's own `error` information has already been shown.
2489 2489 # this avoids showing the same warning multiple times for each `findaliases`.
2490 2490 warned = False
2491 2491
2492 2492 def __init__(self, name, value):
2493 2493 '''Aliases like:
2494 2494
2495 2495 h = heads(default)
2496 2496 b($1) = ancestors($1) - ancestors(default)
2497 2497 '''
2498 2498 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2499 2499 if self.error:
2500 2500 self.error = _('failed to parse the declaration of revset alias'
2501 2501 ' "%s": %s') % (self.name, self.error)
2502 2502 return
2503 2503
2504 2504 try:
2505 2505 self.replacement = _parsealiasdefn(value, self.args)
2506 2506 # Check for placeholder injection
2507 2507 _checkaliasarg(self.replacement, self.args)
2508 2508 except error.ParseError, inst:
2509 2509 self.error = _('failed to parse the definition of revset alias'
2510 2510 ' "%s": %s') % (self.name, parseerrordetail(inst))
2511 2511
2512 2512 def _getalias(aliases, tree):
2513 2513 """If tree looks like an unexpanded alias, return it. Return None
2514 2514 otherwise.
2515 2515 """
2516 2516 if isinstance(tree, tuple) and tree:
2517 2517 if tree[0] == 'symbol' and len(tree) == 2:
2518 2518 name = tree[1]
2519 2519 alias = aliases.get(name)
2520 2520 if alias and alias.args is None and alias.tree == tree:
2521 2521 return alias
2522 2522 if tree[0] == 'func' and len(tree) > 1:
2523 2523 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2524 2524 name = tree[1][1]
2525 2525 alias = aliases.get(name)
2526 2526 if alias and alias.args is not None and alias.tree == tree[:2]:
2527 2527 return alias
2528 2528 return None
2529 2529
2530 2530 def _expandargs(tree, args):
2531 2531 """Replace _aliasarg instances with the substitution value of the
2532 2532 same name in args, recursively.
2533 2533 """
2534 2534 if not tree or not isinstance(tree, tuple):
2535 2535 return tree
2536 2536 arg = _getaliasarg(tree)
2537 2537 if arg is not None:
2538 2538 return args[arg]
2539 2539 return tuple(_expandargs(t, args) for t in tree)
2540 2540
2541 2541 def _expandaliases(aliases, tree, expanding, cache):
2542 2542 """Expand aliases in tree, recursively.
2543 2543
2544 2544 'aliases' is a dictionary mapping user defined aliases to
2545 2545 revsetalias objects.
2546 2546 """
2547 2547 if not isinstance(tree, tuple):
2548 2548 # Do not expand raw strings
2549 2549 return tree
2550 2550 alias = _getalias(aliases, tree)
2551 2551 if alias is not None:
2552 2552 if alias.error:
2553 2553 raise util.Abort(alias.error)
2554 2554 if alias in expanding:
2555 2555 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2556 2556 'detected') % alias.name)
2557 2557 expanding.append(alias)
2558 2558 if alias.name not in cache:
2559 2559 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2560 2560 expanding, cache)
2561 2561 result = cache[alias.name]
2562 2562 expanding.pop()
2563 2563 if alias.args is not None:
2564 2564 l = getlist(tree[2])
2565 2565 if len(l) != len(alias.args):
2566 2566 raise error.ParseError(
2567 2567 _('invalid number of arguments: %s') % len(l))
2568 2568 l = [_expandaliases(aliases, a, [], cache) for a in l]
2569 2569 result = _expandargs(result, dict(zip(alias.args, l)))
2570 2570 else:
2571 2571 result = tuple(_expandaliases(aliases, t, expanding, cache)
2572 2572 for t in tree)
2573 2573 return result
2574 2574
2575 2575 def findaliases(ui, tree, showwarning=None):
2576 2576 _checkaliasarg(tree)
2577 2577 aliases = {}
2578 2578 for k, v in ui.configitems('revsetalias'):
2579 2579 alias = revsetalias(k, v)
2580 2580 aliases[alias.name] = alias
2581 2581 tree = _expandaliases(aliases, tree, [], {})
2582 2582 if showwarning:
2583 2583 # warn about problematic (but not referenced) aliases
2584 2584 for name, alias in sorted(aliases.iteritems()):
2585 2585 if alias.error and not alias.warned:
2586 2586 showwarning(_('warning: %s\n') % (alias.error))
2587 2587 alias.warned = True
2588 2588 return tree
2589 2589
2590 2590 def foldconcat(tree):
2591 2591 """Fold elements to be concatenated by `##`
2592 2592 """
2593 2593 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2594 2594 return tree
2595 2595 if tree[0] == '_concat':
2596 2596 pending = [tree]
2597 2597 l = []
2598 2598 while pending:
2599 2599 e = pending.pop()
2600 2600 if e[0] == '_concat':
2601 2601 pending.extend(reversed(e[1:]))
2602 2602 elif e[0] in ('string', 'symbol'):
2603 2603 l.append(e[1])
2604 2604 else:
2605 2605 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2606 2606 raise error.ParseError(msg)
2607 2607 return ('string', ''.join(l))
2608 2608 else:
2609 2609 return tuple(foldconcat(t) for t in tree)
2610 2610
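# Sketch of the folding above: a parsed tree such as
# ('_concat', ('string', 'a'), ('symbol', 'b')) collapses into the single node
# ('string', 'ab'), so '"a" ## b' behaves like the literal string 'ab'.
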
2611 2611 def parse(spec, lookup=None):
2612 2612 p = parser.parser(tokenize, elements)
2613 2613 tree, pos = p.parse(spec, lookup=lookup)
2614 2614 if pos != len(spec):
2615 2615 raise error.ParseError(_("invalid token"), pos)
2616 2616 return parser.simplifyinfixops(tree, ('or',))
2617 2617
2618 2618 def posttreebuilthook(tree, repo):
2619 2619 # hook for extensions to execute code on the optimized tree
2620 2620 pass
2621 2621
2622 2622 def match(ui, spec, repo=None):
2623 2623 if not spec:
2624 2624 raise error.ParseError(_("empty query"))
2625 2625 lookup = None
2626 2626 if repo:
2627 2627 lookup = repo.__contains__
2628 2628 tree = parse(spec, lookup)
2629 2629 if ui:
2630 2630 tree = findaliases(ui, tree, showwarning=ui.warn)
2631 2631 tree = foldconcat(tree)
2632 2632 weight, tree = optimize(tree, True)
2633 2633 posttreebuilthook(tree, repo)
2634 2634 def mfunc(repo, subset=None):
2635 2635 if subset is None:
2636 2636 subset = fullreposet(repo)
2637 2637 if util.safehasattr(subset, 'isascending'):
2638 2638 result = getset(repo, subset, tree)
2639 2639 else:
2640 2640 result = getset(repo, baseset(subset), tree)
2641 2641 return result
2642 2642 return mfunc
2643 2643
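# Typical usage sketch (names are illustrative): 'm = match(ui, "draft() and
# head()")' builds the matcher once, and 'm(repo)' then returns a smartset of
# the matching revisions, defaulting to the full repository as the subset.
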
2644 2644 def formatspec(expr, *args):
2645 2645 '''
2646 2646 This is a convenience function for using revsets internally, and
2647 2647 escapes arguments appropriately. Aliases are intentionally ignored
2648 2648 so that intended expression behavior isn't accidentally subverted.
2649 2649
2650 2650 Supported arguments:
2651 2651
2652 2652 %r = revset expression, parenthesized
2653 2653 %d = int(arg), no quoting
2654 2654 %s = string(arg), escaped and single-quoted
2655 2655 %b = arg.branch(), escaped and single-quoted
2656 2656 %n = hex(arg), single-quoted
2657 2657 %% = a literal '%'
2658 2658
2659 2659 Prefixing the type with 'l' specifies a parenthesized list of that type.
2660 2660
2661 2661 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2662 2662 '(10 or 11):: and ((this()) or (that()))'
2663 2663 >>> formatspec('%d:: and not %d::', 10, 20)
2664 2664 '10:: and not 20::'
2665 2665 >>> formatspec('%ld or %ld', [], [1])
2666 2666 "_list('') or 1"
2667 2667 >>> formatspec('keyword(%s)', 'foo\\xe9')
2668 2668 "keyword('foo\\\\xe9')"
2669 2669 >>> b = lambda: 'default'
2670 2670 >>> b.branch = b
2671 2671 >>> formatspec('branch(%b)', b)
2672 2672 "branch('default')"
2673 2673 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2674 2674 "root(_list('a\\x00b\\x00c\\x00d'))"
2675 2675 '''
2676 2676
2677 2677 def quote(s):
2678 2678 return repr(str(s))
2679 2679
2680 2680 def argtype(c, arg):
2681 2681 if c == 'd':
2682 2682 return str(int(arg))
2683 2683 elif c == 's':
2684 2684 return quote(arg)
2685 2685 elif c == 'r':
2686 2686 parse(arg) # make sure syntax errors are confined
2687 2687 return '(%s)' % arg
2688 2688 elif c == 'n':
2689 2689 return quote(node.hex(arg))
2690 2690 elif c == 'b':
2691 2691 return quote(arg.branch())
2692 2692
2693 2693 def listexp(s, t):
2694 2694 l = len(s)
2695 2695 if l == 0:
2696 2696 return "_list('')"
2697 2697 elif l == 1:
2698 2698 return argtype(t, s[0])
2699 2699 elif t == 'd':
2700 2700 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2701 2701 elif t == 's':
2702 2702 return "_list('%s')" % "\0".join(s)
2703 2703 elif t == 'n':
2704 2704 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2705 2705 elif t == 'b':
2706 2706 return "_list('%s')" % "\0".join(a.branch() for a in s)
2707 2707
2708 2708 m = l // 2
2709 2709 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2710 2710
2711 2711 ret = ''
2712 2712 pos = 0
2713 2713 arg = 0
2714 2714 while pos < len(expr):
2715 2715 c = expr[pos]
2716 2716 if c == '%':
2717 2717 pos += 1
2718 2718 d = expr[pos]
2719 2719 if d == '%':
2720 2720 ret += d
2721 2721 elif d in 'dsnbr':
2722 2722 ret += argtype(d, args[arg])
2723 2723 arg += 1
2724 2724 elif d == 'l':
2725 2725 # a list of some type
2726 2726 pos += 1
2727 2727 d = expr[pos]
2728 2728 ret += listexp(list(args[arg]), d)
2729 2729 arg += 1
2730 2730 else:
2731 2731 raise util.Abort('unexpected revspec format character %s' % d)
2732 2732 else:
2733 2733 ret += c
2734 2734 pos += 1
2735 2735
2736 2736 return ret
2737 2737
2738 2738 def prettyformat(tree):
2739 2739 return parser.prettyformat(tree, ('string', 'symbol'))
2740 2740
2741 2741 def depth(tree):
2742 2742 if isinstance(tree, tuple):
2743 2743 return max(map(depth, tree)) + 1
2744 2744 else:
2745 2745 return 0
2746 2746
2747 2747 def funcsused(tree):
2748 2748 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2749 2749 return set()
2750 2750 else:
2751 2751 funcs = set()
2752 2752 for s in tree[1:]:
2753 2753 funcs |= funcsused(s)
2754 2754 if tree[0] == 'func':
2755 2755 funcs.add(tree[1][1])
2756 2756 return funcs
2757 2757
2758 2758 class abstractsmartset(object):
2759 2759
2760 2760 def __nonzero__(self):
2761 2761 """True if the smartset is not empty"""
2762 2762 raise NotImplementedError()
2763 2763
2764 2764 def __contains__(self, rev):
2765 2765 """provide fast membership testing"""
2766 2766 raise NotImplementedError()
2767 2767
2768 2768 def __iter__(self):
2769 2769 """iterate the set in the order it is supposed to be iterated"""
2770 2770 raise NotImplementedError()
2771 2771
2772 2772 # Attributes containing a function to perform a fast iteration in a given
2773 2773 # direction. A smartset can have none, one, or both defined.
2774 2774 #
2775 2775 # Default value is None instead of a function returning None to avoid
2776 2776 # initializing an iterator just for testing if a fast method exists.
2777 2777 fastasc = None
2778 2778 fastdesc = None
2779 2779
2780 2780 def isascending(self):
2781 2781 """True if the set will iterate in ascending order"""
2782 2782 raise NotImplementedError()
2783 2783
2784 2784 def isdescending(self):
2785 2785 """True if the set will iterate in descending order"""
2786 2786 raise NotImplementedError()
2787 2787
2788 2788 def min(self):
2789 2789 """return the minimum element in the set"""
2790 2790 if self.fastasc is not None:
2791 2791 for r in self.fastasc():
2792 2792 return r
2793 2793 raise ValueError('arg is an empty sequence')
2794 2794 return min(self)
2795 2795
2796 2796 def max(self):
2797 2797 """return the maximum element in the set"""
2798 2798 if self.fastdesc is not None:
2799 2799 for r in self.fastdesc():
2800 2800 return r
2801 2801 raise ValueError('arg is an empty sequence')
2802 2802 return max(self)
2803 2803
2804 2804 def first(self):
2805 2805 """return the first element in the set (user iteration perspective)
2806 2806
2807 2807 Return None if the set is empty"""
2808 2808 raise NotImplementedError()
2809 2809
2810 2810 def last(self):
2811 2811 """return the last element in the set (user iteration perspective)
2812 2812
2813 2813 Return None if the set is empty"""
2814 2814 raise NotImplementedError()
2815 2815
2816 2816 def __len__(self):
2817 2817 """return the length of the smartsets
2818 2818
2819 2819 This can be expensive on a smartset that could otherwise be lazy."""
2820 2820 raise NotImplementedError()
2821 2821
2822 2822 def reverse(self):
2823 2823 """reverse the expected iteration order"""
2824 2824 raise NotImplementedError()
2825 2825
2826 2826 def sort(self, reverse=True):
2827 2827 """get the set to iterate in an ascending or descending order"""
2828 2828 raise NotImplementedError()
2829 2829
2830 2830 def __and__(self, other):
2831 2831 """Returns a new object with the intersection of the two collections.
2832 2832
2833 2833 This is part of the mandatory API for smartset."""
2834 2834 if isinstance(other, fullreposet):
2835 2835 return self
2836 2836 return self.filter(other.__contains__, cache=False)
2837 2837
2838 2838 def __add__(self, other):
2839 2839 """Returns a new object with the union of the two collections.
2840 2840
2841 2841 This is part of the mandatory API for smartset."""
2842 2842 return addset(self, other)
2843 2843
2844 2844 def __sub__(self, other):
2845 2845 """Returns a new object with the substraction of the two collections.
2846 2846
2847 2847 This is part of the mandatory API for smartset."""
2848 2848 c = other.__contains__
2849 2849 return self.filter(lambda r: not c(r), cache=False)
2850 2850
2851 2851 def filter(self, condition, cache=True):
2852 2852 """Returns this smartset filtered by condition as a new smartset.
2853 2853
2854 2854 `condition` is a callable which takes a revision number and returns a
2855 2855 boolean.
2856 2856
2857 2857 This is part of the mandatory API for smartset."""
2858 2858 # builtin functions cannot be cached, but they do not need to be
2859 2859 if cache and util.safehasattr(condition, 'func_code'):
2860 2860 condition = util.cachefunc(condition)
2861 2861 return filteredset(self, condition)
2862 2862
2863 2863 class baseset(abstractsmartset):
2864 2864 """Basic data structure that represents a revset and contains the basic
2865 2865 operations that it should be able to perform.
2866 2866
2867 2867 Every method in this class should be implemented by any smartset class.
2868 2868 """
2869 2869 def __init__(self, data=()):
2870 2870 if not isinstance(data, list):
2871 2871 data = list(data)
2872 2872 self._list = data
2873 2873 self._ascending = None
2874 2874
2875 2875 @util.propertycache
2876 2876 def _set(self):
2877 2877 return set(self._list)
2878 2878
2879 2879 @util.propertycache
2880 2880 def _asclist(self):
2881 2881 asclist = self._list[:]
2882 2882 asclist.sort()
2883 2883 return asclist
2884 2884
2885 2885 def __iter__(self):
2886 2886 if self._ascending is None:
2887 2887 return iter(self._list)
2888 2888 elif self._ascending:
2889 2889 return iter(self._asclist)
2890 2890 else:
2891 2891 return reversed(self._asclist)
2892 2892
2893 2893 def fastasc(self):
2894 2894 return iter(self._asclist)
2895 2895
2896 2896 def fastdesc(self):
2897 2897 return reversed(self._asclist)
2898 2898
2899 2899 @util.propertycache
2900 2900 def __contains__(self):
2901 2901 return self._set.__contains__
2902 2902
2903 2903 def __nonzero__(self):
2904 2904 return bool(self._list)
2905 2905
2906 2906 def sort(self, reverse=False):
2907 2907 self._ascending = not bool(reverse)
2908 2908
2909 2909 def reverse(self):
2910 2910 if self._ascending is None:
2911 2911 self._list.reverse()
2912 2912 else:
2913 2913 self._ascending = not self._ascending
2914 2914
2915 2915 def __len__(self):
2916 2916 return len(self._list)
2917 2917
2918 2918 def isascending(self):
2919 2919 """Returns True if the collection is ascending order, False if not.
2920 2920
2921 2921 This is part of the mandatory API for smartset."""
2922 2922 if len(self) <= 1:
2923 2923 return True
2924 2924 return self._ascending is not None and self._ascending
2925 2925
2926 2926 def isdescending(self):
2927 2927 """Returns True if the collection is descending order, False if not.
2928 2928
2929 2929 This is part of the mandatory API for smartset."""
2930 2930 if len(self) <= 1:
2931 2931 return True
2932 2932 return self._ascending is not None and not self._ascending
2933 2933
2934 2934 def first(self):
2935 2935 if self:
2936 2936 if self._ascending is None:
2937 2937 return self._list[0]
2938 2938 elif self._ascending:
2939 2939 return self._asclist[0]
2940 2940 else:
2941 2941 return self._asclist[-1]
2942 2942 return None
2943 2943
2944 2944 def last(self):
2945 2945 if self:
2946 2946 if self._ascending is None:
2947 2947 return self._list[-1]
2948 2948 elif self._ascending:
2949 2949 return self._asclist[-1]
2950 2950 else:
2951 2951 return self._asclist[0]
2952 2952 return None
2953 2953
2954 2954 def __repr__(self):
2955 2955 d = {None: '', False: '-', True: '+'}[self._ascending]
2956 2956 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2957 2957
2958 2958 class filteredset(abstractsmartset):
2959 2959 """Duck type for baseset class which iterates lazily over the revisions in
2960 2960 the subset and contains a function which tests for membership in the
2961 2961 revset
2962 2962 """
2963 2963 def __init__(self, subset, condition=lambda x: True):
2964 2964 """
2965 2965 condition: a function that decides whether a revision in the subset
2966 2966 belongs to the revset or not.
2967 2967 """
2968 2968 self._subset = subset
2969 2969 self._condition = condition
2970 2970 self._cache = {}
2971 2971
2972 2972 def __contains__(self, x):
2973 2973 c = self._cache
2974 2974 if x not in c:
2975 2975 v = c[x] = x in self._subset and self._condition(x)
2976 2976 return v
2977 2977 return c[x]
2978 2978
2979 2979 def __iter__(self):
2980 2980 return self._iterfilter(self._subset)
2981 2981
2982 2982 def _iterfilter(self, it):
2983 2983 cond = self._condition
2984 2984 for x in it:
2985 2985 if cond(x):
2986 2986 yield x
2987 2987
2988 2988 @property
2989 2989 def fastasc(self):
2990 2990 it = self._subset.fastasc
2991 2991 if it is None:
2992 2992 return None
2993 2993 return lambda: self._iterfilter(it())
2994 2994
2995 2995 @property
2996 2996 def fastdesc(self):
2997 2997 it = self._subset.fastdesc
2998 2998 if it is None:
2999 2999 return None
3000 3000 return lambda: self._iterfilter(it())
3001 3001
3002 3002 def __nonzero__(self):
3003 3003 for r in self:
3004 3004 return True
3005 3005 return False
3006 3006
3007 3007 def __len__(self):
3008 3008 # Basic implementation to be changed in future patches.
3009 3009 l = baseset([r for r in self])
3010 3010 return len(l)
3011 3011
3012 3012 def sort(self, reverse=False):
3013 3013 self._subset.sort(reverse=reverse)
3014 3014
3015 3015 def reverse(self):
3016 3016 self._subset.reverse()
3017 3017
3018 3018 def isascending(self):
3019 3019 return self._subset.isascending()
3020 3020
3021 3021 def isdescending(self):
3022 3022 return self._subset.isdescending()
3023 3023
3024 3024 def first(self):
3025 3025 for x in self:
3026 3026 return x
3027 3027 return None
3028 3028
3029 3029 def last(self):
3030 3030 it = None
3031 if self._subset.isascending:
3032 it = self.fastdesc
3033 elif self._subset.isdescending:
3031 if self.isascending():
3034 3032 it = self.fastdesc
3035 if it is None:
3036 # slowly consume everything. This needs improvement
3037 it = lambda: reversed(list(self))
3033 elif self.isdescending():
3034 it = self.fastasc
3035 if it is not None:
3038 3036 for x in it():
3039 3037 return x
3040 return None
3038 return None #empty case
3039 else:
3040 x = None
3041 for x in self:
3042 pass
3043 return x
3041 3044
3042 3045 def __repr__(self):
3043 3046 return '<%s %r>' % (type(self).__name__, self._subset)
3044 3047
3045 3048 # this function will be removed, or merged to addset or orset, when
3046 3049 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3047 3050 # - or addset can handle more than two sets without balanced tree
3048 3051 def _combinesets(subsets):
3049 3052 """Create balanced tree of addsets representing union of given sets"""
3050 3053 if not subsets:
3051 3054 return baseset()
3052 3055 if len(subsets) == 1:
3053 3056 return subsets[0]
3054 3057 p = len(subsets) // 2
3055 3058 xs = _combinesets(subsets[:p])
3056 3059 ys = _combinesets(subsets[p:])
3057 3060 return addset(xs, ys)
3058 3061
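# Sketch of the balanced combination above: for four sets s0..s3 the result is
# addset(addset(s0, s1), addset(s2, s3)), keeping the tree depth logarithmic
# in the number of input sets.
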
3059 3062 def _iterordered(ascending, iter1, iter2):
3060 3063 """produce an ordered iteration from two iterators with the same order
3061 3064
3062 3065 The ascending parameter indicates the iteration direction.
3063 3066 """
3064 3067 choice = max
3065 3068 if ascending:
3066 3069 choice = min
3067 3070
3068 3071 val1 = None
3069 3072 val2 = None
3070 3073 try:
3071 3074 # Consume both iterators in an ordered way until one is empty
3072 3075 while True:
3073 3076 if val1 is None:
3074 3077 val1 = iter1.next()
3075 3078 if val2 is None:
3076 3079 val2 = iter2.next()
3077 3080 next = choice(val1, val2)
3078 3081 yield next
3079 3082 if val1 == next:
3080 3083 val1 = None
3081 3084 if val2 == next:
3082 3085 val2 = None
3083 3086 except StopIteration:
3084 3087 # Flush any remaining values and consume the other one
3085 3088 it = iter2
3086 3089 if val1 is not None:
3087 3090 yield val1
3088 3091 it = iter1
3089 3092 elif val2 is not None:
3090 3093 # might have been equality and both are empty
3091 3094 yield val2
3092 3095 for val in it:
3093 3096 yield val
3094 3097
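# Illustrative example of the ordered merge above (values chosen for the
# example): duplicates shared by both iterators are yielded only once.
#
#   list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#   # -> [1, 2, 3, 5, 6]
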
3095 3098 class addset(abstractsmartset):
3096 3099 """Represent the addition of two sets
3097 3100
3098 3101 Wrapper structure for lazily adding two structures without losing much
3099 3102 performance on the __contains__ method
3100 3103
3101 3104 If the ascending attribute is set, that means the two structures are
3102 3105 ordered in either an ascending or descending way. Therefore, we can add
3103 3106 them maintaining the order by iterating over both at the same time
3104 3107
3105 3108 >>> xs = baseset([0, 3, 2])
3106 3109 >>> ys = baseset([5, 2, 4])
3107 3110
3108 3111 >>> rs = addset(xs, ys)
3109 3112 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3110 3113 (True, True, False, True, 0, 4)
3111 3114 >>> rs = addset(xs, baseset([]))
3112 3115 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3113 3116 (True, True, False, 0, 2)
3114 3117 >>> rs = addset(baseset([]), baseset([]))
3115 3118 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3116 3119 (False, False, None, None)
3117 3120
3118 3121 iterate unsorted:
3119 3122 >>> rs = addset(xs, ys)
3120 3123 >>> [x for x in rs] # without _genlist
3121 3124 [0, 3, 2, 5, 4]
3122 3125 >>> assert not rs._genlist
3123 3126 >>> len(rs)
3124 3127 5
3125 3128 >>> [x for x in rs] # with _genlist
3126 3129 [0, 3, 2, 5, 4]
3127 3130 >>> assert rs._genlist
3128 3131
3129 3132 iterate ascending:
3130 3133 >>> rs = addset(xs, ys, ascending=True)
3131 3134 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3132 3135 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3133 3136 >>> assert not rs._asclist
3134 3137 >>> len(rs)
3135 3138 5
3136 3139 >>> [x for x in rs], [x for x in rs.fastasc()]
3137 3140 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3138 3141 >>> assert rs._asclist
3139 3142
3140 3143 iterate descending:
3141 3144 >>> rs = addset(xs, ys, ascending=False)
3142 3145 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3143 3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3144 3147 >>> assert not rs._asclist
3145 3148 >>> len(rs)
3146 3149 5
3147 3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3148 3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3149 3152 >>> assert rs._asclist
3150 3153
3151 3154 iterate ascending without fastasc:
3152 3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3153 3156 >>> assert rs.fastasc is None
3154 3157 >>> [x for x in rs]
3155 3158 [0, 2, 3, 4, 5]
3156 3159
3157 3160 iterate descending without fastdesc:
3158 3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3159 3162 >>> assert rs.fastdesc is None
3160 3163 >>> [x for x in rs]
3161 3164 [5, 4, 3, 2, 0]
3162 3165 """
3163 3166 def __init__(self, revs1, revs2, ascending=None):
3164 3167 self._r1 = revs1
3165 3168 self._r2 = revs2
3166 3169 self._iter = None
3167 3170 self._ascending = ascending
3168 3171 self._genlist = None
3169 3172 self._asclist = None
3170 3173
3171 3174 def __len__(self):
3172 3175 return len(self._list)
3173 3176
3174 3177 def __nonzero__(self):
3175 3178 return bool(self._r1) or bool(self._r2)
3176 3179
3177 3180 @util.propertycache
3178 3181 def _list(self):
3179 3182 if not self._genlist:
3180 3183 self._genlist = baseset(iter(self))
3181 3184 return self._genlist
3182 3185
3183 3186 def __iter__(self):
3184 3187 """Iterate over both collections without repeating elements
3185 3188
3186 3189 If the ascending attribute is not set, iterate over the first one and
3187 3190 then over the second one checking for membership on the first one so we
3188 3191 don't yield any duplicates.
3189 3192
3190 3193 If the ascending attribute is set, iterate over both collections at the
3191 3194 same time, yielding only one value at a time in the given order.
3192 3195 """
3193 3196 if self._ascending is None:
3194 3197 if self._genlist:
3195 3198 return iter(self._genlist)
3196 3199 def arbitraryordergen():
3197 3200 for r in self._r1:
3198 3201 yield r
3199 3202 inr1 = self._r1.__contains__
3200 3203 for r in self._r2:
3201 3204 if not inr1(r):
3202 3205 yield r
3203 3206 return arbitraryordergen()
3204 3207 # try to use our own fast iterator if it exists
3205 3208 self._trysetasclist()
3206 3209 if self._ascending:
3207 3210 attr = 'fastasc'
3208 3211 else:
3209 3212 attr = 'fastdesc'
3210 3213 it = getattr(self, attr)
3211 3214 if it is not None:
3212 3215 return it()
3213 3216 # maybe only one of the two components supports fast iteration
3214 3217 # get iterator for _r1
3215 3218 iter1 = getattr(self._r1, attr)
3216 3219 if iter1 is None:
3217 3220 # let's avoid side effects (not sure it matters)
3218 3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3219 3222 else:
3220 3223 iter1 = iter1()
3221 3224 # get iterator for _r2
3222 3225 iter2 = getattr(self._r2, attr)
3223 3226 if iter2 is None:
3224 3227 # let's avoid side effects (not sure it matters)
3225 3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3226 3229 else:
3227 3230 iter2 = iter2()
3228 3231 return _iterordered(self._ascending, iter1, iter2)
3229 3232
3230 3233 def _trysetasclist(self):
3231 3234 """populate the _asclist attribute if possible and necessary"""
3232 3235 if self._genlist is not None and self._asclist is None:
3233 3236 self._asclist = sorted(self._genlist)
3234 3237
3235 3238 @property
3236 3239 def fastasc(self):
3237 3240 self._trysetasclist()
3238 3241 if self._asclist is not None:
3239 3242 return self._asclist.__iter__
3240 3243 iter1 = self._r1.fastasc
3241 3244 iter2 = self._r2.fastasc
3242 3245 if None in (iter1, iter2):
3243 3246 return None
3244 3247 return lambda: _iterordered(True, iter1(), iter2())
3245 3248
3246 3249 @property
3247 3250 def fastdesc(self):
3248 3251 self._trysetasclist()
3249 3252 if self._asclist is not None:
3250 3253 return self._asclist.__reversed__
3251 3254 iter1 = self._r1.fastdesc
3252 3255 iter2 = self._r2.fastdesc
3253 3256 if None in (iter1, iter2):
3254 3257 return None
3255 3258 return lambda: _iterordered(False, iter1(), iter2())
3256 3259
3257 3260 def __contains__(self, x):
3258 3261 return x in self._r1 or x in self._r2
3259 3262
3260 3263 def sort(self, reverse=False):
3261 3264 """Sort the added set
3262 3265
3263 3266 For this we use the cached list with all the generated values and if we
3264 3267 know they are ascending or descending we can sort them in a smart way.
3265 3268 """
3266 3269 self._ascending = not reverse
3267 3270
3268 3271 def isascending(self):
3269 3272 return self._ascending is not None and self._ascending
3270 3273
3271 3274 def isdescending(self):
3272 3275 return self._ascending is not None and not self._ascending
3273 3276
3274 3277 def reverse(self):
3275 3278 if self._ascending is None:
3276 3279 self._list.reverse()
3277 3280 else:
3278 3281 self._ascending = not self._ascending
3279 3282
3280 3283 def first(self):
3281 3284 for x in self:
3282 3285 return x
3283 3286 return None
3284 3287
3285 3288 def last(self):
3286 3289 self.reverse()
3287 3290 val = self.first()
3288 3291 self.reverse()
3289 3292 return val
3290 3293
3291 3294 def __repr__(self):
3292 3295 d = {None: '', False: '-', True: '+'}[self._ascending]
3293 3296 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3294 3297
3295 3298 class generatorset(abstractsmartset):
3296 3299 """Wrap a generator for lazy iteration
3297 3300
3298 3301 Wrapper structure for generators that provides lazy membership and can
3299 3302 be iterated more than once.
3300 3303 When asked for membership it generates values until either it finds the
3301 3304 requested one or has gone through all the elements in the generator
3302 3305 """
3303 3306 def __init__(self, gen, iterasc=None):
3304 3307 """
3305 3308 gen: a generator producing the values for the generatorset.
3306 3309 """
3307 3310 self._gen = gen
3308 3311 self._asclist = None
3309 3312 self._cache = {}
3310 3313 self._genlist = []
3311 3314 self._finished = False
3312 3315 self._ascending = True
3313 3316 if iterasc is not None:
3314 3317 if iterasc:
3315 3318 self.fastasc = self._iterator
3316 3319 self.__contains__ = self._asccontains
3317 3320 else:
3318 3321 self.fastdesc = self._iterator
3319 3322 self.__contains__ = self._desccontains
3320 3323
3321 3324 def __nonzero__(self):
3322 3325 # Do not use 'for r in self' because it will enforce the iteration
3323 3326 # order (default ascending), possibly unrolling a whole descending
3324 3327 # iterator.
3325 3328 if self._genlist:
3326 3329 return True
3327 3330 for r in self._consumegen():
3328 3331 return True
3329 3332 return False
3330 3333
3331 3334 def __contains__(self, x):
3332 3335 if x in self._cache:
3333 3336 return self._cache[x]
3334 3337
3335 3338 # Use new values only, as existing values would be cached.
3336 3339 for l in self._consumegen():
3337 3340 if l == x:
3338 3341 return True
3339 3342
3340 3343 self._cache[x] = False
3341 3344 return False
3342 3345
3343 3346 def _asccontains(self, x):
3344 3347 """version of contains optimised for ascending generator"""
3345 3348 if x in self._cache:
3346 3349 return self._cache[x]
3347 3350
3348 3351 # Use new values only, as existing values would be cached.
3349 3352 for l in self._consumegen():
3350 3353 if l == x:
3351 3354 return True
3352 3355 if l > x:
3353 3356 break
3354 3357
3355 3358 self._cache[x] = False
3356 3359 return False
3357 3360
3358 3361 def _desccontains(self, x):
3359 3362 """version of contains optimised for descending generator"""
3360 3363 if x in self._cache:
3361 3364 return self._cache[x]
3362 3365
3363 3366 # Use new values only, as existing values would be cached.
3364 3367 for l in self._consumegen():
3365 3368 if l == x:
3366 3369 return True
3367 3370 if l < x:
3368 3371 break
3369 3372
3370 3373 self._cache[x] = False
3371 3374 return False
3372 3375
3373 3376 def __iter__(self):
3374 3377 if self._ascending:
3375 3378 it = self.fastasc
3376 3379 else:
3377 3380 it = self.fastdesc
3378 3381 if it is not None:
3379 3382 return it()
3380 3383 # we need to consume the iterator
3381 3384 for x in self._consumegen():
3382 3385 pass
3383 3386 # recall the same code
3384 3387 return iter(self)
3385 3388
3386 3389 def _iterator(self):
3387 3390 if self._finished:
3388 3391 return iter(self._genlist)
3389 3392
3390 3393 # We have to use this complex iteration strategy to allow multiple
3391 3394 # iterations at the same time. We need to be able to see revisions
3392 3395 # pulled from _consumegen and added to genlist by another instance.
3393 3396 #
3394 3397 # Getting rid of it would provide about a 15% speedup on this
3395 3398 # iteration.
3396 3399 genlist = self._genlist
3397 3400 nextrev = self._consumegen().next
3398 3401 _len = len # cache global lookup
3399 3402 def gen():
3400 3403 i = 0
3401 3404 while True:
3402 3405 if i < _len(genlist):
3403 3406 yield genlist[i]
3404 3407 else:
3405 3408 yield nextrev()
3406 3409 i += 1
3407 3410 return gen()
3408 3411
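# Editor's sketch (not part of the original module): the strategy above lets
# several iterators run over the same generatorset at once; each one reads
# already-produced values from the shared genlist before pulling new ones
# from the underlying generator:
#
#   gs = generatorset(iter([1, 2, 3]), iterasc=True)
#   a, b = iter(gs), iter(gs)
#   next(a), next(b), next(a)   # (1, 1, 2) -- b reuses the value cached by a
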
3409 3412 def _consumegen(self):
3410 3413 cache = self._cache
3411 3414 genlist = self._genlist.append
3412 3415 for item in self._gen:
3413 3416 cache[item] = True
3414 3417 genlist(item)
3415 3418 yield item
3416 3419 if not self._finished:
3417 3420 self._finished = True
3418 3421 asc = self._genlist[:]
3419 3422 asc.sort()
3420 3423 self._asclist = asc
3421 3424 self.fastasc = asc.__iter__
3422 3425 self.fastdesc = asc.__reversed__
3423 3426
3424 3427 def __len__(self):
3425 3428 for x in self._consumegen():
3426 3429 pass
3427 3430 return len(self._genlist)
3428 3431
3429 3432 def sort(self, reverse=False):
3430 3433 self._ascending = not reverse
3431 3434
3432 3435 def reverse(self):
3433 3436 self._ascending = not self._ascending
3434 3437
3435 3438 def isascending(self):
3436 3439 return self._ascending
3437 3440
3438 3441 def isdescending(self):
3439 3442 return not self._ascending
3440 3443
3441 3444 def first(self):
3442 3445 if self._ascending:
3443 3446 it = self.fastasc
3444 3447 else:
3445 3448 it = self.fastdesc
3446 3449 if it is None:
3447 3450 # we need to consume all and try again
3448 3451 for x in self._consumegen():
3449 3452 pass
3450 3453 return self.first()
3451 3454 return next(it(), None)
3452 3455
3453 3456 def last(self):
3454 3457 if self._ascending:
3455 3458 it = self.fastdesc
3456 3459 else:
3457 3460 it = self.fastasc
3458 3461 if it is None:
3459 3462 # we need to consume all and try again
3460 3463 for x in self._consumegen():
3461 3464 pass
3462 3465 return self.last()
3463 3466 return next(it(), None)
3464 3467
3465 3468 def __repr__(self):
3466 3469 d = {False: '-', True: '+'}[self._ascending]
3467 3470 return '<%s%s>' % (type(self).__name__, d)
3468 3471
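# Editor's sketch (not part of the original module): a generatorset consumes
# its underlying generator lazily and caches what it has seen, so membership
# tests stop as soon as the value is found and repeated iteration does not
# re-run the generator. Assuming a throwaway generator:
#
#   gs = generatorset(iter([3, 1, 2]))
#   3 in gs        # True; only the first value is pulled from the generator
#   list(gs)       # [1, 2, 3] (default iteration order is ascending)
#   list(gs)       # [1, 2, 3] again, served from the cached, sorted list
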
3469 3472 class spanset(abstractsmartset):
3470 3473 """Duck type for baseset class which represents a range of revisions and
3471 3474 can work lazily without having the whole range in memory
3472 3475
3473 3476 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3474 3477 notable points:
3475 3478 - when x > y it will be automatically descending,
3476 3479 - revisions filtered by this repoview will be skipped.
3477 3480
3478 3481 """
3479 3482 def __init__(self, repo, start=0, end=None):
3480 3483 """
3481 3484 start: first revision included in the set
3482 3485 (defaults to 0)
3483 3486 end: first revision excluded (last + 1)
3484 3487 (defaults to len(repo))
3485 3488
3486 3489 Spanset will be descending if `end` < `start`.
3487 3490 """
3488 3491 if end is None:
3489 3492 end = len(repo)
3490 3493 self._ascending = start <= end
3491 3494 if not self._ascending:
3492 3495 start, end = end + 1, start + 1
3493 3496 self._start = start
3494 3497 self._end = end
3495 3498 self._hiddenrevs = repo.changelog.filteredrevs
3496 3499
3497 3500 def sort(self, reverse=False):
3498 3501 self._ascending = not reverse
3499 3502
3500 3503 def reverse(self):
3501 3504 self._ascending = not self._ascending
3502 3505
3503 3506 def _iterfilter(self, iterrange):
3504 3507 s = self._hiddenrevs
3505 3508 for r in iterrange:
3506 3509 if r not in s:
3507 3510 yield r
3508 3511
3509 3512 def __iter__(self):
3510 3513 if self._ascending:
3511 3514 return self.fastasc()
3512 3515 else:
3513 3516 return self.fastdesc()
3514 3517
3515 3518 def fastasc(self):
3516 3519 iterrange = xrange(self._start, self._end)
3517 3520 if self._hiddenrevs:
3518 3521 return self._iterfilter(iterrange)
3519 3522 return iter(iterrange)
3520 3523
3521 3524 def fastdesc(self):
3522 3525 iterrange = xrange(self._end - 1, self._start - 1, -1)
3523 3526 if self._hiddenrevs:
3524 3527 return self._iterfilter(iterrange)
3525 3528 return iter(iterrange)
3526 3529
3527 3530 def __contains__(self, rev):
3528 3531 hidden = self._hiddenrevs
3529 3532 return ((self._start <= rev < self._end)
3530 3533 and not (hidden and rev in hidden))
3531 3534
3532 3535 def __nonzero__(self):
3533 3536 for r in self:
3534 3537 return True
3535 3538 return False
3536 3539
3537 3540 def __len__(self):
3538 3541 if not self._hiddenrevs:
3539 3542 return abs(self._end - self._start)
3540 3543 else:
3541 3544 count = 0
3542 3545 start = self._start
3543 3546 end = self._end
3544 3547 for rev in self._hiddenrevs:
3545 3548 if (end < rev <= start) or (start <= rev < end):
3546 3549 count += 1
3547 3550 return abs(self._end - self._start) - count
3548 3551
3549 3552 def isascending(self):
3550 3553 return self._ascending
3551 3554
3552 3555 def isdescending(self):
3553 3556 return not self._ascending
3554 3557
3555 3558 def first(self):
3556 3559 if self._ascending:
3557 3560 it = self.fastasc
3558 3561 else:
3559 3562 it = self.fastdesc
3560 3563 for x in it():
3561 3564 return x
3562 3565 return None
3563 3566
3564 3567 def last(self):
3565 3568 if self._ascending:
3566 3569 it = self.fastdesc
3567 3570 else:
3568 3571 it = self.fastasc
3569 3572 for x in it():
3570 3573 return x
3571 3574 return None
3572 3575
3573 3576 def __repr__(self):
3574 3577 d = {False: '-', True: '+'}[self._ascending]
3575 3578 return '<%s%s %d:%d>' % (type(self).__name__, d,
3576 3579 self._start, self._end - 1)
3577 3580
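# Editor's sketch (not part of the original module, assuming `repo` is a
# repository with at least four visible revisions): a spanset behaves like
# xrange over revision numbers, except that a reversed range iterates
# descending instead of being empty, and hidden revisions are skipped:
#
#   list(spanset(repo, 0, 3))   # [0, 1, 2]  (ascending)
#   list(spanset(repo, 3, 0))   # [3, 2, 1]  (start > end, so descending)
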
3578 3581 class fullreposet(spanset):
3579 3582 """a set containing all revisions in the repo
3580 3583
3581 3584 This class exists to host special optimization and magic to handle virtual
3582 3585 revisions such as "null".
3583 3586 """
3584 3587
3585 3588 def __init__(self, repo):
3586 3589 super(fullreposet, self).__init__(repo)
3587 3590
3588 3591 def __and__(self, other):
3589 3592 """As self contains the whole repo, all of the other set should also be
3590 3593 in self. Therefore `self & other = other`.
3591 3594
3592 3595 This boldly assumes the other contains valid revs only.
3593 3596 """
3594 3597 # other is not a smartset, make it so
3595 3598 if not util.safehasattr(other, 'isascending'):
3596 3599 # filter out hidden revisions
3597 3600 # (this boldly assumes all smartsets are pure)
3598 3601 #
3599 3602 # `other` was used with "&", let's assume this is a set like
3600 3603 # object.
3601 3604 other = baseset(other - self._hiddenrevs)
3602 3605
3603 3606 # XXX As fullreposet is also used as bootstrap, this is wrong.
3604 3607 #
3605 3608 # With a giveme312() revset returning [3,1,2], this makes
3606 3609 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3607 3610 # We cannot just drop it because other usages still need to sort it:
3608 3611 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3609 3612 #
3610 3613 # There are also some faulty revset implementations that rely on it
3611 3614 # (eg: children as of its state in e8075329c5fb)
3612 3615 #
3613 3616 # When we fix the two points above we can move this into the if clause
3614 3617 other.sort(reverse=self.isdescending())
3615 3618 return other
3616 3619
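# Editor's sketch (not part of the original module): because fullreposet
# contains every revision, intersecting it with another set simply returns
# the other operand, re-sorted to match this set's direction:
#
#   fullreposet(repo) & baseset([3, 1, 2])   # -> the baseset, sorted ascending
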
3617 3620 def prettyformatset(revs):
3618 3621 lines = []
3619 3622 rs = repr(revs)
3620 3623 p = 0
3621 3624 while p < len(rs):
3622 3625 q = rs.find('<', p + 1)
3623 3626 if q < 0:
3624 3627 q = len(rs)
3625 3628 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3626 3629 assert l >= 0
3627 3630 lines.append((l, rs[p:q].rstrip()))
3628 3631 p = q
3629 3632 return '\n'.join(' ' * l + s for l, s in lines)
3630 3633
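# Editor's sketch (not part of the original module): prettyformatset() splits
# a nested smartset repr() before each '<' and indents by bracket depth, which
# makes composed sets easier to read in debug output. For instance, an addset
# of two spansets would render roughly as:
#
#   <addset
#    <spanset+ 0:2>,
#    <spanset+ 5:7>>
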
3631 3634 # tell hggettext to extract docstrings from these functions:
3632 3635 i18nfunctions = symbols.values()