revset: narrow the subset using smartset operation in roots()...
Pierre-Yves David
r24923:e5f16696 default
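The hunk that actually modifies roots() lies past the end of this excerpt (the revset.py listing below stops around line 1655, before roots() is defined). The idea named in the commit message is to restrict the candidate revisions with a smartset intersection (`subset & ...`) instead of rebuilding the subset element by element, so the smartset classes can pick the cheapest membership strategy. A minimal sketch of that shape, written against the helpers visible below; this is a hypothetical spelling, not the literal hunk:

    # illustrative only -- hypothetical spelling of the narrowing step in roots()
    s = getset(repo, fullreposet(repo), x)
    # before: rebuild a plain baseset by hand, testing membership per element
    subset = baseset([r for r in subset if r in s])
    # after: let the smartset machinery choose the intersection strategy
    subset = subset & s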
@@ -1,32 +1,33 @@
1 1 all()
2 2 draft()
3 3 ::tip
4 4 draft() and ::tip
5 5 ::tip and draft()
6 6 0::tip
7 7 roots(0::tip)
8 8 author(lmoscovicz)
9 9 author(mpm)
10 10 author(lmoscovicz) or author(mpm)
11 11 author(mpm) or author(lmoscovicz)
12 12 tip:0
13 13 max(tip:0)
14 14 min(0:tip)
15 15 0::
16 16 min(0::)
17 17 # those two `roots(...)` inputs are close to what phase movement use.
18 18 roots((tip~100::) - (tip~100::tip))
19 19 roots((0::) - (0::tip))
20 42:68 and roots(42:tip)
20 21 ::p1(p1(tip))::
21 22 public()
22 23 :10000 and public()
23 24 draft()
24 25 :10000 and draft()
25 26 max(::(tip~20) - obsolete())
26 27 roots((0:tip)::)
27 28 (not public() - obsolete())
28 29 (_intlist('20000\x0020001')) and merge()
29 30 parents(20000)
30 31 (20000::) - (20000)
31 32 # The one below is used by rebase
32 33 (children(ancestor(tip~5, tip)) and ::(tip~5))::
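The single line added to this list of benchmark revsets, `42:68 and roots(42:tip)`, pairs roots() with a small explicit range, so the subset handed to roots() is tiny compared with the repository. Outside the benchmark harness the same expression can be tried directly with any log-like command, for example:

    hg log -r '42:68 and roots(42:tip)'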
@@ -1,3448 +1,3448 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
19 19 def _revancestors(repo, revs, followfirst):
20 20 """Like revlog.ancestors(), but supports followfirst."""
21 21 if followfirst:
22 22 cut = 1
23 23 else:
24 24 cut = None
25 25 cl = repo.changelog
26 26
27 27 def iterate():
28 28 revqueue, revsnode = None, None
29 29 h = []
30 30
31 31 revs.sort(reverse=True)
32 32 revqueue = util.deque(revs)
33 33 if revqueue:
34 34 revsnode = revqueue.popleft()
35 35 heapq.heappush(h, -revsnode)
36 36
37 37 seen = set()
38 38 while h:
39 39 current = -heapq.heappop(h)
40 40 if current not in seen:
41 41 if revsnode and current == revsnode:
42 42 if revqueue:
43 43 revsnode = revqueue.popleft()
44 44 heapq.heappush(h, -revsnode)
45 45 seen.add(current)
46 46 yield current
47 47 for parent in cl.parentrevs(current)[:cut]:
48 48 if parent != node.nullrev:
49 49 heapq.heappush(h, -parent)
50 50
51 51 return generatorset(iterate(), iterasc=False)
52 52
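_revancestors() above yields ancestors from the highest revision downward. Python's heapq module only provides a min-heap, so the code pushes negated revision numbers and negates again on pop, turning it into a max-heap. A standalone illustration of that trick:

    import heapq

    h = []
    for rev in [5, 12, 7]:
        heapq.heappush(h, -rev)       # store negated revisions
    while h:
        print(-heapq.heappop(h))      # pops 12, then 7, then 5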
53 53 def _revdescendants(repo, revs, followfirst):
54 54 """Like revlog.descendants() but supports followfirst."""
55 55 if followfirst:
56 56 cut = 1
57 57 else:
58 58 cut = None
59 59
60 60 def iterate():
61 61 cl = repo.changelog
62 62 first = min(revs)
63 63 nullrev = node.nullrev
64 64 if first == nullrev:
65 65 # Are there nodes with a null first parent and a non-null
66 66 # second one? Maybe. Do we care? Probably not.
67 67 for i in cl:
68 68 yield i
69 69 else:
70 70 seen = set(revs)
71 71 for i in cl.revs(first + 1):
72 72 for x in cl.parentrevs(i)[:cut]:
73 73 if x != nullrev and x in seen:
74 74 seen.add(i)
75 75 yield i
76 76 break
77 77
78 78 return generatorset(iterate(), iterasc=True)
79 79
80 80 def _revsbetween(repo, roots, heads):
81 81 """Return all paths between roots and heads, inclusive of both endpoint
82 82 sets."""
83 83 if not roots:
84 84 return baseset()
85 85 parentrevs = repo.changelog.parentrevs
86 86 visit = list(heads)
87 87 reachable = set()
88 88 seen = {}
89 89 minroot = min(roots)
90 90 roots = set(roots)
91 91 # open-code the post-order traversal due to the tiny size of
92 92 # sys.getrecursionlimit()
93 93 while visit:
94 94 rev = visit.pop()
95 95 if rev in roots:
96 96 reachable.add(rev)
97 97 parents = parentrevs(rev)
98 98 seen[rev] = parents
99 99 for parent in parents:
100 100 if parent >= minroot and parent not in seen:
101 101 visit.append(parent)
102 102 if not reachable:
103 103 return baseset()
104 104 for rev in sorted(seen):
105 105 for parent in seen[rev]:
106 106 if parent in reachable:
107 107 reachable.add(rev)
108 108 return baseset(sorted(reachable))
109 109
110 110 elements = {
111 111 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
112 112 "##": (20, None, ("_concat", 20)),
113 113 "~": (18, None, ("ancestor", 18)),
114 114 "^": (18, None, ("parent", 18), ("parentpost", 18)),
115 115 "-": (5, ("negate", 19), ("minus", 5)),
116 116 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
117 117 ("dagrangepost", 17)),
118 118 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
119 119 ("dagrangepost", 17)),
120 120 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
121 121 "not": (10, ("not", 10)),
122 122 "!": (10, ("not", 10)),
123 123 "and": (5, None, ("and", 5)),
124 124 "&": (5, None, ("and", 5)),
125 125 "%": (5, None, ("only", 5), ("onlypost", 5)),
126 126 "or": (4, None, ("or", 4)),
127 127 "|": (4, None, ("or", 4)),
128 128 "+": (4, None, ("or", 4)),
129 129 ",": (2, None, ("list", 2)),
130 130 ")": (0, None, None),
131 131 "symbol": (0, ("symbol",), None),
132 132 "string": (0, ("string",), None),
133 133 "end": (0, None, None),
134 134 }
135 135
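The elements table above feeds a precedence-climbing parser (parser.parser, defined outside this excerpt): each entry lists a binding strength plus optional prefix, infix and suffix handlers. Because `::` (17) and `:` (15) bind far more tightly than `and`/`-` (5) and `or` (4), the benchmark expressions above group as intended without extra parentheses, for example:

    # implicit grouping under the precedences above (illustrative)
    #   draft() and ::tip        ==  (draft()) and (::tip)
    #   0::tip - roots(0::tip)   ==  (0::tip) - (roots(0::tip))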
136 136 keywords = set(['and', 'or', 'not'])
137 137
138 138 # default set of valid characters for the initial letter of symbols
139 139 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
140 140 if c.isalnum() or c in '._@' or ord(c) > 127)
141 141
142 142 # default set of valid characters for non-initial letters of symbols
143 143 _symletters = set(c for c in [chr(i) for i in xrange(256)]
144 144 if c.isalnum() or c in '-._/@' or ord(c) > 127)
145 145
146 146 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
147 147 '''
148 148 Parse a revset statement into a stream of tokens
149 149
150 150 ``syminitletters`` is the set of valid characters for the initial
151 151 letter of symbols.
152 152
153 153 By default, character ``c`` is recognized as valid for initial
154 154 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
155 155
156 156 ``symletters`` is the set of valid characters for non-initial
157 157 letters of symbols.
158 158
159 159 By default, character ``c`` is recognized as valid for non-initial
160 160 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
161 161
162 162 Check that @ is a valid unquoted token character (issue3686):
163 163 >>> list(tokenize("@::"))
164 164 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
165 165
166 166 '''
167 167 if syminitletters is None:
168 168 syminitletters = _syminitletters
169 169 if symletters is None:
170 170 symletters = _symletters
171 171
172 172 pos, l = 0, len(program)
173 173 while pos < l:
174 174 c = program[pos]
175 175 if c.isspace(): # skip inter-token whitespace
176 176 pass
177 177 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
178 178 yield ('::', None, pos)
179 179 pos += 1 # skip ahead
180 180 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
181 181 yield ('..', None, pos)
182 182 pos += 1 # skip ahead
183 183 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
184 184 yield ('##', None, pos)
185 185 pos += 1 # skip ahead
186 186 elif c in "():,-|&+!~^%": # handle simple operators
187 187 yield (c, None, pos)
188 188 elif (c in '"\'' or c == 'r' and
189 189 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
190 190 if c == 'r':
191 191 pos += 1
192 192 c = program[pos]
193 193 decode = lambda x: x
194 194 else:
195 195 decode = lambda x: x.decode('string-escape')
196 196 pos += 1
197 197 s = pos
198 198 while pos < l: # find closing quote
199 199 d = program[pos]
200 200 if d == '\\': # skip over escaped characters
201 201 pos += 2
202 202 continue
203 203 if d == c:
204 204 yield ('string', decode(program[s:pos]), s)
205 205 break
206 206 pos += 1
207 207 else:
208 208 raise error.ParseError(_("unterminated string"), s)
209 209 # gather up a symbol/keyword
210 210 elif c in syminitletters:
211 211 s = pos
212 212 pos += 1
213 213 while pos < l: # find end of symbol
214 214 d = program[pos]
215 215 if d not in symletters:
216 216 break
217 217 if d == '.' and program[pos - 1] == '.': # special case for ..
218 218 pos -= 1
219 219 break
220 220 pos += 1
221 221 sym = program[s:pos]
222 222 if sym in keywords: # operator keywords
223 223 yield (sym, None, s)
224 224 elif '-' in sym:
225 225 # some jerk gave us foo-bar-baz, try to check if it's a symbol
226 226 if lookup and lookup(sym):
227 227 # looks like a real symbol
228 228 yield ('symbol', sym, s)
229 229 else:
230 230 # looks like an expression
231 231 parts = sym.split('-')
232 232 for p in parts[:-1]:
233 233 if p: # possible consecutive -
234 234 yield ('symbol', p, s)
235 235 s += len(p)
236 236 yield ('-', None, pos)
237 237 s += 1
238 238 if parts[-1]: # possible trailing -
239 239 yield ('symbol', parts[-1], s)
240 240 else:
241 241 yield ('symbol', sym, s)
242 242 pos -= 1
243 243 else:
244 244 raise error.ParseError(_("syntax error in revset '%s'") %
245 245 program, pos)
246 246 pos += 1
247 247 yield ('end', None, pos)
248 248
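Worked out by hand from the tokenizer above (it is not one of the module's doctests), a slightly larger input shows how operators, keywords, symbols and their positions come out:

    >>> list(tokenize("::tip and draft()"))
    [('::', None, 0), ('symbol', 'tip', 2), ('and', None, 6),
     ('symbol', 'draft', 10), ('(', None, 15), (')', None, 16),
     ('end', None, 17)]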
249 249 def parseerrordetail(inst):
250 250 """Compose error message from specified ParseError object
251 251 """
252 252 if len(inst.args) > 1:
253 253 return _('at %s: %s') % (inst.args[1], inst.args[0])
254 254 else:
255 255 return inst.args[0]
256 256
257 257 # helpers
258 258
259 259 def getstring(x, err):
260 260 if x and (x[0] == 'string' or x[0] == 'symbol'):
261 261 return x[1]
262 262 raise error.ParseError(err)
263 263
264 264 def getlist(x):
265 265 if not x:
266 266 return []
267 267 if x[0] == 'list':
268 268 return getlist(x[1]) + [x[2]]
269 269 return [x]
270 270
271 271 def getargs(x, min, max, err):
272 272 l = getlist(x)
273 273 if len(l) < min or (max >= 0 and len(l) > max):
274 274 raise error.ParseError(err)
275 275 return l
276 276
277 277 def isvalidsymbol(tree):
278 278 """Examine whether specified ``tree`` is valid ``symbol`` or not
279 279 """
280 280 return tree[0] == 'symbol' and len(tree) > 1
281 281
282 282 def getsymbol(tree):
283 283 """Get symbol name from valid ``symbol`` in ``tree``
284 284
285 285 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
286 286 """
287 287 return tree[1]
288 288
289 289 def isvalidfunc(tree):
290 290 """Examine whether specified ``tree`` is valid ``func`` or not
291 291 """
292 292 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
293 293
294 294 def getfuncname(tree):
295 295 """Get function name from valid ``func`` in ``tree``
296 296
297 297 This assumes that ``tree`` is already examined by ``isvalidfunc``.
298 298 """
299 299 return getsymbol(tree[1])
300 300
301 301 def getfuncargs(tree):
302 302 """Get list of function arguments from valid ``func`` in ``tree``
303 303
304 304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 305 """
306 306 if len(tree) > 2:
307 307 return getlist(tree[2])
308 308 else:
309 309 return []
310 310
311 311 def getset(repo, subset, x):
312 312 if not x:
313 313 raise error.ParseError(_("missing argument"))
314 314 s = methods[x[0]](repo, subset, *x[1:])
315 315 if util.safehasattr(s, 'isascending'):
316 316 return s
317 317 return baseset(s)
318 318
319 319 def _getrevsource(repo, r):
320 320 extra = repo[r].extra()
321 321 for label in ('source', 'transplant_source', 'rebase_source'):
322 322 if label in extra:
323 323 try:
324 324 return repo[extra[label]].rev()
325 325 except error.RepoLookupError:
326 326 pass
327 327 return None
328 328
329 329 # operator methods
330 330
331 331 def stringset(repo, subset, x):
332 332 x = repo[x].rev()
333 333 if x in subset:
334 334 return baseset([x])
335 335 return baseset()
336 336
337 337 def symbolset(repo, subset, x):
338 338 if x in symbols:
339 339 raise error.ParseError(_("can't use %s here") % x)
340 340 return stringset(repo, subset, x)
341 341
342 342 def rangeset(repo, subset, x, y):
343 343 m = getset(repo, fullreposet(repo), x)
344 344 n = getset(repo, fullreposet(repo), y)
345 345
346 346 if not m or not n:
347 347 return baseset()
348 348 m, n = m.first(), n.last()
349 349
350 350 if m < n:
351 351 r = spanset(repo, m, n + 1)
352 352 else:
353 353 r = spanset(repo, m, n - 1)
354 354 return r & subset
355 355
356 356 def dagrange(repo, subset, x, y):
357 357 r = fullreposet(repo)
358 358 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
359 359 return xs & subset
360 360
361 361 def andset(repo, subset, x, y):
362 362 return getset(repo, getset(repo, subset, x), y)
363 363
364 364 def orset(repo, subset, x, y):
365 365 xl = getset(repo, subset, x)
366 366 yl = getset(repo, subset - xl, y)
367 367 return xl + yl
368 368
369 369 def notset(repo, subset, x):
370 370 return subset - getset(repo, subset, x)
371 371
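andset() above is where the "narrow the subset" theme of this changeset is most visible: the right operand is evaluated against the result of the left one, so in the new benchmark expression `42:68 and roots(42:tip)` the roots() call only ever sees the 27 revisions of 42:68, not the whole repository. Roughly (the tree names below are hypothetical placeholders):

    # what andset() does for "42:68 and roots(42:tip)", schematically
    narrowed = getset(repo, subset, range_tree)     # at most revs 42..68
    result   = getset(repo, narrowed, roots_tree)   # roots() works inside that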
372 372 def listset(repo, subset, a, b):
373 373 raise error.ParseError(_("can't use a list in this context"))
374 374
375 375 def func(repo, subset, a, b):
376 376 if a[0] == 'symbol' and a[1] in symbols:
377 377 return symbols[a[1]](repo, subset, b)
378 378 raise error.UnknownIdentifier(a[1], symbols.keys())
379 379
380 380 # functions
381 381
382 382 def adds(repo, subset, x):
383 383 """``adds(pattern)``
384 384 Changesets that add a file matching pattern.
385 385
386 386 The pattern without explicit kind like ``glob:`` is expected to be
387 387 relative to the current directory and match against a file or a
388 388 directory.
389 389 """
390 390 # i18n: "adds" is a keyword
391 391 pat = getstring(x, _("adds requires a pattern"))
392 392 return checkstatus(repo, subset, pat, 1)
393 393
394 394 def ancestor(repo, subset, x):
395 395 """``ancestor(*changeset)``
396 396 A greatest common ancestor of the changesets.
397 397
398 398 Accepts 0 or more changesets.
399 399 Will return empty list when passed no args.
400 400 Greatest common ancestor of a single changeset is that changeset.
401 401 """
402 402 # i18n: "ancestor" is a keyword
403 403 l = getlist(x)
404 404 rl = fullreposet(repo)
405 405 anc = None
406 406
407 407 # (getset(repo, rl, i) for i in l) generates a list of lists
408 408 for revs in (getset(repo, rl, i) for i in l):
409 409 for r in revs:
410 410 if anc is None:
411 411 anc = repo[r]
412 412 else:
413 413 anc = anc.ancestor(repo[r])
414 414
415 415 if anc is not None and anc.rev() in subset:
416 416 return baseset([anc.rev()])
417 417 return baseset()
418 418
419 419 def _ancestors(repo, subset, x, followfirst=False):
420 420 heads = getset(repo, fullreposet(repo), x)
421 421 if not heads:
422 422 return baseset()
423 423 s = _revancestors(repo, heads, followfirst)
424 424 return subset & s
425 425
426 426 def ancestors(repo, subset, x):
427 427 """``ancestors(set)``
428 428 Changesets that are ancestors of a changeset in set.
429 429 """
430 430 return _ancestors(repo, subset, x)
431 431
432 432 def _firstancestors(repo, subset, x):
433 433 # ``_firstancestors(set)``
434 434 # Like ``ancestors(set)`` but follows only the first parents.
435 435 return _ancestors(repo, subset, x, followfirst=True)
436 436
437 437 def ancestorspec(repo, subset, x, n):
438 438 """``set~n``
439 439 Changesets that are the Nth ancestor (first parents only) of a changeset
440 440 in set.
441 441 """
442 442 try:
443 443 n = int(n[1])
444 444 except (TypeError, ValueError):
445 445 raise error.ParseError(_("~ expects a number"))
446 446 ps = set()
447 447 cl = repo.changelog
448 448 for r in getset(repo, fullreposet(repo), x):
449 449 for i in range(n):
450 450 r = cl.parentrevs(r)[0]
451 451 ps.add(r)
452 452 return subset & ps
453 453
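ancestorspec() implements the `~n` suffix seen in the benchmark expressions (tip~100, tip~5): it follows the first parent n times for every revision in the set. Equivalent command-line spellings using only syntax defined in this module:

    hg log -r 'tip~2'    # grandparent of tip, following first parents
    hg log -r '.~3'      # third first-parent ancestor of the working parent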
454 454 def author(repo, subset, x):
455 455 """``author(string)``
456 456 Alias for ``user(string)``.
457 457 """
458 458 # i18n: "author" is a keyword
459 459 n = encoding.lower(getstring(x, _("author requires a string")))
460 460 kind, pattern, matcher = _substringmatcher(n)
461 461 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
462 462
463 463 def bisect(repo, subset, x):
464 464 """``bisect(string)``
465 465 Changesets marked in the specified bisect status:
466 466
467 467 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
468 468 - ``goods``, ``bads`` : csets topologically good/bad
469 469 - ``range`` : csets taking part in the bisection
470 470 - ``pruned`` : csets that are goods, bads or skipped
471 471 - ``untested`` : csets whose fate is yet unknown
472 472 - ``ignored`` : csets ignored due to DAG topology
473 473 - ``current`` : the cset currently being bisected
474 474 """
475 475 # i18n: "bisect" is a keyword
476 476 status = getstring(x, _("bisect requires a string")).lower()
477 477 state = set(hbisect.get(repo, status))
478 478 return subset & state
479 479
480 480 # Backward-compatibility
481 481 # - no help entry so that we do not advertise it any more
482 482 def bisected(repo, subset, x):
483 483 return bisect(repo, subset, x)
484 484
485 485 def bookmark(repo, subset, x):
486 486 """``bookmark([name])``
487 487 The named bookmark or all bookmarks.
488 488
489 489 If `name` starts with `re:`, the remainder of the name is treated as
490 490 a regular expression. To match a bookmark that actually starts with `re:`,
491 491 use the prefix `literal:`.
492 492 """
493 493 # i18n: "bookmark" is a keyword
494 494 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
495 495 if args:
496 496 bm = getstring(args[0],
497 497 # i18n: "bookmark" is a keyword
498 498 _('the argument to bookmark must be a string'))
499 499 kind, pattern, matcher = _stringmatcher(bm)
500 500 bms = set()
501 501 if kind == 'literal':
502 502 bmrev = repo._bookmarks.get(pattern, None)
503 503 if not bmrev:
504 504 raise error.RepoLookupError(_("bookmark '%s' does not exist")
505 505 % bm)
506 506 bms.add(repo[bmrev].rev())
507 507 else:
508 508 matchrevs = set()
509 509 for name, bmrev in repo._bookmarks.iteritems():
510 510 if matcher(name):
511 511 matchrevs.add(bmrev)
512 512 if not matchrevs:
513 513 raise error.RepoLookupError(_("no bookmarks exist"
514 514 " that match '%s'") % pattern)
515 515 for bmrev in matchrevs:
516 516 bms.add(repo[bmrev].rev())
517 517 else:
518 518 bms = set([repo[r].rev()
519 519 for r in repo._bookmarks.values()])
520 520 bms -= set([node.nullrev])
521 521 return subset & bms
522 522
523 523 def branch(repo, subset, x):
524 524 """``branch(string or set)``
525 525 All changesets belonging to the given branch or the branches of the given
526 526 changesets.
527 527
528 528 If `string` starts with `re:`, the remainder of the name is treated as
529 529 a regular expression. To match a branch that actually starts with `re:`,
530 530 use the prefix `literal:`.
531 531 """
532 532 getbi = repo.revbranchcache().branchinfo
533 533
534 534 try:
535 535 b = getstring(x, '')
536 536 except error.ParseError:
537 537 # not a string, but another revspec, e.g. tip()
538 538 pass
539 539 else:
540 540 kind, pattern, matcher = _stringmatcher(b)
541 541 if kind == 'literal':
542 542 # note: falls through to the revspec case if no branch with
543 543 # this name exists
544 544 if pattern in repo.branchmap():
545 545 return subset.filter(lambda r: matcher(getbi(r)[0]))
546 546 else:
547 547 return subset.filter(lambda r: matcher(getbi(r)[0]))
548 548
549 549 s = getset(repo, fullreposet(repo), x)
550 550 b = set()
551 551 for r in s:
552 552 b.add(getbi(r)[0])
553 553 c = s.__contains__
554 554 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
555 555
556 556 def bumped(repo, subset, x):
557 557 """``bumped()``
558 558 Mutable changesets marked as successors of public changesets.
559 559
560 560 Only non-public and non-obsolete changesets can be `bumped`.
561 561 """
562 562 # i18n: "bumped" is a keyword
563 563 getargs(x, 0, 0, _("bumped takes no arguments"))
564 564 bumped = obsmod.getrevs(repo, 'bumped')
565 565 return subset & bumped
566 566
567 567 def bundle(repo, subset, x):
568 568 """``bundle()``
569 569 Changesets in the bundle.
570 570
571 571 Bundle must be specified by the -R option."""
572 572
573 573 try:
574 574 bundlerevs = repo.changelog.bundlerevs
575 575 except AttributeError:
576 576 raise util.Abort(_("no bundle provided - specify with -R"))
577 577 return subset & bundlerevs
578 578
579 579 def checkstatus(repo, subset, pat, field):
580 580 hasset = matchmod.patkind(pat) == 'set'
581 581
582 582 mcache = [None]
583 583 def matches(x):
584 584 c = repo[x]
585 585 if not mcache[0] or hasset:
586 586 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
587 587 m = mcache[0]
588 588 fname = None
589 589 if not m.anypats() and len(m.files()) == 1:
590 590 fname = m.files()[0]
591 591 if fname is not None:
592 592 if fname not in c.files():
593 593 return False
594 594 else:
595 595 for f in c.files():
596 596 if m(f):
597 597 break
598 598 else:
599 599 return False
600 600 files = repo.status(c.p1().node(), c.node())[field]
601 601 if fname is not None:
602 602 if fname in files:
603 603 return True
604 604 else:
605 605 for f in files:
606 606 if m(f):
607 607 return True
608 608
609 609 return subset.filter(matches)
610 610
611 611 def _children(repo, narrow, parentset):
612 612 cs = set()
613 613 if not parentset:
614 614 return baseset(cs)
615 615 pr = repo.changelog.parentrevs
616 616 minrev = min(parentset)
617 617 for r in narrow:
618 618 if r <= minrev:
619 619 continue
620 620 for p in pr(r):
621 621 if p in parentset:
622 622 cs.add(r)
623 623 return baseset(cs)
624 624
625 625 def children(repo, subset, x):
626 626 """``children(set)``
627 627 Child changesets of changesets in set.
628 628 """
629 629 s = getset(repo, fullreposet(repo), x)
630 630 cs = _children(repo, subset, s)
631 631 return subset & cs
632 632
633 633 def closed(repo, subset, x):
634 634 """``closed()``
635 635 Changeset is closed.
636 636 """
637 637 # i18n: "closed" is a keyword
638 638 getargs(x, 0, 0, _("closed takes no arguments"))
639 639 return subset.filter(lambda r: repo[r].closesbranch())
640 640
641 641 def contains(repo, subset, x):
642 642 """``contains(pattern)``
643 643 The revision's manifest contains a file matching pattern (but might not
644 644 modify it). See :hg:`help patterns` for information about file patterns.
645 645
646 646 The pattern without explicit kind like ``glob:`` is expected to be
647 647 relative to the current directory and match against a file exactly
648 648 for efficiency.
649 649 """
650 650 # i18n: "contains" is a keyword
651 651 pat = getstring(x, _("contains requires a pattern"))
652 652
653 653 def matches(x):
654 654 if not matchmod.patkind(pat):
655 655 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
656 656 if pats in repo[x]:
657 657 return True
658 658 else:
659 659 c = repo[x]
660 660 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
661 661 for f in c.manifest():
662 662 if m(f):
663 663 return True
664 664 return False
665 665
666 666 return subset.filter(matches)
667 667
668 668 def converted(repo, subset, x):
669 669 """``converted([id])``
670 670 Changesets converted from the given identifier in the old repository if
671 671 present, or all converted changesets if no identifier is specified.
672 672 """
673 673
674 674 # There is exactly no chance of resolving the revision, so do a simple
675 675 # string compare and hope for the best
676 676
677 677 rev = None
678 678 # i18n: "converted" is a keyword
679 679 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
680 680 if l:
681 681 # i18n: "converted" is a keyword
682 682 rev = getstring(l[0], _('converted requires a revision'))
683 683
684 684 def _matchvalue(r):
685 685 source = repo[r].extra().get('convert_revision', None)
686 686 return source is not None and (rev is None or source.startswith(rev))
687 687
688 688 return subset.filter(lambda r: _matchvalue(r))
689 689
690 690 def date(repo, subset, x):
691 691 """``date(interval)``
692 692 Changesets within the interval, see :hg:`help dates`.
693 693 """
694 694 # i18n: "date" is a keyword
695 695 ds = getstring(x, _("date requires a string"))
696 696 dm = util.matchdate(ds)
697 697 return subset.filter(lambda x: dm(repo[x].date()[0]))
698 698
699 699 def desc(repo, subset, x):
700 700 """``desc(string)``
701 701 Search commit message for string. The match is case-insensitive.
702 702 """
703 703 # i18n: "desc" is a keyword
704 704 ds = encoding.lower(getstring(x, _("desc requires a string")))
705 705
706 706 def matches(x):
707 707 c = repo[x]
708 708 return ds in encoding.lower(c.description())
709 709
710 710 return subset.filter(matches)
711 711
712 712 def _descendants(repo, subset, x, followfirst=False):
713 713 roots = getset(repo, fullreposet(repo), x)
714 714 if not roots:
715 715 return baseset()
716 716 s = _revdescendants(repo, roots, followfirst)
717 717
718 718 # Both sets need to be ascending in order to lazily return the union
719 719 # in the correct order.
720 720 base = subset & roots
721 721 desc = subset & s
722 722 result = base + desc
723 723 if subset.isascending():
724 724 result.sort()
725 725 elif subset.isdescending():
726 726 result.sort(reverse=True)
727 727 else:
728 728 result = subset & result
729 729 return result
730 730
731 731 def descendants(repo, subset, x):
732 732 """``descendants(set)``
733 733 Changesets which are descendants of changesets in set.
734 734 """
735 735 return _descendants(repo, subset, x)
736 736
737 737 def _firstdescendants(repo, subset, x):
738 738 # ``_firstdescendants(set)``
739 739 # Like ``descendants(set)`` but follows only the first parents.
740 740 return _descendants(repo, subset, x, followfirst=True)
741 741
742 742 def destination(repo, subset, x):
743 743 """``destination([set])``
744 744 Changesets that were created by a graft, transplant or rebase operation,
745 745 with the given revisions specified as the source. Omitting the optional set
746 746 is the same as passing all().
747 747 """
748 748 if x is not None:
749 749 sources = getset(repo, fullreposet(repo), x)
750 750 else:
751 751 sources = fullreposet(repo)
752 752
753 753 dests = set()
754 754
755 755 # subset contains all of the possible destinations that can be returned, so
756 756 # iterate over them and see if their source(s) were provided in the arg set.
757 757 # Even if the immediate src of r is not in the arg set, src's source (or
758 758 # further back) may be. Scanning back further than the immediate src allows
759 759 # transitive transplants and rebases to yield the same results as transitive
760 760 # grafts.
761 761 for r in subset:
762 762 src = _getrevsource(repo, r)
763 763 lineage = None
764 764
765 765 while src is not None:
766 766 if lineage is None:
767 767 lineage = list()
768 768
769 769 lineage.append(r)
770 770
771 771 # The visited lineage is a match if the current source is in the arg
772 772 # set. Since every candidate dest is visited by way of iterating
773 773 # subset, any dests further back in the lineage will be tested by a
774 774 # different iteration over subset. Likewise, if the src was already
775 775 # selected, the current lineage can be selected without going back
776 776 # further.
777 777 if src in sources or src in dests:
778 778 dests.update(lineage)
779 779 break
780 780
781 781 r = src
782 782 src = _getrevsource(repo, r)
783 783
784 784 return subset.filter(dests.__contains__)
785 785
786 786 def divergent(repo, subset, x):
787 787 """``divergent()``
788 788 Final successors of changesets with an alternative set of final successors.
789 789 """
790 790 # i18n: "divergent" is a keyword
791 791 getargs(x, 0, 0, _("divergent takes no arguments"))
792 792 divergent = obsmod.getrevs(repo, 'divergent')
793 793 return subset & divergent
794 794
795 795 def draft(repo, subset, x):
796 796 """``draft()``
797 797 Changeset in draft phase."""
798 798 # i18n: "draft" is a keyword
799 799 getargs(x, 0, 0, _("draft takes no arguments"))
800 800 phase = repo._phasecache.phase
801 801 target = phases.draft
802 802 condition = lambda r: phase(repo, r) == target
803 803 return subset.filter(condition, cache=False)
804 804
805 805 def extinct(repo, subset, x):
806 806 """``extinct()``
807 807 Obsolete changesets with obsolete descendants only.
808 808 """
809 809 # i18n: "extinct" is a keyword
810 810 getargs(x, 0, 0, _("extinct takes no arguments"))
811 811 extincts = obsmod.getrevs(repo, 'extinct')
812 812 return subset & extincts
813 813
814 814 def extra(repo, subset, x):
815 815 """``extra(label, [value])``
816 816 Changesets with the given label in the extra metadata, with the given
817 817 optional value.
818 818
819 819 If `value` starts with `re:`, the remainder of the value is treated as
820 820 a regular expression. To match a value that actually starts with `re:`,
821 821 use the prefix `literal:`.
822 822 """
823 823
824 824 # i18n: "extra" is a keyword
825 825 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
826 826 # i18n: "extra" is a keyword
827 827 label = getstring(l[0], _('first argument to extra must be a string'))
828 828 value = None
829 829
830 830 if len(l) > 1:
831 831 # i18n: "extra" is a keyword
832 832 value = getstring(l[1], _('second argument to extra must be a string'))
833 833 kind, value, matcher = _stringmatcher(value)
834 834
835 835 def _matchvalue(r):
836 836 extra = repo[r].extra()
837 837 return label in extra and (value is None or matcher(extra[label]))
838 838
839 839 return subset.filter(lambda r: _matchvalue(r))
840 840
841 841 def filelog(repo, subset, x):
842 842 """``filelog(pattern)``
843 843 Changesets connected to the specified filelog.
844 844
845 845 For performance reasons, visits only revisions mentioned in the file-level
846 846 filelog, rather than filtering through all changesets (much faster, but
847 847 doesn't include deletes or duplicate changes). For a slower, more accurate
848 848 result, use ``file()``.
849 849
850 850 The pattern without explicit kind like ``glob:`` is expected to be
851 851 relative to the current directory and match against a file exactly
852 852 for efficiency.
853 853
854 854 If some linkrev points to revisions filtered by the current repoview, we'll
855 855 work around it to return a non-filtered value.
856 856 """
857 857
858 858 # i18n: "filelog" is a keyword
859 859 pat = getstring(x, _("filelog requires a pattern"))
860 860 s = set()
861 861 cl = repo.changelog
862 862
863 863 if not matchmod.patkind(pat):
864 864 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
865 865 files = [f]
866 866 else:
867 867 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
868 868 files = (f for f in repo[None] if m(f))
869 869
870 870 for f in files:
871 871 backrevref = {} # final value for: filerev -> changerev
872 872 lowestchild = {} # lowest known filerev child of a filerev
873 873 delayed = [] # filerev with filtered linkrev, for post-processing
874 874 lowesthead = None # cache for manifest content of all head revisions
875 875 fl = repo.file(f)
876 876 for fr in list(fl):
877 877 rev = fl.linkrev(fr)
878 878 if rev not in cl:
879 879 # changerev pointed in linkrev is filtered
880 880 # record it for post processing.
881 881 delayed.append((fr, rev))
882 882 continue
883 883 for p in fl.parentrevs(fr):
884 884 if 0 <= p and p not in lowestchild:
885 885 lowestchild[p] = fr
886 886 backrevref[fr] = rev
887 887 s.add(rev)
888 888
889 889 # Post-processing of all filerevs we skipped because they were
890 890 # filtered. If such filerevs have known and unfiltered children, this
891 891 # means they have an unfiltered appearance out there. We'll use linkrev
892 892 # adjustment to find one of these appearances. The lowest known child
893 893 # will be used as a starting point because it is the best upper-bound we
894 894 # have.
895 895 #
896 896 # This approach will fail when an unfiltered but linkrev-shadowed
897 897 # appearance exists in a head changeset without unfiltered filerev
898 898 # children anywhere.
899 899 while delayed:
900 900 # must be a descending iteration. To slowly fill lowest child
901 901 # information that is of potential use by the next item.
902 902 fr, rev = delayed.pop()
903 903 lkr = rev
904 904
905 905 child = lowestchild.get(fr)
906 906
907 907 if child is None:
908 908 # search for existence of this file revision in a head revision.
909 909 # There are three possibilities:
910 910 # - the revision exists in a head and we can find an
911 911 # introduction from there,
912 912 # - the revision does not exist in a head because it has been
913 913 # changed since its introduction: we would have found a child
914 914 # and be in the other 'else' clause,
915 915 # - all versions of the revision are hidden.
916 916 if lowesthead is None:
917 917 lowesthead = {}
918 918 for h in repo.heads():
919 919 fnode = repo[h].manifest().get(f)
920 920 if fnode is not None:
921 921 lowesthead[fl.rev(fnode)] = h
922 922 headrev = lowesthead.get(fr)
923 923 if headrev is None:
924 924 # content is nowhere unfiltered
925 925 continue
926 926 rev = repo[headrev][f].introrev()
927 927 else:
928 928 # the lowest known child is a good upper bound
929 929 childcrev = backrevref[child]
930 930 # XXX this does not guarantee returning the lowest
931 931 # introduction of this revision, but this gives a
932 932 # result which is a good start and will fit in most
933 933 # cases. We probably need to fix the multiple
934 934 # introductions case properly (report each
935 935 # introduction, even for identical file revisions)
936 936 # once and for all at some point anyway.
937 937 for p in repo[childcrev][f].parents():
938 938 if p.filerev() == fr:
939 939 rev = p.rev()
940 940 break
941 941 if rev == lkr: # no shadowed entry found
942 942 # XXX This should never happen unless some manifest points
943 943 # to biggish file revisions (like a revision that uses a
944 944 # parent that never appears in the manifest ancestors)
945 945 continue
946 946
947 947 # Fill the data for the next iteration.
948 948 for p in fl.parentrevs(fr):
949 949 if 0 <= p and p not in lowestchild:
950 950 lowestchild[p] = fr
951 951 backrevref[fr] = rev
952 952 s.add(rev)
953 953
954 954 return subset & s
955 955
956 956 def first(repo, subset, x):
957 957 """``first(set, [n])``
958 958 An alias for limit().
959 959 """
960 960 return limit(repo, subset, x)
961 961
962 962 def _follow(repo, subset, x, name, followfirst=False):
963 963 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
964 964 c = repo['.']
965 965 if l:
966 966 x = getstring(l[0], _("%s expected a filename") % name)
967 967 if x in c:
968 968 cx = c[x]
969 969 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
970 970 # include the revision responsible for the most recent version
971 971 s.add(cx.introrev())
972 972 else:
973 973 return baseset()
974 974 else:
975 975 s = _revancestors(repo, baseset([c.rev()]), followfirst)
976 976
977 977 return subset & s
978 978
979 979 def follow(repo, subset, x):
980 980 """``follow([file])``
981 981 An alias for ``::.`` (ancestors of the working directory's first parent).
982 982 If a filename is specified, the history of the given file is followed,
983 983 including copies.
984 984 """
985 985 return _follow(repo, subset, x, 'follow')
986 986
987 987 def _followfirst(repo, subset, x):
988 988 # ``followfirst([file])``
989 989 # Like ``follow([file])`` but follows only the first parent of
990 990 # every revision or file revision.
991 991 return _follow(repo, subset, x, '_followfirst', followfirst=True)
992 992
993 993 def getall(repo, subset, x):
994 994 """``all()``
995 995 All changesets, the same as ``0:tip``.
996 996 """
997 997 # i18n: "all" is a keyword
998 998 getargs(x, 0, 0, _("all takes no arguments"))
999 999 return subset & spanset(repo) # drop "null" if any
1000 1000
1001 1001 def grep(repo, subset, x):
1002 1002 """``grep(regex)``
1003 1003 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1004 1004 to ensure special escape characters are handled correctly. Unlike
1005 1005 ``keyword(string)``, the match is case-sensitive.
1006 1006 """
1007 1007 try:
1008 1008 # i18n: "grep" is a keyword
1009 1009 gr = re.compile(getstring(x, _("grep requires a string")))
1010 1010 except re.error, e:
1011 1011 raise error.ParseError(_('invalid match pattern: %s') % e)
1012 1012
1013 1013 def matches(x):
1014 1014 c = repo[x]
1015 1015 for e in c.files() + [c.user(), c.description()]:
1016 1016 if gr.search(e):
1017 1017 return True
1018 1018 return False
1019 1019
1020 1020 return subset.filter(matches)
1021 1021
1022 1022 def _matchfiles(repo, subset, x):
1023 1023 # _matchfiles takes a revset list of prefixed arguments:
1024 1024 #
1025 1025 # [p:foo, i:bar, x:baz]
1026 1026 #
1027 1027 # builds a match object from them and filters subset. Allowed
1028 1028 # prefixes are 'p:' for regular patterns, 'i:' for include
1029 1029 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1030 1030 # a revision identifier, or the empty string to reference the
1031 1031 # working directory, from which the match object is
1032 1032 # initialized. Use 'd:' to set the default matching mode, default
1033 1033 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1034 1034
1035 1035 # i18n: "_matchfiles" is a keyword
1036 1036 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1037 1037 pats, inc, exc = [], [], []
1038 1038 rev, default = None, None
1039 1039 for arg in l:
1040 1040 # i18n: "_matchfiles" is a keyword
1041 1041 s = getstring(arg, _("_matchfiles requires string arguments"))
1042 1042 prefix, value = s[:2], s[2:]
1043 1043 if prefix == 'p:':
1044 1044 pats.append(value)
1045 1045 elif prefix == 'i:':
1046 1046 inc.append(value)
1047 1047 elif prefix == 'x:':
1048 1048 exc.append(value)
1049 1049 elif prefix == 'r:':
1050 1050 if rev is not None:
1051 1051 # i18n: "_matchfiles" is a keyword
1052 1052 raise error.ParseError(_('_matchfiles expected at most one '
1053 1053 'revision'))
1054 1054 if value != '': # empty means working directory; leave rev as None
1055 1055 rev = value
1056 1056 elif prefix == 'd:':
1057 1057 if default is not None:
1058 1058 # i18n: "_matchfiles" is a keyword
1059 1059 raise error.ParseError(_('_matchfiles expected at most one '
1060 1060 'default mode'))
1061 1061 default = value
1062 1062 else:
1063 1063 # i18n: "_matchfiles" is a keyword
1064 1064 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1065 1065 if not default:
1066 1066 default = 'glob'
1067 1067
1068 1068 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1069 1069 exclude=exc, ctx=repo[rev], default=default)
1070 1070
1071 1071 def matches(x):
1072 1072 for f in repo[x].files():
1073 1073 if m(f):
1074 1074 return True
1075 1075 return False
1076 1076
1077 1077 return subset.filter(matches)
1078 1078
1079 1079 def hasfile(repo, subset, x):
1080 1080 """``file(pattern)``
1081 1081 Changesets affecting files matched by pattern.
1082 1082
1083 1083 For a faster but less accurate result, consider using ``filelog()``
1084 1084 instead.
1085 1085
1086 1086 This predicate uses ``glob:`` as the default kind of pattern.
1087 1087 """
1088 1088 # i18n: "file" is a keyword
1089 1089 pat = getstring(x, _("file requires a pattern"))
1090 1090 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1091 1091
1092 1092 def head(repo, subset, x):
1093 1093 """``head()``
1094 1094 Changeset is a named branch head.
1095 1095 """
1096 1096 # i18n: "head" is a keyword
1097 1097 getargs(x, 0, 0, _("head takes no arguments"))
1098 1098 hs = set()
1099 1099 for b, ls in repo.branchmap().iteritems():
1100 1100 hs.update(repo[h].rev() for h in ls)
1101 1101 return baseset(hs).filter(subset.__contains__)
1102 1102
1103 1103 def heads(repo, subset, x):
1104 1104 """``heads(set)``
1105 1105 Members of set with no children in set.
1106 1106 """
1107 1107 s = getset(repo, subset, x)
1108 1108 ps = parents(repo, subset, x)
1109 1109 return s - ps
1110 1110
1111 1111 def hidden(repo, subset, x):
1112 1112 """``hidden()``
1113 1113 Hidden changesets.
1114 1114 """
1115 1115 # i18n: "hidden" is a keyword
1116 1116 getargs(x, 0, 0, _("hidden takes no arguments"))
1117 1117 hiddenrevs = repoview.filterrevs(repo, 'visible')
1118 1118 return subset & hiddenrevs
1119 1119
1120 1120 def keyword(repo, subset, x):
1121 1121 """``keyword(string)``
1122 1122 Search commit message, user name, and names of changed files for
1123 1123 string. The match is case-insensitive.
1124 1124 """
1125 1125 # i18n: "keyword" is a keyword
1126 1126 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1127 1127
1128 1128 def matches(r):
1129 1129 c = repo[r]
1130 1130 return util.any(kw in encoding.lower(t) for t in c.files() + [c.user(),
1131 1131 c.description()])
1132 1132
1133 1133 return subset.filter(matches)
1134 1134
1135 1135 def limit(repo, subset, x):
1136 1136 """``limit(set, [n])``
1137 1137 First n members of set, defaulting to 1.
1138 1138 """
1139 1139 # i18n: "limit" is a keyword
1140 1140 l = getargs(x, 1, 2, _("limit requires one or two arguments"))
1141 1141 try:
1142 1142 lim = 1
1143 1143 if len(l) == 2:
1144 1144 # i18n: "limit" is a keyword
1145 1145 lim = int(getstring(l[1], _("limit requires a number")))
1146 1146 except (TypeError, ValueError):
1147 1147 # i18n: "limit" is a keyword
1148 1148 raise error.ParseError(_("limit expects a number"))
1149 1149 ss = subset
1150 1150 os = getset(repo, fullreposet(repo), l[0])
1151 1151 result = []
1152 1152 it = iter(os)
1153 1153 for x in xrange(lim):
1154 1154 try:
1155 1155 y = it.next()
1156 1156 if y in ss:
1157 1157 result.append(y)
1158 1158 except (StopIteration):
1159 1159 break
1160 1160 return baseset(result)
1161 1161
1162 1162 def last(repo, subset, x):
1163 1163 """``last(set, [n])``
1164 1164 Last n members of set, defaulting to 1.
1165 1165 """
1166 1166 # i18n: "last" is a keyword
1167 1167 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1168 1168 try:
1169 1169 lim = 1
1170 1170 if len(l) == 2:
1171 1171 # i18n: "last" is a keyword
1172 1172 lim = int(getstring(l[1], _("last requires a number")))
1173 1173 except (TypeError, ValueError):
1174 1174 # i18n: "last" is a keyword
1175 1175 raise error.ParseError(_("last expects a number"))
1176 1176 ss = subset
1177 1177 os = getset(repo, fullreposet(repo), l[0])
1178 1178 os.reverse()
1179 1179 result = []
1180 1180 it = iter(os)
1181 1181 for x in xrange(lim):
1182 1182 try:
1183 1183 y = it.next()
1184 1184 if y in ss:
1185 1185 result.append(y)
1186 1186 except (StopIteration):
1187 1187 break
1188 1188 return baseset(result)
1189 1189
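limit() and last() above clip a set to its first or last n members; last() simply reverses the evaluated set before taking the same walk. For example:

    hg log -r 'limit(::tip, 3)'    # roughly the three lowest-numbered ancestors of tip
    hg log -r 'last(draft(), 5)'   # the five highest-numbered draft revisions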
1190 1190 def maxrev(repo, subset, x):
1191 1191 """``max(set)``
1192 1192 Changeset with highest revision number in set.
1193 1193 """
1194 1194 os = getset(repo, fullreposet(repo), x)
1195 1195 if os:
1196 1196 m = os.max()
1197 1197 if m in subset:
1198 1198 return baseset([m])
1199 1199 return baseset()
1200 1200
1201 1201 def merge(repo, subset, x):
1202 1202 """``merge()``
1203 1203 Changeset is a merge changeset.
1204 1204 """
1205 1205 # i18n: "merge" is a keyword
1206 1206 getargs(x, 0, 0, _("merge takes no arguments"))
1207 1207 cl = repo.changelog
1208 1208 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1209 1209
1210 1210 def branchpoint(repo, subset, x):
1211 1211 """``branchpoint()``
1212 1212 Changesets with more than one child.
1213 1213 """
1214 1214 # i18n: "branchpoint" is a keyword
1215 1215 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1216 1216 cl = repo.changelog
1217 1217 if not subset:
1218 1218 return baseset()
1219 1219 baserev = min(subset)
1220 1220 parentscount = [0]*(len(repo) - baserev)
1221 1221 for r in cl.revs(start=baserev + 1):
1222 1222 for p in cl.parentrevs(r):
1223 1223 if p >= baserev:
1224 1224 parentscount[p - baserev] += 1
1225 1225 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1226 1226
1227 1227 def minrev(repo, subset, x):
1228 1228 """``min(set)``
1229 1229 Changeset with lowest revision number in set.
1230 1230 """
1231 1231 os = getset(repo, fullreposet(repo), x)
1232 1232 if os:
1233 1233 m = os.min()
1234 1234 if m in subset:
1235 1235 return baseset([m])
1236 1236 return baseset()
1237 1237
1238 1238 def modifies(repo, subset, x):
1239 1239 """``modifies(pattern)``
1240 1240 Changesets modifying files matched by pattern.
1241 1241
1242 1242 The pattern without explicit kind like ``glob:`` is expected to be
1243 1243 relative to the current directory and match against a file or a
1244 1244 directory.
1245 1245 """
1246 1246 # i18n: "modifies" is a keyword
1247 1247 pat = getstring(x, _("modifies requires a pattern"))
1248 1248 return checkstatus(repo, subset, pat, 0)
1249 1249
1250 1250 def named(repo, subset, x):
1251 1251 """``named(namespace)``
1252 1252 The changesets in a given namespace.
1253 1253
1254 1254 If `namespace` starts with `re:`, the remainder of the string is treated as
1255 1255 a regular expression. To match a namespace that actually starts with `re:`,
1256 1256 use the prefix `literal:`.
1257 1257 """
1258 1258 # i18n: "named" is a keyword
1259 1259 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1260 1260
1261 1261 ns = getstring(args[0],
1262 1262 # i18n: "named" is a keyword
1263 1263 _('the argument to named must be a string'))
1264 1264 kind, pattern, matcher = _stringmatcher(ns)
1265 1265 namespaces = set()
1266 1266 if kind == 'literal':
1267 1267 if pattern not in repo.names:
1268 1268 raise error.RepoLookupError(_("namespace '%s' does not exist")
1269 1269 % ns)
1270 1270 namespaces.add(repo.names[pattern])
1271 1271 else:
1272 1272 for name, ns in repo.names.iteritems():
1273 1273 if matcher(name):
1274 1274 namespaces.add(ns)
1275 1275 if not namespaces:
1276 1276 raise error.RepoLookupError(_("no namespace exists"
1277 1277 " that match '%s'") % pattern)
1278 1278
1279 1279 names = set()
1280 1280 for ns in namespaces:
1281 1281 for name in ns.listnames(repo):
1282 1282 if name not in ns.deprecated:
1283 1283 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1284 1284
1285 1285 names -= set([node.nullrev])
1286 1286 return subset & names
1287 1287
1288 1288 def node_(repo, subset, x):
1289 1289 """``id(string)``
1290 1290 Revision non-ambiguously specified by the given hex string prefix.
1291 1291 """
1292 1292 # i18n: "id" is a keyword
1293 1293 l = getargs(x, 1, 1, _("id requires one argument"))
1294 1294 # i18n: "id" is a keyword
1295 1295 n = getstring(l[0], _("id requires a string"))
1296 1296 if len(n) == 40:
1297 1297 try:
1298 1298 rn = repo.changelog.rev(node.bin(n))
1299 1299 except (LookupError, TypeError):
1300 1300 rn = None
1301 1301 else:
1302 1302 rn = None
1303 1303 pm = repo.changelog._partialmatch(n)
1304 1304 if pm is not None:
1305 1305 rn = repo.changelog.rev(pm)
1306 1306
1307 1307 if rn is None:
1308 1308 return baseset()
1309 1309 result = baseset([rn])
1310 1310 return result & subset
1311 1311
1312 1312 def obsolete(repo, subset, x):
1313 1313 """``obsolete()``
1314 1314 Mutable changeset with a newer version."""
1315 1315 # i18n: "obsolete" is a keyword
1316 1316 getargs(x, 0, 0, _("obsolete takes no arguments"))
1317 1317 obsoletes = obsmod.getrevs(repo, 'obsolete')
1318 1318 return subset & obsoletes
1319 1319
1320 1320 def only(repo, subset, x):
1321 1321 """``only(set, [set])``
1322 1322 Changesets that are ancestors of the first set that are not ancestors
1323 1323 of any other head in the repo. If a second set is specified, the result
1324 1324 is ancestors of the first set that are not ancestors of the second set
1325 1325 (i.e. ::<set1> - ::<set2>).
1326 1326 """
1327 1327 cl = repo.changelog
1328 1328 # i18n: "only" is a keyword
1329 1329 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1330 1330 include = getset(repo, fullreposet(repo), args[0])
1331 1331 if len(args) == 1:
1332 1332 if not include:
1333 1333 return baseset()
1334 1334
1335 1335 descendants = set(_revdescendants(repo, include, False))
1336 1336 exclude = [rev for rev in cl.headrevs()
1337 1337 if not rev in descendants and not rev in include]
1338 1338 else:
1339 1339 exclude = getset(repo, fullreposet(repo), args[1])
1340 1340
1341 1341 results = set(cl.findmissingrevs(common=exclude, heads=include))
1342 1342 return subset & results
1343 1343
1344 1344 def origin(repo, subset, x):
1345 1345 """``origin([set])``
1346 1346 Changesets that were specified as a source for the grafts, transplants or
1347 1347 rebases that created the given revisions. Omitting the optional set is the
1348 1348 same as passing all(). If a changeset created by these operations is itself
1349 1349 specified as a source for one of these operations, only the source changeset
1350 1350 for the first operation is selected.
1351 1351 """
1352 1352 if x is not None:
1353 1353 dests = getset(repo, fullreposet(repo), x)
1354 1354 else:
1355 1355 dests = fullreposet(repo)
1356 1356
1357 1357 def _firstsrc(rev):
1358 1358 src = _getrevsource(repo, rev)
1359 1359 if src is None:
1360 1360 return None
1361 1361
1362 1362 while True:
1363 1363 prev = _getrevsource(repo, src)
1364 1364
1365 1365 if prev is None:
1366 1366 return src
1367 1367 src = prev
1368 1368
1369 1369 o = set([_firstsrc(r) for r in dests])
1370 1370 o -= set([None])
1371 1371 return subset & o
1372 1372
1373 1373 def outgoing(repo, subset, x):
1374 1374 """``outgoing([path])``
1375 1375 Changesets not found in the specified destination repository, or the
1376 1376 default push location.
1377 1377 """
1378 1378 # Avoid cycles.
1379 1379 import discovery
1380 1380 import hg
1381 1381 # i18n: "outgoing" is a keyword
1382 1382 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1383 1383 # i18n: "outgoing" is a keyword
1384 1384 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1385 1385 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1386 1386 dest, branches = hg.parseurl(dest)
1387 1387 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1388 1388 if revs:
1389 1389 revs = [repo.lookup(rev) for rev in revs]
1390 1390 other = hg.peer(repo, {}, dest)
1391 1391 repo.ui.pushbuffer()
1392 1392 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1393 1393 repo.ui.popbuffer()
1394 1394 cl = repo.changelog
1395 1395 o = set([cl.rev(r) for r in outgoing.missing])
1396 1396 return subset & o
1397 1397
1398 1398 def p1(repo, subset, x):
1399 1399 """``p1([set])``
1400 1400 First parent of changesets in set, or the working directory.
1401 1401 """
1402 1402 if x is None:
1403 1403 p = repo[x].p1().rev()
1404 1404 if p >= 0:
1405 1405 return subset & baseset([p])
1406 1406 return baseset()
1407 1407
1408 1408 ps = set()
1409 1409 cl = repo.changelog
1410 1410 for r in getset(repo, fullreposet(repo), x):
1411 1411 ps.add(cl.parentrevs(r)[0])
1412 1412 ps -= set([node.nullrev])
1413 1413 return subset & ps
1414 1414
1415 1415 def p2(repo, subset, x):
1416 1416 """``p2([set])``
1417 1417 Second parent of changesets in set, or the working directory.
1418 1418 """
1419 1419 if x is None:
1420 1420 ps = repo[x].parents()
1421 1421 try:
1422 1422 p = ps[1].rev()
1423 1423 if p >= 0:
1424 1424 return subset & baseset([p])
1425 1425 return baseset()
1426 1426 except IndexError:
1427 1427 return baseset()
1428 1428
1429 1429 ps = set()
1430 1430 cl = repo.changelog
1431 1431 for r in getset(repo, fullreposet(repo), x):
1432 1432 ps.add(cl.parentrevs(r)[1])
1433 1433 ps -= set([node.nullrev])
1434 1434 return subset & ps
1435 1435
1436 1436 def parents(repo, subset, x):
1437 1437 """``parents([set])``
1438 1438 The set of all parents for all changesets in set, or the working directory.
1439 1439 """
1440 1440 if x is None:
1441 1441 ps = set(p.rev() for p in repo[x].parents())
1442 1442 else:
1443 1443 ps = set()
1444 1444 cl = repo.changelog
1445 1445 for r in getset(repo, fullreposet(repo), x):
1446 1446 ps.update(cl.parentrevs(r))
1447 1447 ps -= set([node.nullrev])
1448 1448 return subset & ps
1449 1449
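p1(), p2() and parents() above differ only in which parent slots they collect, and all of them fall back to the working directory's parents when called without an argument. For instance:

    hg log -r 'parents(.)'     # parents of the working directory's parent
    hg log -r 'p2(merge())'    # second parents of all merge changesets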
1450 1450 def parentspec(repo, subset, x, n):
1451 1451 """``set^0``
1452 1452 The set.
1453 1453 ``set^1`` (or ``set^``), ``set^2``
1454 1454 First or second parent, respectively, of all changesets in set.
1455 1455 """
1456 1456 try:
1457 1457 n = int(n[1])
1458 1458 if n not in (0, 1, 2):
1459 1459 raise ValueError
1460 1460 except (TypeError, ValueError):
1461 1461 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1462 1462 ps = set()
1463 1463 cl = repo.changelog
1464 1464 for r in getset(repo, fullreposet(repo), x):
1465 1465 if n == 0:
1466 1466 ps.add(r)
1467 1467 elif n == 1:
1468 1468 ps.add(cl.parentrevs(r)[0])
1469 1469 elif n == 2:
1470 1470 parents = cl.parentrevs(r)
1471 1471 if len(parents) > 1:
1472 1472 ps.add(parents[1])
1473 1473 return subset & ps
1474 1474
1475 1475 def present(repo, subset, x):
1476 1476 """``present(set)``
1477 1477 An empty set, if any revision in set isn't found; otherwise,
1478 1478 all revisions in set.
1479 1479
1480 1480 If any of specified revisions is not present in the local repository,
1481 1481 the query is normally aborted. But this predicate allows the query
1482 1482 to continue even in such cases.
1483 1483 """
1484 1484 try:
1485 1485 return getset(repo, subset, x)
1486 1486 except error.RepoLookupError:
1487 1487 return baseset()
1488 1488
1489 1489 def public(repo, subset, x):
1490 1490 """``public()``
1491 1491 Changeset in public phase."""
1492 1492 # i18n: "public" is a keyword
1493 1493 getargs(x, 0, 0, _("public takes no arguments"))
1494 1494 phase = repo._phasecache.phase
1495 1495 target = phases.public
1496 1496 condition = lambda r: phase(repo, r) == target
1497 1497 return subset.filter(condition, cache=False)
1498 1498
1499 1499 def remote(repo, subset, x):
1500 1500 """``remote([id [,path]])``
1501 1501 Local revision that corresponds to the given identifier in a
1502 1502 remote repository, if present. Here, the '.' identifier is a
1503 1503 synonym for the current local branch.
1504 1504 """
1505 1505
1506 1506 import hg # avoid start-up nasties
1507 1507 # i18n: "remote" is a keyword
1508 1508 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1509 1509
1510 1510 q = '.'
1511 1511 if len(l) > 0:
1512 1512 # i18n: "remote" is a keyword
1513 1513 q = getstring(l[0], _("remote requires a string id"))
1514 1514 if q == '.':
1515 1515 q = repo['.'].branch()
1516 1516
1517 1517 dest = ''
1518 1518 if len(l) > 1:
1519 1519 # i18n: "remote" is a keyword
1520 1520 dest = getstring(l[1], _("remote requires a repository path"))
1521 1521 dest = repo.ui.expandpath(dest or 'default')
1522 1522 dest, branches = hg.parseurl(dest)
1523 1523 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1524 1524 if revs:
1525 1525 revs = [repo.lookup(rev) for rev in revs]
1526 1526 other = hg.peer(repo, {}, dest)
1527 1527 n = other.lookup(q)
1528 1528 if n in repo:
1529 1529 r = repo[n].rev()
1530 1530 if r in subset:
1531 1531 return baseset([r])
1532 1532 return baseset()
1533 1533
1534 1534 def removes(repo, subset, x):
1535 1535 """``removes(pattern)``
1536 1536 Changesets which remove files matching pattern.
1537 1537
1538 1538 The pattern without explicit kind like ``glob:`` is expected to be
1539 1539 relative to the current directory and match against a file or a
1540 1540 directory.
1541 1541 """
1542 1542 # i18n: "removes" is a keyword
1543 1543 pat = getstring(x, _("removes requires a pattern"))
1544 1544 return checkstatus(repo, subset, pat, 2)
1545 1545
1546 1546 def rev(repo, subset, x):
1547 1547 """``rev(number)``
1548 1548 Revision with the given numeric identifier.
1549 1549 """
1550 1550 # i18n: "rev" is a keyword
1551 1551 l = getargs(x, 1, 1, _("rev requires one argument"))
1552 1552 try:
1553 1553 # i18n: "rev" is a keyword
1554 1554 l = int(getstring(l[0], _("rev requires a number")))
1555 1555 except (TypeError, ValueError):
1556 1556 # i18n: "rev" is a keyword
1557 1557 raise error.ParseError(_("rev expects a number"))
1558 1558 if l not in repo.changelog and l != node.nullrev:
1559 1559 return baseset()
1560 1560 return subset & baseset([l])
1561 1561
1562 1562 def matching(repo, subset, x):
1563 1563 """``matching(revision [, field])``
1564 1564 Changesets in which a given set of fields match the set of fields in the
1565 1565 selected revision or set.
1566 1566
1567 1567 To match more than one field pass the list of fields to match separated
1568 1568 by spaces (e.g. ``author description``).
1569 1569
1570 1570 Valid fields are most regular revision fields and some special fields.
1571 1571
1572 1572 Regular revision fields are ``description``, ``author``, ``branch``,
1573 1573 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1574 1574 and ``diff``.
1575 1575 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1576 1576 contents of the revision. Two revisions matching their ``diff`` will
1577 1577 also match their ``files``.
1578 1578
1579 1579 Special fields are ``summary`` and ``metadata``:
1580 1580 ``summary`` matches the first line of the description.
1581 1581 ``metadata`` is equivalent to matching ``description user date``
1582 1582 (i.e. it matches the main metadata fields).
1583 1583
1584 1584 ``metadata`` is the default field which is used when no fields are
1585 1585 specified. You can match more than one field at a time.
1586 1586 """
1587 1587 # i18n: "matching" is a keyword
1588 1588 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1589 1589
1590 1590 revs = getset(repo, fullreposet(repo), l[0])
1591 1591
1592 1592 fieldlist = ['metadata']
1593 1593 if len(l) > 1:
1594 1594 fieldlist = getstring(l[1],
1595 1595 # i18n: "matching" is a keyword
1596 1596 _("matching requires a string "
1597 1597 "as its second argument")).split()
1598 1598
1599 1599 # Make sure that there are no repeated fields,
1600 1600 # expand the 'special' 'metadata' field type
1601 1601 # and check the 'files' whenever we check the 'diff'
1602 1602 fields = []
1603 1603 for field in fieldlist:
1604 1604 if field == 'metadata':
1605 1605 fields += ['user', 'description', 'date']
1606 1606 elif field == 'diff':
1607 1607 # a revision matching the diff must also match the files
1608 1608 # since matching the diff is very costly, make sure to
1609 1609 # also match the files first
1610 1610 fields += ['files', 'diff']
1611 1611 else:
1612 1612 if field == 'author':
1613 1613 field = 'user'
1614 1614 fields.append(field)
1615 1615 fields = set(fields)
1616 1616 if 'summary' in fields and 'description' in fields:
1617 1617 # If a revision matches its description it also matches its summary
1618 1618 fields.discard('summary')
1619 1619
1620 1620 # We may want to match more than one field
1621 1621 # Not all fields take the same amount of time to be matched
1622 1622 # Sort the selected fields in order of increasing matching cost
1623 1623 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1624 1624 'files', 'description', 'substate', 'diff']
1625 1625 def fieldkeyfunc(f):
1626 1626 try:
1627 1627 return fieldorder.index(f)
1628 1628 except ValueError:
1629 1629 # assume an unknown field is very costly
1630 1630 return len(fieldorder)
1631 1631 fields = list(fields)
1632 1632 fields.sort(key=fieldkeyfunc)
1633 1633
1634 1634 # Each field will be matched with its own "getfield" function
1635 1635 # which will be added to the getfieldfuncs array of functions
1636 1636 getfieldfuncs = []
1637 1637 _funcs = {
1638 1638 'user': lambda r: repo[r].user(),
1639 1639 'branch': lambda r: repo[r].branch(),
1640 1640 'date': lambda r: repo[r].date(),
1641 1641 'description': lambda r: repo[r].description(),
1642 1642 'files': lambda r: repo[r].files(),
1643 1643 'parents': lambda r: repo[r].parents(),
1644 1644 'phase': lambda r: repo[r].phase(),
1645 1645 'substate': lambda r: repo[r].substate,
1646 1646 'summary': lambda r: repo[r].description().splitlines()[0],
1647 1647 'diff': lambda r: list(repo[r].diff(git=True),)
1648 1648 }
1649 1649 for info in fields:
1650 1650 getfield = _funcs.get(info, None)
1651 1651 if getfield is None:
1652 1652 raise error.ParseError(
1653 1653 # i18n: "matching" is a keyword
1654 1654 _("unexpected field name passed to matching: %s") % info)
1655 1655 getfieldfuncs.append(getfield)
1656 1656 # convert the getfield array of functions into a "getinfo" function
1657 1657 # which returns an array of field values (or a single value if there
1658 1658 # is only one field to match)
1659 1659 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1660 1660
1661 1661 def matches(x):
1662 1662 for rev in revs:
1663 1663 target = getinfo(rev)
1664 1664 match = True
1665 1665 for n, f in enumerate(getfieldfuncs):
1666 1666 if target[n] != f(x):
1667 1667 match = False
1668 1668 if match:
1669 1669 return True
1670 1670 return False
1671 1671
1672 1672 return subset.filter(matches)
1673 1673
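# An illustrative sketch of ``matching()`` queries (revision numbers are
# hypothetical, not from any particular repository):
#
#   matching(42)                   - same user, description and date as rev 42
#   matching(42, 'author branch')  - only author and branch have to match
#
# Fields are compared for equality against every revision in the first
# argument, cheapest fields first (see fieldorder above).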
1674 1674 def reverse(repo, subset, x):
1675 1675 """``reverse(set)``
1676 1676 Reverse order of set.
1677 1677 """
1678 1678 l = getset(repo, subset, x)
1679 1679 l.reverse()
1680 1680 return l
1681 1681
1682 1682 def roots(repo, subset, x):
1683 1683 """``roots(set)``
1684 1684 Changesets in set with no parent changeset in set.
1685 1685 """
1686 1686 s = getset(repo, fullreposet(repo), x)
1687 subset = baseset([r for r in s if r in subset])
1687 subset = subset & s  # baseset([r for r in s if r in subset])
1688 1688 cs = _children(repo, subset, s)
1689 1689 return subset - cs
1690 1690
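# Sketch of ``roots()`` semantics: in a linear history 0-1-2-3-4, roots(2:4)
# is just revision 2, since 3 and 4 have a parent inside the set while 2's
# parent falls outside it.  Narrowing with ``subset & s`` above lets the
# smartset classes below choose the cheaper containment test instead of
# rebuilding a baseset by hand.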
1691 1691 def secret(repo, subset, x):
1692 1692 """``secret()``
1693 1693 Changeset in secret phase."""
1694 1694 # i18n: "secret" is a keyword
1695 1695 getargs(x, 0, 0, _("secret takes no arguments"))
1696 1696 phase = repo._phasecache.phase
1697 1697 target = phases.secret
1698 1698 condition = lambda r: phase(repo, r) == target
1699 1699 return subset.filter(condition, cache=False)
1700 1700
1701 1701 def sort(repo, subset, x):
1702 1702 """``sort(set[, [-]key...])``
1703 1703 Sort set by keys. The default sort order is ascending, specify a key
1704 1704 as ``-key`` to sort in descending order.
1705 1705
1706 1706 The keys can be:
1707 1707
1708 1708 - ``rev`` for the revision number,
1709 1709 - ``branch`` for the branch name,
1710 1710 - ``desc`` for the commit message (description),
1711 1711 - ``user`` for user name (``author`` can be used as an alias),
1712 1712 - ``date`` for the commit date
1713 1713 """
1714 1714 # i18n: "sort" is a keyword
1715 1715 l = getargs(x, 1, 2, _("sort requires one or two arguments"))
1716 1716 keys = "rev"
1717 1717 if len(l) == 2:
1718 1718 # i18n: "sort" is a keyword
1719 1719 keys = getstring(l[1], _("sort spec must be a string"))
1720 1720
1721 1721 s = l[0]
1722 1722 keys = keys.split()
1723 1723 l = []
1724 1724 def invert(s):
1725 1725 return "".join(chr(255 - ord(c)) for c in s)
1726 1726 revs = getset(repo, subset, s)
1727 1727 if keys == ["rev"]:
1728 1728 revs.sort()
1729 1729 return revs
1730 1730 elif keys == ["-rev"]:
1731 1731 revs.sort(reverse=True)
1732 1732 return revs
1733 1733 for r in revs:
1734 1734 c = repo[r]
1735 1735 e = []
1736 1736 for k in keys:
1737 1737 if k == 'rev':
1738 1738 e.append(r)
1739 1739 elif k == '-rev':
1740 1740 e.append(-r)
1741 1741 elif k == 'branch':
1742 1742 e.append(c.branch())
1743 1743 elif k == '-branch':
1744 1744 e.append(invert(c.branch()))
1745 1745 elif k == 'desc':
1746 1746 e.append(c.description())
1747 1747 elif k == '-desc':
1748 1748 e.append(invert(c.description()))
1749 1749 elif k in 'user author':
1750 1750 e.append(c.user())
1751 1751 elif k in '-user -author':
1752 1752 e.append(invert(c.user()))
1753 1753 elif k == 'date':
1754 1754 e.append(c.date()[0])
1755 1755 elif k == '-date':
1756 1756 e.append(-c.date()[0])
1757 1757 else:
1758 1758 raise error.ParseError(_("unknown sort key %r") % k)
1759 1759 e.append(r)
1760 1760 l.append(e)
1761 1761 l.sort()
1762 1762 return baseset([e[-1] for e in l])
1763 1763
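# Illustrative ``sort()`` queries (hypothetical):
#
#   sort(branch(default), '-date')  - newest changesets on default first
#   sort(all(), 'user rev')         - sort by user name, then revision
#
# Only the plain 'rev'/'-rev' specs take the fast path above; any other key
# builds a per-revision sort vector before sorting.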
1764 1764 def subrepo(repo, subset, x):
1765 1765 """``subrepo([pattern])``
1766 1766 Changesets that add, modify or remove the given subrepo. If no subrepo
1767 1767 pattern is named, any subrepo changes are returned.
1768 1768 """
1769 1769 # i18n: "subrepo" is a keyword
1770 1770 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1771 1771 if len(args) != 0:
1772 1772 pat = getstring(args[0], _("subrepo requires a pattern"))
1773 1773
1774 1774 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1775 1775
1776 1776 def submatches(names):
1777 1777 k, p, m = _stringmatcher(pat)
1778 1778 for name in names:
1779 1779 if m(name):
1780 1780 yield name
1781 1781
1782 1782 def matches(x):
1783 1783 c = repo[x]
1784 1784 s = repo.status(c.p1().node(), c.node(), match=m)
1785 1785
1786 1786 if len(args) == 0:
1787 1787 return s.added or s.modified or s.removed
1788 1788
1789 1789 if s.added:
1790 1790 return util.any(submatches(c.substate.keys()))
1791 1791
1792 1792 if s.modified:
1793 1793 subs = set(c.p1().substate.keys())
1794 1794 subs.update(c.substate.keys())
1795 1795
1796 1796 for path in submatches(subs):
1797 1797 if c.p1().substate.get(path) != c.substate.get(path):
1798 1798 return True
1799 1799
1800 1800 if s.removed:
1801 1801 return util.any(submatches(c.p1().substate.keys()))
1802 1802
1803 1803 return False
1804 1804
1805 1805 return subset.filter(matches)
1806 1806
1807 1807 def _stringmatcher(pattern):
1808 1808 """
1809 1809 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1810 1810 returns the matcher name, pattern, and matcher function.
1811 1811 missing or unknown prefixes are treated as literal matches.
1812 1812
1813 1813 helper for tests:
1814 1814 >>> def test(pattern, *tests):
1815 1815 ... kind, pattern, matcher = _stringmatcher(pattern)
1816 1816 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1817 1817
1818 1818 exact matching (no prefix):
1819 1819 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1820 1820 ('literal', 'abcdefg', [False, False, True])
1821 1821
1822 1822 regex matching ('re:' prefix)
1823 1823 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1824 1824 ('re', 'a.+b', [False, False, True])
1825 1825
1826 1826 force exact matches ('literal:' prefix)
1827 1827 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1828 1828 ('literal', 're:foobar', [False, True])
1829 1829
1830 1830 unknown prefixes are ignored and treated as literals
1831 1831 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1832 1832 ('literal', 'foo:bar', [False, False, True])
1833 1833 """
1834 1834 if pattern.startswith('re:'):
1835 1835 pattern = pattern[3:]
1836 1836 try:
1837 1837 regex = re.compile(pattern)
1838 1838 except re.error, e:
1839 1839 raise error.ParseError(_('invalid regular expression: %s')
1840 1840 % e)
1841 1841 return 're', pattern, regex.search
1842 1842 elif pattern.startswith('literal:'):
1843 1843 pattern = pattern[8:]
1844 1844 return 'literal', pattern, pattern.__eq__
1845 1845
1846 1846 def _substringmatcher(pattern):
1847 1847 kind, pattern, matcher = _stringmatcher(pattern)
1848 1848 if kind == 'literal':
1849 1849 matcher = lambda s: pattern in s
1850 1850 return kind, pattern, matcher
1851 1851
1852 1852 def tag(repo, subset, x):
1853 1853 """``tag([name])``
1854 1854 The specified tag by name, or all tagged revisions if no name is given.
1855 1855
1856 1856 If `name` starts with `re:`, the remainder of the name is treated as
1857 1857 a regular expression. To match a tag that actually starts with `re:`,
1858 1858 use the prefix `literal:`.
1859 1859 """
1860 1860 # i18n: "tag" is a keyword
1861 1861 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
1862 1862 cl = repo.changelog
1863 1863 if args:
1864 1864 pattern = getstring(args[0],
1865 1865 # i18n: "tag" is a keyword
1866 1866 _('the argument to tag must be a string'))
1867 1867 kind, pattern, matcher = _stringmatcher(pattern)
1868 1868 if kind == 'literal':
1869 1869 # avoid resolving all tags
1870 1870 tn = repo._tagscache.tags.get(pattern, None)
1871 1871 if tn is None:
1872 1872 raise error.RepoLookupError(_("tag '%s' does not exist")
1873 1873 % pattern)
1874 1874 s = set([repo[tn].rev()])
1875 1875 else:
1876 1876 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
1877 1877 else:
1878 1878 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
1879 1879 return subset & s
1880 1880
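# Illustrative ``tag()`` queries (tag names are hypothetical):
#
#   tag()                - every revision carrying a tag other than 'tip'
#   tag('1.0')           - exact lookup through the tags cache
#   tag('re:^release-')  - tag names matched as a regular expression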
1881 1881 def tagged(repo, subset, x):
1882 1882 return tag(repo, subset, x)
1883 1883
1884 1884 def unstable(repo, subset, x):
1885 1885 """``unstable()``
1886 1886 Non-obsolete changesets with obsolete ancestors.
1887 1887 """
1888 1888 # i18n: "unstable" is a keyword
1889 1889 getargs(x, 0, 0, _("unstable takes no arguments"))
1890 1890 unstables = obsmod.getrevs(repo, 'unstable')
1891 1891 return subset & unstables
1892 1892
1893 1893
1894 1894 def user(repo, subset, x):
1895 1895 """``user(string)``
1896 1896 User name contains string. The match is case-insensitive.
1897 1897
1898 1898 If `string` starts with `re:`, the remainder of the string is treated as
1899 1899 a regular expression. To match a user that actually contains `re:`, use
1900 1900 the prefix `literal:`.
1901 1901 """
1902 1902 return author(repo, subset, x)
1903 1903
1904 1904 # experimental
1905 1905 def wdir(repo, subset, x):
1906 1906 # i18n: "wdir" is a keyword
1907 1907 getargs(x, 0, 0, _("wdir takes no arguments"))
1908 1908 if None in subset:
1909 1909 return baseset([None])
1910 1910 return baseset()
1911 1911
1912 1912 # for internal use
1913 1913 def _list(repo, subset, x):
1914 1914 s = getstring(x, "internal error")
1915 1915 if not s:
1916 1916 return baseset()
1917 1917 ls = [repo[r].rev() for r in s.split('\0')]
1918 1918 s = subset
1919 1919 return baseset([r for r in ls if r in s])
1920 1920
1921 1921 # for internal use
1922 1922 def _intlist(repo, subset, x):
1923 1923 s = getstring(x, "internal error")
1924 1924 if not s:
1925 1925 return baseset()
1926 1926 ls = [int(r) for r in s.split('\0')]
1927 1927 s = subset
1928 1928 return baseset([r for r in ls if r in s])
1929 1929
1930 1930 # for internal use
1931 1931 def _hexlist(repo, subset, x):
1932 1932 s = getstring(x, "internal error")
1933 1933 if not s:
1934 1934 return baseset()
1935 1935 cl = repo.changelog
1936 1936 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
1937 1937 s = subset
1938 1938 return baseset([r for r in ls if r in s])
1939 1939
1940 1940 symbols = {
1941 1941 "adds": adds,
1942 1942 "all": getall,
1943 1943 "ancestor": ancestor,
1944 1944 "ancestors": ancestors,
1945 1945 "_firstancestors": _firstancestors,
1946 1946 "author": author,
1947 1947 "bisect": bisect,
1948 1948 "bisected": bisected,
1949 1949 "bookmark": bookmark,
1950 1950 "branch": branch,
1951 1951 "branchpoint": branchpoint,
1952 1952 "bumped": bumped,
1953 1953 "bundle": bundle,
1954 1954 "children": children,
1955 1955 "closed": closed,
1956 1956 "contains": contains,
1957 1957 "converted": converted,
1958 1958 "date": date,
1959 1959 "desc": desc,
1960 1960 "descendants": descendants,
1961 1961 "_firstdescendants": _firstdescendants,
1962 1962 "destination": destination,
1963 1963 "divergent": divergent,
1964 1964 "draft": draft,
1965 1965 "extinct": extinct,
1966 1966 "extra": extra,
1967 1967 "file": hasfile,
1968 1968 "filelog": filelog,
1969 1969 "first": first,
1970 1970 "follow": follow,
1971 1971 "_followfirst": _followfirst,
1972 1972 "grep": grep,
1973 1973 "head": head,
1974 1974 "heads": heads,
1975 1975 "hidden": hidden,
1976 1976 "id": node_,
1977 1977 "keyword": keyword,
1978 1978 "last": last,
1979 1979 "limit": limit,
1980 1980 "_matchfiles": _matchfiles,
1981 1981 "max": maxrev,
1982 1982 "merge": merge,
1983 1983 "min": minrev,
1984 1984 "modifies": modifies,
1985 1985 "named": named,
1986 1986 "obsolete": obsolete,
1987 1987 "only": only,
1988 1988 "origin": origin,
1989 1989 "outgoing": outgoing,
1990 1990 "p1": p1,
1991 1991 "p2": p2,
1992 1992 "parents": parents,
1993 1993 "present": present,
1994 1994 "public": public,
1995 1995 "remote": remote,
1996 1996 "removes": removes,
1997 1997 "rev": rev,
1998 1998 "reverse": reverse,
1999 1999 "roots": roots,
2000 2000 "sort": sort,
2001 2001 "secret": secret,
2002 2002 "subrepo": subrepo,
2003 2003 "matching": matching,
2004 2004 "tag": tag,
2005 2005 "tagged": tagged,
2006 2006 "user": user,
2007 2007 "unstable": unstable,
2008 2008 "wdir": wdir,
2009 2009 "_list": _list,
2010 2010 "_intlist": _intlist,
2011 2011 "_hexlist": _hexlist,
2012 2012 }
2013 2013
2014 2014 # symbols which can't be used for a DoS attack for any given input
2015 2015 # (e.g. those which accept regexes as plain strings shouldn't be included)
2016 2016 # functions that just return a lot of changesets (like all) don't count here
2017 2017 safesymbols = set([
2018 2018 "adds",
2019 2019 "all",
2020 2020 "ancestor",
2021 2021 "ancestors",
2022 2022 "_firstancestors",
2023 2023 "author",
2024 2024 "bisect",
2025 2025 "bisected",
2026 2026 "bookmark",
2027 2027 "branch",
2028 2028 "branchpoint",
2029 2029 "bumped",
2030 2030 "bundle",
2031 2031 "children",
2032 2032 "closed",
2033 2033 "converted",
2034 2034 "date",
2035 2035 "desc",
2036 2036 "descendants",
2037 2037 "_firstdescendants",
2038 2038 "destination",
2039 2039 "divergent",
2040 2040 "draft",
2041 2041 "extinct",
2042 2042 "extra",
2043 2043 "file",
2044 2044 "filelog",
2045 2045 "first",
2046 2046 "follow",
2047 2047 "_followfirst",
2048 2048 "head",
2049 2049 "heads",
2050 2050 "hidden",
2051 2051 "id",
2052 2052 "keyword",
2053 2053 "last",
2054 2054 "limit",
2055 2055 "_matchfiles",
2056 2056 "max",
2057 2057 "merge",
2058 2058 "min",
2059 2059 "modifies",
2060 2060 "obsolete",
2061 2061 "only",
2062 2062 "origin",
2063 2063 "outgoing",
2064 2064 "p1",
2065 2065 "p2",
2066 2066 "parents",
2067 2067 "present",
2068 2068 "public",
2069 2069 "remote",
2070 2070 "removes",
2071 2071 "rev",
2072 2072 "reverse",
2073 2073 "roots",
2074 2074 "sort",
2075 2075 "secret",
2076 2076 "matching",
2077 2077 "tag",
2078 2078 "tagged",
2079 2079 "user",
2080 2080 "unstable",
2081 2081 "wdir",
2082 2082 "_list",
2083 2083 "_intlist",
2084 2084 "_hexlist",
2085 2085 ])
2086 2086
2087 2087 methods = {
2088 2088 "range": rangeset,
2089 2089 "dagrange": dagrange,
2090 2090 "string": stringset,
2091 2091 "symbol": symbolset,
2092 2092 "and": andset,
2093 2093 "or": orset,
2094 2094 "not": notset,
2095 2095 "list": listset,
2096 2096 "func": func,
2097 2097 "ancestor": ancestorspec,
2098 2098 "parent": parentspec,
2099 2099 "parentpost": p1,
2100 2100 "only": only,
2101 2101 "onlypost": only,
2102 2102 }
2103 2103
2104 2104 def optimize(x, small):
2105 2105 if x is None:
2106 2106 return 0, x
2107 2107
2108 2108 smallbonus = 1
2109 2109 if small:
2110 2110 smallbonus = .5
2111 2111
2112 2112 op = x[0]
2113 2113 if op == 'minus':
2114 2114 return optimize(('and', x[1], ('not', x[2])), small)
2115 2115 elif op == 'only':
2116 2116 return optimize(('func', ('symbol', 'only'),
2117 2117 ('list', x[1], x[2])), small)
2118 2118 elif op == 'dagrangepre':
2119 2119 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2120 2120 elif op == 'dagrangepost':
2121 2121 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2122 2122 elif op == 'rangepre':
2123 2123 return optimize(('range', ('string', '0'), x[1]), small)
2124 2124 elif op == 'rangepost':
2125 2125 return optimize(('range', x[1], ('string', 'tip')), small)
2126 2126 elif op == 'negate':
2127 2127 return optimize(('string',
2128 2128 '-' + getstring(x[1], _("can't negate that"))), small)
2129 2129 elif op in 'string symbol negate':
2130 2130 return smallbonus, x # single revisions are small
2131 2131 elif op == 'and':
2132 2132 wa, ta = optimize(x[1], True)
2133 2133 wb, tb = optimize(x[2], True)
2134 2134
2135 2135 # (::x and not ::y)/(not ::y and ::x) have a fast path
2136 2136 def isonly(revs, bases):
2137 2137 return (
2138 2138 revs[0] == 'func'
2139 2139 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2140 2140 and bases[0] == 'not'
2141 2141 and bases[1][0] == 'func'
2142 2142 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2143 2143
2144 2144 w = min(wa, wb)
2145 2145 if isonly(ta, tb):
2146 2146 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2147 2147 if isonly(tb, ta):
2148 2148 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2149 2149
2150 2150 if wa > wb:
2151 2151 return w, (op, tb, ta)
2152 2152 return w, (op, ta, tb)
2153 2153 elif op == 'or':
2154 2154 wa, ta = optimize(x[1], False)
2155 2155 wb, tb = optimize(x[2], False)
2156 2156 if wb < wa:
2157 2157 wb, wa = wa, wb
2158 2158 return max(wa, wb), (op, ta, tb)
2159 2159 elif op == 'not':
2160 2160 o = optimize(x[1], not small)
2161 2161 return o[0], (op, o[1])
2162 2162 elif op == 'parentpost':
2163 2163 o = optimize(x[1], small)
2164 2164 return o[0], (op, o[1])
2165 2165 elif op == 'group':
2166 2166 return optimize(x[1], small)
2167 2167 elif op in 'dagrange range list parent ancestorspec':
2168 2168 if op == 'parent':
2169 2169 # x^:y means (x^) : y, not x ^ (:y)
2170 2170 post = ('parentpost', x[1])
2171 2171 if x[2][0] == 'dagrangepre':
2172 2172 return optimize(('dagrange', post, x[2][1]), small)
2173 2173 elif x[2][0] == 'rangepre':
2174 2174 return optimize(('range', post, x[2][1]), small)
2175 2175
2176 2176 wa, ta = optimize(x[1], small)
2177 2177 wb, tb = optimize(x[2], small)
2178 2178 return wa + wb, (op, ta, tb)
2179 2179 elif op == 'func':
2180 2180 f = getstring(x[1], _("not a symbol"))
2181 2181 wa, ta = optimize(x[2], small)
2182 2182 if f in ("author branch closed date desc file grep keyword "
2183 2183 "outgoing user"):
2184 2184 w = 10 # slow
2185 2185 elif f in "modifies adds removes":
2186 2186 w = 30 # slower
2187 2187 elif f == "contains":
2188 2188 w = 100 # very slow
2189 2189 elif f == "ancestor":
2190 2190 w = 1 * smallbonus
2191 2191 elif f in "reverse limit first _intlist":
2192 2192 w = 0
2193 2193 elif f in "sort":
2194 2194 w = 10 # assume most sorts look at changelog
2195 2195 else:
2196 2196 w = 1
2197 2197 return w + wa, (op, x[1], ta)
2198 2198 return 1, x
2199 2199
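# Rough examples of what optimize() does to a parsed tree (the weights are
# the heuristics above, not measured costs):
#
#   'author(bob) and 42'  -> operands swapped so the cheap literal '42' is
#                            evaluated first ('author' carries weight 10)
#   '::a and not ::b'     -> rewritten to the 'only(a, b)' fast path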
2200 2200 _aliasarg = ('func', ('symbol', '_aliasarg'))
2201 2201 def _getaliasarg(tree):
2202 2202 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2203 2203 return X, None otherwise.
2204 2204 """
2205 2205 if (len(tree) == 3 and tree[:2] == _aliasarg
2206 2206 and tree[2][0] == 'string'):
2207 2207 return tree[2][1]
2208 2208 return None
2209 2209
2210 2210 def _checkaliasarg(tree, known=None):
2211 2211     """Check that tree contains no _aliasarg construct, or only ones whose
2212 2212     value is in known. Used to avoid alias placeholder injection.
2213 2213 """
2214 2214 if isinstance(tree, tuple):
2215 2215 arg = _getaliasarg(tree)
2216 2216 if arg is not None and (not known or arg not in known):
2217 2217 raise error.UnknownIdentifier('_aliasarg', [])
2218 2218 for t in tree:
2219 2219 _checkaliasarg(t, known)
2220 2220
2221 2221 # the set of valid characters for the initial letter of symbols in
2222 2222 # alias declarations and definitions
2223 2223 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2224 2224 if c.isalnum() or c in '._@$' or ord(c) > 127)
2225 2225
2226 2226 def _tokenizealias(program, lookup=None):
2227 2227 """Parse alias declaration/definition into a stream of tokens
2228 2228
2229 2229     This allows symbol names to also use ``$`` as an initial letter
2230 2230     (for backward compatibility), and callers of this function should
2231 2231     examine whether ``$`` is also used for unexpected symbols.
2232 2232 """
2233 2233 return tokenize(program, lookup=lookup,
2234 2234 syminitletters=_aliassyminitletters)
2235 2235
2236 2236 def _parsealiasdecl(decl):
2237 2237 """Parse alias declaration ``decl``
2238 2238
2239 2239 This returns ``(name, tree, args, errorstr)`` tuple:
2240 2240
2241 2241 - ``name``: of declared alias (may be ``decl`` itself at error)
2242 2242 - ``tree``: parse result (or ``None`` at error)
2243 2243 - ``args``: list of alias argument names (or None for symbol declaration)
2244 2244 - ``errorstr``: detail about detected error (or None)
2245 2245
2246 2246 >>> _parsealiasdecl('foo')
2247 2247 ('foo', ('symbol', 'foo'), None, None)
2248 2248 >>> _parsealiasdecl('$foo')
2249 2249 ('$foo', None, None, "'$' not for alias arguments")
2250 2250 >>> _parsealiasdecl('foo::bar')
2251 2251 ('foo::bar', None, None, 'invalid format')
2252 2252 >>> _parsealiasdecl('foo bar')
2253 2253 ('foo bar', None, None, 'at 4: invalid token')
2254 2254 >>> _parsealiasdecl('foo()')
2255 2255 ('foo', ('func', ('symbol', 'foo')), [], None)
2256 2256 >>> _parsealiasdecl('$foo()')
2257 2257 ('$foo()', None, None, "'$' not for alias arguments")
2258 2258 >>> _parsealiasdecl('foo($1, $2)')
2259 2259 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2260 2260 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2261 2261 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2262 2262 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2263 2263 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2264 2264 >>> _parsealiasdecl('foo(bar($1, $2))')
2265 2265 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2266 2266 >>> _parsealiasdecl('foo("string")')
2267 2267 ('foo("string")', None, None, 'invalid argument list')
2268 2268 >>> _parsealiasdecl('foo($1, $2')
2269 2269 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2270 2270 >>> _parsealiasdecl('foo("string')
2271 2271 ('foo("string', None, None, 'at 5: unterminated string')
2272 2272 >>> _parsealiasdecl('foo($1, $2, $1)')
2273 2273 ('foo', None, None, 'argument names collide with each other')
2274 2274 """
2275 2275 p = parser.parser(_tokenizealias, elements)
2276 2276 try:
2277 2277 tree, pos = p.parse(decl)
2278 2278 if (pos != len(decl)):
2279 2279 raise error.ParseError(_('invalid token'), pos)
2280 2280
2281 2281 if isvalidsymbol(tree):
2282 2282 # "name = ...." style
2283 2283 name = getsymbol(tree)
2284 2284 if name.startswith('$'):
2285 2285 return (decl, None, None, _("'$' not for alias arguments"))
2286 2286 return (name, ('symbol', name), None, None)
2287 2287
2288 2288 if isvalidfunc(tree):
2289 2289 # "name(arg, ....) = ...." style
2290 2290 name = getfuncname(tree)
2291 2291 if name.startswith('$'):
2292 2292 return (decl, None, None, _("'$' not for alias arguments"))
2293 2293 args = []
2294 2294 for arg in getfuncargs(tree):
2295 2295 if not isvalidsymbol(arg):
2296 2296 return (decl, None, None, _("invalid argument list"))
2297 2297 args.append(getsymbol(arg))
2298 2298 if len(args) != len(set(args)):
2299 2299 return (name, None, None,
2300 2300 _("argument names collide with each other"))
2301 2301 return (name, ('func', ('symbol', name)), args, None)
2302 2302
2303 2303 return (decl, None, None, _("invalid format"))
2304 2304 except error.ParseError, inst:
2305 2305 return (decl, None, None, parseerrordetail(inst))
2306 2306
2307 2307 def _parsealiasdefn(defn, args):
2308 2308 """Parse alias definition ``defn``
2309 2309
2310 2310 This function also replaces alias argument references in the
2311 2311 specified definition by ``_aliasarg(ARGNAME)``.
2312 2312
2313 2313 ``args`` is a list of alias argument names, or None if the alias
2314 2314 is declared as a symbol.
2315 2315
2316 2316 This returns "tree" as parsing result.
2317 2317
2318 2318 >>> args = ['$1', '$2', 'foo']
2319 2319 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2320 2320 (or
2321 2321 (func
2322 2322 ('symbol', '_aliasarg')
2323 2323 ('string', '$1'))
2324 2324 (func
2325 2325 ('symbol', '_aliasarg')
2326 2326 ('string', 'foo')))
2327 2327 >>> try:
2328 2328 ... _parsealiasdefn('$1 or $bar', args)
2329 2329 ... except error.ParseError, inst:
2330 2330 ... print parseerrordetail(inst)
2331 2331 at 6: '$' not for alias arguments
2332 2332 >>> args = ['$1', '$10', 'foo']
2333 2333 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2334 2334 (or
2335 2335 (func
2336 2336 ('symbol', '_aliasarg')
2337 2337 ('string', '$10'))
2338 2338 ('symbol', 'foobar'))
2339 2339 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2340 2340 (or
2341 2341 ('string', '$1')
2342 2342 ('string', 'foo'))
2343 2343 """
2344 2344 def tokenizedefn(program, lookup=None):
2345 2345 if args:
2346 2346 argset = set(args)
2347 2347 else:
2348 2348 argset = set()
2349 2349
2350 2350 for t, value, pos in _tokenizealias(program, lookup=lookup):
2351 2351 if t == 'symbol':
2352 2352 if value in argset:
2353 2353 # emulate tokenization of "_aliasarg('ARGNAME')":
2354 2354                     # "_aliasarg()" is an unknown symbol only used to separate
2355 2355 # alias argument placeholders from regular strings.
2356 2356 yield ('symbol', '_aliasarg', pos)
2357 2357 yield ('(', None, pos)
2358 2358 yield ('string', value, pos)
2359 2359 yield (')', None, pos)
2360 2360 continue
2361 2361 elif value.startswith('$'):
2362 2362 raise error.ParseError(_("'$' not for alias arguments"),
2363 2363 pos)
2364 2364 yield (t, value, pos)
2365 2365
2366 2366 p = parser.parser(tokenizedefn, elements)
2367 2367 tree, pos = p.parse(defn)
2368 2368 if pos != len(defn):
2369 2369 raise error.ParseError(_('invalid token'), pos)
2370 2370 return tree
2371 2371
2372 2372 class revsetalias(object):
2373 2373     # whether this alias's `error` information has already been shown or not.
2374 2374     # this avoids showing the same warning multiple times at each `findaliases`.
2375 2375 warned = False
2376 2376
2377 2377 def __init__(self, name, value):
2378 2378 '''Aliases like:
2379 2379
2380 2380 h = heads(default)
2381 2381 b($1) = ancestors($1) - ancestors(default)
2382 2382 '''
2383 2383 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2384 2384 if self.error:
2385 2385 self.error = _('failed to parse the declaration of revset alias'
2386 2386 ' "%s": %s') % (self.name, self.error)
2387 2387 return
2388 2388
2389 2389 try:
2390 2390 self.replacement = _parsealiasdefn(value, self.args)
2391 2391 # Check for placeholder injection
2392 2392 _checkaliasarg(self.replacement, self.args)
2393 2393 except error.ParseError, inst:
2394 2394 self.error = _('failed to parse the definition of revset alias'
2395 2395 ' "%s": %s') % (self.name, parseerrordetail(inst))
2396 2396
2397 2397 def _getalias(aliases, tree):
2398 2398 """If tree looks like an unexpanded alias, return it. Return None
2399 2399 otherwise.
2400 2400 """
2401 2401 if isinstance(tree, tuple) and tree:
2402 2402 if tree[0] == 'symbol' and len(tree) == 2:
2403 2403 name = tree[1]
2404 2404 alias = aliases.get(name)
2405 2405 if alias and alias.args is None and alias.tree == tree:
2406 2406 return alias
2407 2407 if tree[0] == 'func' and len(tree) > 1:
2408 2408 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2409 2409 name = tree[1][1]
2410 2410 alias = aliases.get(name)
2411 2411 if alias and alias.args is not None and alias.tree == tree[:2]:
2412 2412 return alias
2413 2413 return None
2414 2414
2415 2415 def _expandargs(tree, args):
2416 2416 """Replace _aliasarg instances with the substitution value of the
2417 2417 same name in args, recursively.
2418 2418 """
2419 2419 if not tree or not isinstance(tree, tuple):
2420 2420 return tree
2421 2421 arg = _getaliasarg(tree)
2422 2422 if arg is not None:
2423 2423 return args[arg]
2424 2424 return tuple(_expandargs(t, args) for t in tree)
2425 2425
2426 2426 def _expandaliases(aliases, tree, expanding, cache):
2427 2427 """Expand aliases in tree, recursively.
2428 2428
2429 2429 'aliases' is a dictionary mapping user defined aliases to
2430 2430 revsetalias objects.
2431 2431 """
2432 2432 if not isinstance(tree, tuple):
2433 2433 # Do not expand raw strings
2434 2434 return tree
2435 2435 alias = _getalias(aliases, tree)
2436 2436 if alias is not None:
2437 2437 if alias.error:
2438 2438 raise util.Abort(alias.error)
2439 2439 if alias in expanding:
2440 2440 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2441 2441 'detected') % alias.name)
2442 2442 expanding.append(alias)
2443 2443 if alias.name not in cache:
2444 2444 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2445 2445 expanding, cache)
2446 2446 result = cache[alias.name]
2447 2447 expanding.pop()
2448 2448 if alias.args is not None:
2449 2449 l = getlist(tree[2])
2450 2450 if len(l) != len(alias.args):
2451 2451 raise error.ParseError(
2452 2452 _('invalid number of arguments: %s') % len(l))
2453 2453 l = [_expandaliases(aliases, a, [], cache) for a in l]
2454 2454 result = _expandargs(result, dict(zip(alias.args, l)))
2455 2455 else:
2456 2456 result = tuple(_expandaliases(aliases, t, expanding, cache)
2457 2457 for t in tree)
2458 2458 return result
2459 2459
2460 2460 def findaliases(ui, tree, showwarning=None):
2461 2461 _checkaliasarg(tree)
2462 2462 aliases = {}
2463 2463 for k, v in ui.configitems('revsetalias'):
2464 2464 alias = revsetalias(k, v)
2465 2465 aliases[alias.name] = alias
2466 2466 tree = _expandaliases(aliases, tree, [], {})
2467 2467 if showwarning:
2468 2468         # warn about problematic (but not referenced) aliases
2469 2469 for name, alias in sorted(aliases.iteritems()):
2470 2470 if alias.error and not alias.warned:
2471 2471 showwarning(_('warning: %s\n') % (alias.error))
2472 2472 alias.warned = True
2473 2473 return tree
2474 2474
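# Aliases come from the [revsetalias] configuration section, for example
# (hypothetical hgrc):
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# With that configuration, findaliases() expands 'b(1.5)' into
# 'ancestors(1.5) - ancestors(default)' before the tree is optimized.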
2475 2475 def foldconcat(tree):
2476 2476 """Fold elements to be concatenated by `##`
2477 2477 """
2478 2478 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2479 2479 return tree
2480 2480 if tree[0] == '_concat':
2481 2481 pending = [tree]
2482 2482 l = []
2483 2483 while pending:
2484 2484 e = pending.pop()
2485 2485 if e[0] == '_concat':
2486 2486 pending.extend(reversed(e[1:]))
2487 2487 elif e[0] in ('string', 'symbol'):
2488 2488 l.append(e[1])
2489 2489 else:
2490 2490 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2491 2491 raise error.ParseError(msg)
2492 2492 return ('string', ''.join(l))
2493 2493 else:
2494 2494 return tuple(foldconcat(t) for t in tree)
2495 2495
2496 2496 def parse(spec, lookup=None):
2497 2497 p = parser.parser(tokenize, elements)
2498 2498 return p.parse(spec, lookup=lookup)
2499 2499
2500 2500 def posttreebuilthook(tree, repo):
2501 2501 # hook for extensions to execute code on the optimized tree
2502 2502 pass
2503 2503
2504 2504 def match(ui, spec, repo=None):
2505 2505 if not spec:
2506 2506 raise error.ParseError(_("empty query"))
2507 2507 lookup = None
2508 2508 if repo:
2509 2509 lookup = repo.__contains__
2510 2510 tree, pos = parse(spec, lookup)
2511 2511 if (pos != len(spec)):
2512 2512 raise error.ParseError(_("invalid token"), pos)
2513 2513 if ui:
2514 2514 tree = findaliases(ui, tree, showwarning=ui.warn)
2515 2515 tree = foldconcat(tree)
2516 2516 weight, tree = optimize(tree, True)
2517 2517 posttreebuilthook(tree, repo)
2518 2518 def mfunc(repo, subset=None):
2519 2519 if subset is None:
2520 2520 subset = fullreposet(repo)
2521 2521 if util.safehasattr(subset, 'isascending'):
2522 2522 result = getset(repo, subset, tree)
2523 2523 else:
2524 2524 result = getset(repo, baseset(subset), tree)
2525 2525 return result
2526 2526 return mfunc
2527 2527
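# Typical internal use of match() (a sketch; 'ui' and 'repo' come from the
# caller):
#
#   m = match(ui, 'draft() and ::tip', repo)
#   revs = m(repo)          # a smartset of matching revisions
#   revs = m(repo, subset)  # restrict evaluation to an existing smartset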
2528 2528 def formatspec(expr, *args):
2529 2529 '''
2530 2530 This is a convenience function for using revsets internally, and
2531 2531 escapes arguments appropriately. Aliases are intentionally ignored
2532 2532 so that intended expression behavior isn't accidentally subverted.
2533 2533
2534 2534 Supported arguments:
2535 2535
2536 2536 %r = revset expression, parenthesized
2537 2537 %d = int(arg), no quoting
2538 2538 %s = string(arg), escaped and single-quoted
2539 2539 %b = arg.branch(), escaped and single-quoted
2540 2540 %n = hex(arg), single-quoted
2541 2541 %% = a literal '%'
2542 2542
2543 2543 Prefixing the type with 'l' specifies a parenthesized list of that type.
2544 2544
2545 2545 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2546 2546 '(10 or 11):: and ((this()) or (that()))'
2547 2547 >>> formatspec('%d:: and not %d::', 10, 20)
2548 2548 '10:: and not 20::'
2549 2549 >>> formatspec('%ld or %ld', [], [1])
2550 2550 "_list('') or 1"
2551 2551 >>> formatspec('keyword(%s)', 'foo\\xe9')
2552 2552 "keyword('foo\\\\xe9')"
2553 2553 >>> b = lambda: 'default'
2554 2554 >>> b.branch = b
2555 2555 >>> formatspec('branch(%b)', b)
2556 2556 "branch('default')"
2557 2557 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2558 2558 "root(_list('a\\x00b\\x00c\\x00d'))"
2559 2559 '''
2560 2560
2561 2561 def quote(s):
2562 2562 return repr(str(s))
2563 2563
2564 2564 def argtype(c, arg):
2565 2565 if c == 'd':
2566 2566 return str(int(arg))
2567 2567 elif c == 's':
2568 2568 return quote(arg)
2569 2569 elif c == 'r':
2570 2570 parse(arg) # make sure syntax errors are confined
2571 2571 return '(%s)' % arg
2572 2572 elif c == 'n':
2573 2573 return quote(node.hex(arg))
2574 2574 elif c == 'b':
2575 2575 return quote(arg.branch())
2576 2576
2577 2577 def listexp(s, t):
2578 2578 l = len(s)
2579 2579 if l == 0:
2580 2580 return "_list('')"
2581 2581 elif l == 1:
2582 2582 return argtype(t, s[0])
2583 2583 elif t == 'd':
2584 2584 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2585 2585 elif t == 's':
2586 2586 return "_list('%s')" % "\0".join(s)
2587 2587 elif t == 'n':
2588 2588 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2589 2589 elif t == 'b':
2590 2590 return "_list('%s')" % "\0".join(a.branch() for a in s)
2591 2591
2592 2592 m = l // 2
2593 2593 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2594 2594
2595 2595 ret = ''
2596 2596 pos = 0
2597 2597 arg = 0
2598 2598 while pos < len(expr):
2599 2599 c = expr[pos]
2600 2600 if c == '%':
2601 2601 pos += 1
2602 2602 d = expr[pos]
2603 2603 if d == '%':
2604 2604 ret += d
2605 2605 elif d in 'dsnbr':
2606 2606 ret += argtype(d, args[arg])
2607 2607 arg += 1
2608 2608 elif d == 'l':
2609 2609 # a list of some type
2610 2610 pos += 1
2611 2611 d = expr[pos]
2612 2612 ret += listexp(list(args[arg]), d)
2613 2613 arg += 1
2614 2614 else:
2615 2615 raise util.Abort('unexpected revspec format character %s' % d)
2616 2616 else:
2617 2617 ret += c
2618 2618 pos += 1
2619 2619
2620 2620 return ret
2621 2621
2622 2622 def prettyformat(tree):
2623 2623 def _prettyformat(tree, level, lines):
2624 2624 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2625 2625 lines.append((level, str(tree)))
2626 2626 else:
2627 2627 lines.append((level, '(%s' % tree[0]))
2628 2628 for s in tree[1:]:
2629 2629 _prettyformat(s, level + 1, lines)
2630 2630 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
2631 2631
2632 2632 lines = []
2633 2633 _prettyformat(tree, 0, lines)
2634 2634 output = '\n'.join((' '*l + s) for l, s in lines)
2635 2635 return output
2636 2636
2637 2637 def depth(tree):
2638 2638 if isinstance(tree, tuple):
2639 2639 return max(map(depth, tree)) + 1
2640 2640 else:
2641 2641 return 0
2642 2642
2643 2643 def funcsused(tree):
2644 2644 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2645 2645 return set()
2646 2646 else:
2647 2647 funcs = set()
2648 2648 for s in tree[1:]:
2649 2649 funcs |= funcsused(s)
2650 2650 if tree[0] == 'func':
2651 2651 funcs.add(tree[1][1])
2652 2652 return funcs
2653 2653
2654 2654 class abstractsmartset(object):
2655 2655
2656 2656 def __nonzero__(self):
2657 2657 """True if the smartset is not empty"""
2658 2658 raise NotImplementedError()
2659 2659
2660 2660 def __contains__(self, rev):
2661 2661 """provide fast membership testing"""
2662 2662 raise NotImplementedError()
2663 2663
2664 2664 def __iter__(self):
2665 2665 """iterate the set in the order it is supposed to be iterated"""
2666 2666 raise NotImplementedError()
2667 2667
2668 2668 # Attributes containing a function to perform a fast iteration in a given
2669 2669 # direction. A smartset can have none, one, or both defined.
2670 2670 #
2671 2671 # Default value is None instead of a function returning None to avoid
2672 2672 # initializing an iterator just for testing if a fast method exists.
2673 2673 fastasc = None
2674 2674 fastdesc = None
2675 2675
2676 2676 def isascending(self):
2677 2677 """True if the set will iterate in ascending order"""
2678 2678 raise NotImplementedError()
2679 2679
2680 2680 def isdescending(self):
2681 2681 """True if the set will iterate in descending order"""
2682 2682 raise NotImplementedError()
2683 2683
2684 2684 def min(self):
2685 2685 """return the minimum element in the set"""
2686 2686 if self.fastasc is not None:
2687 2687 for r in self.fastasc():
2688 2688 return r
2689 2689 raise ValueError('arg is an empty sequence')
2690 2690 return min(self)
2691 2691
2692 2692 def max(self):
2693 2693 """return the maximum element in the set"""
2694 2694 if self.fastdesc is not None:
2695 2695 for r in self.fastdesc():
2696 2696 return r
2697 2697 raise ValueError('arg is an empty sequence')
2698 2698 return max(self)
2699 2699
2700 2700 def first(self):
2701 2701 """return the first element in the set (user iteration perspective)
2702 2702
2703 2703 Return None if the set is empty"""
2704 2704 raise NotImplementedError()
2705 2705
2706 2706 def last(self):
2707 2707 """return the last element in the set (user iteration perspective)
2708 2708
2709 2709 Return None if the set is empty"""
2710 2710 raise NotImplementedError()
2711 2711
2712 2712 def __len__(self):
2713 2713         """return the length of the smartset
2714 2714 
2715 2715         This can be expensive on a smartset that could otherwise be lazy."""
2716 2716 raise NotImplementedError()
2717 2717
2718 2718 def reverse(self):
2719 2719 """reverse the expected iteration order"""
2720 2720 raise NotImplementedError()
2721 2721
2722 2722 def sort(self, reverse=True):
2723 2723 """get the set to iterate in an ascending or descending order"""
2724 2724 raise NotImplementedError()
2725 2725
2726 2726 def __and__(self, other):
2727 2727 """Returns a new object with the intersection of the two collections.
2728 2728
2729 2729 This is part of the mandatory API for smartset."""
2730 2730 if isinstance(other, fullreposet):
2731 2731 return self
2732 2732 return self.filter(other.__contains__, cache=False)
2733 2733
2734 2734 def __add__(self, other):
2735 2735 """Returns a new object with the union of the two collections.
2736 2736
2737 2737 This is part of the mandatory API for smartset."""
2738 2738 return addset(self, other)
2739 2739
2740 2740 def __sub__(self, other):
2741 2741         """Returns a new object with the subtraction of the two collections.
2742 2742
2743 2743 This is part of the mandatory API for smartset."""
2744 2744 c = other.__contains__
2745 2745 return self.filter(lambda r: not c(r), cache=False)
2746 2746
2747 2747 def filter(self, condition, cache=True):
2748 2748 """Returns this smartset filtered by condition as a new smartset.
2749 2749
2750 2750 `condition` is a callable which takes a revision number and returns a
2751 2751 boolean.
2752 2752
2753 2753 This is part of the mandatory API for smartset."""
2754 2754         # builtins cannot be cached, but they do not need to be
2755 2755 if cache and util.safehasattr(condition, 'func_code'):
2756 2756 condition = util.cachefunc(condition)
2757 2757 return filteredset(self, condition)
2758 2758
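# The concrete classes below - baseset, filteredset, addset, generatorset,
# spanset - implement this interface, so evaluation can combine them without
# knowing the underlying representation.  For instance (sketch):
#
#   baseset([1, 2, 3]) & baseset([2, 3, 9])  # filteredset keeping 2 and 3
#   baseset([1, 2, 3]) + baseset([9])        # addset over both collections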
2759 2759 class baseset(abstractsmartset):
2760 2760 """Basic data structure that represents a revset and contains the basic
2761 2761 operation that it should be able to perform.
2762 2762
2763 2763 Every method in this class should be implemented by any smartset class.
2764 2764 """
2765 2765 def __init__(self, data=()):
2766 2766 if not isinstance(data, list):
2767 2767 data = list(data)
2768 2768 self._list = data
2769 2769 self._ascending = None
2770 2770
2771 2771 @util.propertycache
2772 2772 def _set(self):
2773 2773 return set(self._list)
2774 2774
2775 2775 @util.propertycache
2776 2776 def _asclist(self):
2777 2777 asclist = self._list[:]
2778 2778 asclist.sort()
2779 2779 return asclist
2780 2780
2781 2781 def __iter__(self):
2782 2782 if self._ascending is None:
2783 2783 return iter(self._list)
2784 2784 elif self._ascending:
2785 2785 return iter(self._asclist)
2786 2786 else:
2787 2787 return reversed(self._asclist)
2788 2788
2789 2789 def fastasc(self):
2790 2790 return iter(self._asclist)
2791 2791
2792 2792 def fastdesc(self):
2793 2793 return reversed(self._asclist)
2794 2794
2795 2795 @util.propertycache
2796 2796 def __contains__(self):
2797 2797 return self._set.__contains__
2798 2798
2799 2799 def __nonzero__(self):
2800 2800 return bool(self._list)
2801 2801
2802 2802 def sort(self, reverse=False):
2803 2803 self._ascending = not bool(reverse)
2804 2804
2805 2805 def reverse(self):
2806 2806 if self._ascending is None:
2807 2807 self._list.reverse()
2808 2808 else:
2809 2809 self._ascending = not self._ascending
2810 2810
2811 2811 def __len__(self):
2812 2812 return len(self._list)
2813 2813
2814 2814 def isascending(self):
2815 2815         """Returns True if the collection is in ascending order, False if not.
2816 2816
2817 2817 This is part of the mandatory API for smartset."""
2818 2818 if len(self) <= 1:
2819 2819 return True
2820 2820 return self._ascending is not None and self._ascending
2821 2821
2822 2822 def isdescending(self):
2823 2823         """Returns True if the collection is in descending order, False if not.
2824 2824
2825 2825 This is part of the mandatory API for smartset."""
2826 2826 if len(self) <= 1:
2827 2827 return True
2828 2828 return self._ascending is not None and not self._ascending
2829 2829
2830 2830 def first(self):
2831 2831 if self:
2832 2832 if self._ascending is None:
2833 2833 return self._list[0]
2834 2834 elif self._ascending:
2835 2835 return self._asclist[0]
2836 2836 else:
2837 2837 return self._asclist[-1]
2838 2838 return None
2839 2839
2840 2840 def last(self):
2841 2841 if self:
2842 2842 if self._ascending is None:
2843 2843 return self._list[-1]
2844 2844 elif self._ascending:
2845 2845 return self._asclist[-1]
2846 2846 else:
2847 2847 return self._asclist[0]
2848 2848 return None
2849 2849
2850 2850 def __repr__(self):
2851 2851 d = {None: '', False: '-', True: '+'}[self._ascending]
2852 2852 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2853 2853
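# baseset keeps the insertion order until sort() is called (sketch):
#
#   s = baseset([4, 0, 2])
#   list(s)                # [4, 0, 2] - original order
#   s.sort()
#   list(s)                # [0, 2, 4] - precomputed ascending list
#   s.first(), s.last()    # (0, 4) once ascending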
2854 2854 class filteredset(abstractsmartset):
2855 2855 """Duck type for baseset class which iterates lazily over the revisions in
2856 2856 the subset and contains a function which tests for membership in the
2857 2857 revset
2858 2858 """
2859 2859 def __init__(self, subset, condition=lambda x: True):
2860 2860 """
2861 2861         condition: a function that decides whether a revision in the subset
2862 2862 belongs to the revset or not.
2863 2863 """
2864 2864 self._subset = subset
2865 2865 self._condition = condition
2866 2866 self._cache = {}
2867 2867
2868 2868 def __contains__(self, x):
2869 2869 c = self._cache
2870 2870 if x not in c:
2871 2871 v = c[x] = x in self._subset and self._condition(x)
2872 2872 return v
2873 2873 return c[x]
2874 2874
2875 2875 def __iter__(self):
2876 2876 return self._iterfilter(self._subset)
2877 2877
2878 2878 def _iterfilter(self, it):
2879 2879 cond = self._condition
2880 2880 for x in it:
2881 2881 if cond(x):
2882 2882 yield x
2883 2883
2884 2884 @property
2885 2885 def fastasc(self):
2886 2886 it = self._subset.fastasc
2887 2887 if it is None:
2888 2888 return None
2889 2889 return lambda: self._iterfilter(it())
2890 2890
2891 2891 @property
2892 2892 def fastdesc(self):
2893 2893 it = self._subset.fastdesc
2894 2894 if it is None:
2895 2895 return None
2896 2896 return lambda: self._iterfilter(it())
2897 2897
2898 2898 def __nonzero__(self):
2899 2899 for r in self:
2900 2900 return True
2901 2901 return False
2902 2902
2903 2903 def __len__(self):
2904 2904 # Basic implementation to be changed in future patches.
2905 2905 l = baseset([r for r in self])
2906 2906 return len(l)
2907 2907
2908 2908 def sort(self, reverse=False):
2909 2909 self._subset.sort(reverse=reverse)
2910 2910
2911 2911 def reverse(self):
2912 2912 self._subset.reverse()
2913 2913
2914 2914 def isascending(self):
2915 2915 return self._subset.isascending()
2916 2916
2917 2917 def isdescending(self):
2918 2918 return self._subset.isdescending()
2919 2919
2920 2920 def first(self):
2921 2921 for x in self:
2922 2922 return x
2923 2923 return None
2924 2924
2925 2925 def last(self):
2926 2926 it = None
2927 2927         if self._subset.isascending():
2928 2928             it = self.fastdesc
2929 2929         elif self._subset.isdescending():
2930 2930             it = self.fastasc
2931 2931 if it is None:
2932 2932 # slowly consume everything. This needs improvement
2933 2933 it = lambda: reversed(list(self))
2934 2934 for x in it():
2935 2935 return x
2936 2936 return None
2937 2937
2938 2938 def __repr__(self):
2939 2939 return '<%s %r>' % (type(self).__name__, self._subset)
2940 2940
2941 2941 class addset(abstractsmartset):
2942 2942 """Represent the addition of two sets
2943 2943
2944 2944 Wrapper structure for lazily adding two structures without losing much
2945 2945 performance on the __contains__ method
2946 2946
2947 2947 If the ascending attribute is set, that means the two structures are
2948 2948 ordered in either an ascending or descending way. Therefore, we can add
2949 2949 them maintaining the order by iterating over both at the same time
2950 2950 """
2951 2951 def __init__(self, revs1, revs2, ascending=None):
2952 2952 self._r1 = revs1
2953 2953 self._r2 = revs2
2954 2954 self._iter = None
2955 2955 self._ascending = ascending
2956 2956 self._genlist = None
2957 2957 self._asclist = None
2958 2958
2959 2959 def __len__(self):
2960 2960 return len(self._list)
2961 2961
2962 2962 def __nonzero__(self):
2963 2963 return bool(self._r1) or bool(self._r2)
2964 2964
2965 2965 @util.propertycache
2966 2966 def _list(self):
2967 2967 if not self._genlist:
2968 2968 self._genlist = baseset(self._iterator())
2969 2969 return self._genlist
2970 2970
2971 2971 def _iterator(self):
2972 2972 """Iterate over both collections without repeating elements
2973 2973
2974 2974 If the ascending attribute is not set, iterate over the first one and
2975 2975 then over the second one checking for membership on the first one so we
2976 2976         don't yield any duplicates.
2977 2977
2978 2978 If the ascending attribute is set, iterate over both collections at the
2979 2979 same time, yielding only one value at a time in the given order.
2980 2980 """
2981 2981 if self._ascending is None:
2982 2982 def gen():
2983 2983 for r in self._r1:
2984 2984 yield r
2985 2985 inr1 = self._r1.__contains__
2986 2986 for r in self._r2:
2987 2987 if not inr1(r):
2988 2988 yield r
2989 2989 gen = gen()
2990 2990 else:
2991 2991 iter1 = iter(self._r1)
2992 2992 iter2 = iter(self._r2)
2993 2993 gen = self._iterordered(self._ascending, iter1, iter2)
2994 2994 return gen
2995 2995
2996 2996 def __iter__(self):
2997 2997 if self._ascending is None:
2998 2998 if self._genlist:
2999 2999 return iter(self._genlist)
3000 3000 return iter(self._iterator())
3001 3001 self._trysetasclist()
3002 3002 if self._ascending:
3003 3003 it = self.fastasc
3004 3004 else:
3005 3005 it = self.fastdesc
3006 3006 if it is None:
3007 3007 # consume the gen and try again
3008 3008 self._list
3009 3009 return iter(self)
3010 3010 return it()
3011 3011
3012 3012 def _trysetasclist(self):
3013 3013 """populate the _asclist attribute if possible and necessary"""
3014 3014 if self._genlist is not None and self._asclist is None:
3015 3015 self._asclist = sorted(self._genlist)
3016 3016
3017 3017 @property
3018 3018 def fastasc(self):
3019 3019 self._trysetasclist()
3020 3020 if self._asclist is not None:
3021 3021 return self._asclist.__iter__
3022 3022 iter1 = self._r1.fastasc
3023 3023 iter2 = self._r2.fastasc
3024 3024 if None in (iter1, iter2):
3025 3025 return None
3026 3026 return lambda: self._iterordered(True, iter1(), iter2())
3027 3027
3028 3028 @property
3029 3029 def fastdesc(self):
3030 3030 self._trysetasclist()
3031 3031 if self._asclist is not None:
3032 3032 return self._asclist.__reversed__
3033 3033 iter1 = self._r1.fastdesc
3034 3034 iter2 = self._r2.fastdesc
3035 3035 if None in (iter1, iter2):
3036 3036 return None
3037 3037 return lambda: self._iterordered(False, iter1(), iter2())
3038 3038
3039 3039 def _iterordered(self, ascending, iter1, iter2):
3040 3040 """produce an ordered iteration from two iterators with the same order
3041 3041
3042 3042         The ascending parameter is used to indicate the iteration direction.
3043 3043 """
3044 3044 choice = max
3045 3045 if ascending:
3046 3046 choice = min
3047 3047
3048 3048 val1 = None
3049 3049 val2 = None
3050 3050
3054 3054 try:
3055 3055 # Consume both iterators in an ordered way until one is
3056 3056 # empty
3057 3057 while True:
3058 3058 if val1 is None:
3059 3059 val1 = iter1.next()
3060 3060 if val2 is None:
3061 3061 val2 = iter2.next()
3062 3062 next = choice(val1, val2)
3063 3063 yield next
3064 3064 if val1 == next:
3065 3065 val1 = None
3066 3066 if val2 == next:
3067 3067 val2 = None
3068 3068 except StopIteration:
3069 3069 # Flush any remaining values and consume the other one
3070 3070 it = iter2
3071 3071 if val1 is not None:
3072 3072 yield val1
3073 3073 it = iter1
3074 3074 elif val2 is not None:
3075 3075 # might have been equality and both are empty
3076 3076 yield val2
3077 3077 for val in it:
3078 3078 yield val
3079 3079
3080 3080 def __contains__(self, x):
3081 3081 return x in self._r1 or x in self._r2
3082 3082
3083 3083 def sort(self, reverse=False):
3084 3084 """Sort the added set
3085 3085
3086 3086 For this we use the cached list with all the generated values and if we
3087 3087 know they are ascending or descending we can sort them in a smart way.
3088 3088 """
3089 3089 self._ascending = not reverse
3090 3090
3091 3091 def isascending(self):
3092 3092 return self._ascending is not None and self._ascending
3093 3093
3094 3094 def isdescending(self):
3095 3095 return self._ascending is not None and not self._ascending
3096 3096
3097 3097 def reverse(self):
3098 3098 if self._ascending is None:
3099 3099 self._list.reverse()
3100 3100 else:
3101 3101 self._ascending = not self._ascending
3102 3102
3103 3103 def first(self):
3104 3104 for x in self:
3105 3105 return x
3106 3106 return None
3107 3107
3108 3108 def last(self):
3109 3109 self.reverse()
3110 3110 val = self.first()
3111 3111 self.reverse()
3112 3112 return val
3113 3113
3114 3114 def __repr__(self):
3115 3115 d = {None: '', False: '-', True: '+'}[self._ascending]
3116 3116 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3117 3117
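# When both sides are known to be ascending, addset merges them lazily and
# without duplicates (sketch):
#
#   a = addset(baseset([1, 3, 5]), baseset([2, 3, 4]), ascending=True)
#   list(a)  # [1, 2, 3, 4, 5]
#
# Without the ascending hint it simply chains the two collections, skipping
# members of the second that already appeared in the first.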
3118 3118 class generatorset(abstractsmartset):
3119 3119 """Wrap a generator for lazy iteration
3120 3120
3121 3121 Wrapper structure for generators that provides lazy membership and can
3122 3122 be iterated more than once.
3123 3123 When asked for membership it generates values until either it finds the
3124 3124 requested one or has gone through all the elements in the generator
3125 3125 """
3126 3126 def __init__(self, gen, iterasc=None):
3127 3127 """
3128 3128 gen: a generator producing the values for the generatorset.
3129 3129 """
3130 3130 self._gen = gen
3131 3131 self._asclist = None
3132 3132 self._cache = {}
3133 3133 self._genlist = []
3134 3134 self._finished = False
3135 3135 self._ascending = True
3136 3136 if iterasc is not None:
3137 3137 if iterasc:
3138 3138 self.fastasc = self._iterator
3139 3139 self.__contains__ = self._asccontains
3140 3140 else:
3141 3141 self.fastdesc = self._iterator
3142 3142 self.__contains__ = self._desccontains
3143 3143
3144 3144 def __nonzero__(self):
3145 3145 for r in self:
3146 3146 return True
3147 3147 return False
3148 3148
3149 3149 def __contains__(self, x):
3150 3150 if x in self._cache:
3151 3151 return self._cache[x]
3152 3152
3153 3153 # Use new values only, as existing values would be cached.
3154 3154 for l in self._consumegen():
3155 3155 if l == x:
3156 3156 return True
3157 3157
3158 3158 self._cache[x] = False
3159 3159 return False
3160 3160
3161 3161 def _asccontains(self, x):
3162 3162 """version of contains optimised for ascending generator"""
3163 3163 if x in self._cache:
3164 3164 return self._cache[x]
3165 3165
3166 3166 # Use new values only, as existing values would be cached.
3167 3167 for l in self._consumegen():
3168 3168 if l == x:
3169 3169 return True
3170 3170 if l > x:
3171 3171 break
3172 3172
3173 3173 self._cache[x] = False
3174 3174 return False
3175 3175
3176 3176 def _desccontains(self, x):
3177 3177 """version of contains optimised for descending generator"""
3178 3178 if x in self._cache:
3179 3179 return self._cache[x]
3180 3180
3181 3181 # Use new values only, as existing values would be cached.
3182 3182 for l in self._consumegen():
3183 3183 if l == x:
3184 3184 return True
3185 3185 if l < x:
3186 3186 break
3187 3187
3188 3188 self._cache[x] = False
3189 3189 return False
3190 3190
3191 3191 def __iter__(self):
3192 3192 if self._ascending:
3193 3193 it = self.fastasc
3194 3194 else:
3195 3195 it = self.fastdesc
3196 3196 if it is not None:
3197 3197 return it()
3198 3198 # we need to consume the iterator
3199 3199 for x in self._consumegen():
3200 3200 pass
3201 3201 # recall the same code
3202 3202 return iter(self)
3203 3203
3204 3204 def _iterator(self):
3205 3205 if self._finished:
3206 3206 return iter(self._genlist)
3207 3207
3208 3208         # We have to use this complex iteration strategy to allow multiple
3209 3209         # iterations at the same time. We need to be able to pick up revisions
3210 3210         # that another instance has already pulled from _consumegen and added
3211 3211         # to genlist in the meantime.
3212 3212         #
3213 3213         # Getting rid of it would provide about a 15% speedup on this iteration.
3214 3214 genlist = self._genlist
3215 3215 nextrev = self._consumegen().next
3216 3216 _len = len # cache global lookup
3217 3217 def gen():
3218 3218 i = 0
3219 3219 while True:
3220 3220 if i < _len(genlist):
3221 3221 yield genlist[i]
3222 3222 else:
3223 3223 yield nextrev()
3224 3224 i += 1
3225 3225 return gen()
3226 3226
3227 3227 def _consumegen(self):
3228 3228 cache = self._cache
3229 3229 genlist = self._genlist.append
3230 3230 for item in self._gen:
3231 3231 cache[item] = True
3232 3232 genlist(item)
3233 3233 yield item
3234 3234 if not self._finished:
3235 3235 self._finished = True
3236 3236 asc = self._genlist[:]
3237 3237 asc.sort()
3238 3238 self._asclist = asc
3239 3239 self.fastasc = asc.__iter__
3240 3240 self.fastdesc = asc.__reversed__
3241 3241
3242 3242 def __len__(self):
3243 3243 for x in self._consumegen():
3244 3244 pass
3245 3245 return len(self._genlist)
3246 3246
3247 3247 def sort(self, reverse=False):
3248 3248 self._ascending = not reverse
3249 3249
3250 3250 def reverse(self):
3251 3251 self._ascending = not self._ascending
3252 3252
3253 3253 def isascending(self):
3254 3254 return self._ascending
3255 3255
3256 3256 def isdescending(self):
3257 3257 return not self._ascending
3258 3258
3259 3259 def first(self):
3260 3260 if self._ascending:
3261 3261 it = self.fastasc
3262 3262 else:
3263 3263 it = self.fastdesc
3264 3264 if it is None:
3265 3265 # we need to consume all and try again
3266 3266 for x in self._consumegen():
3267 3267 pass
3268 3268 return self.first()
3269 3269 if self:
3270 3270 return it().next()
3271 3271 return None
3272 3272
3273 3273 def last(self):
3274 3274 if self._ascending:
3275 3275 it = self.fastdesc
3276 3276 else:
3277 3277 it = self.fastasc
3278 3278 if it is None:
3279 3279 # we need to consume all and try again
3280 3280 for x in self._consumegen():
3281 3281 pass
3282 3282             return self.last()
3283 3283 if self:
3284 3284 return it().next()
3285 3285 return None
3286 3286
3287 3287 def __repr__(self):
3288 3288 d = {False: '-', True: '+'}[self._ascending]
3289 3289 return '<%s%s>' % (type(self).__name__, d)
3290 3290
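# Editor's note: an illustrative sketch, not part of upstream revset.py.  It
# shows how a generatorset behaves when wrapped around a plain ascending
# generator; the integer generator stands in for a real revision stream, and
# `iterasc=True` enables the early-exit membership test used above.
#
#   >>> gs = generatorset(iter([0, 2, 5, 9]), iterasc=True)
#   >>> 3 in gs      # stops generating at 5, the first value past 3
#   False
#   >>> 5 in gs      # already produced, answered from the cache
#   True
#   >>> list(gs)     # replays cached values, then finishes the generator
#   [0, 2, 5, 9]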
3291 3291 class spanset(abstractsmartset):
3292 3292     """Duck type for the baseset class which represents a range of revisions
3293 3293     and can work lazily, without keeping the whole range in memory
3294 3294 
3295 3295     Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3296 3296     notable points:
3297 3297     - when x > y, iteration is automatically descending,
3298 3298     - revisions filtered in this repoview will be skipped.
3299 3299
3300 3300 """
3301 3301 def __init__(self, repo, start=0, end=None):
3302 3302 """
3303 3303         start: first revision included in the set
3304 3304             (defaults to 0)
3305 3305         end: first revision excluded (last + 1)
3306 3306             (defaults to len(repo))
3307 3307 
3308 3308         The spanset will be descending if `end` < `start`.
3309 3309 """
3310 3310 if end is None:
3311 3311 end = len(repo)
3312 3312 self._ascending = start <= end
3313 3313 if not self._ascending:
3314 3314             start, end = end + 1, start + 1
3315 3315 self._start = start
3316 3316 self._end = end
3317 3317 self._hiddenrevs = repo.changelog.filteredrevs
3318 3318
3319 3319 def sort(self, reverse=False):
3320 3320 self._ascending = not reverse
3321 3321
3322 3322 def reverse(self):
3323 3323 self._ascending = not self._ascending
3324 3324
3325 3325 def _iterfilter(self, iterrange):
3326 3326 s = self._hiddenrevs
3327 3327 for r in iterrange:
3328 3328 if r not in s:
3329 3329 yield r
3330 3330
3331 3331 def __iter__(self):
3332 3332 if self._ascending:
3333 3333 return self.fastasc()
3334 3334 else:
3335 3335 return self.fastdesc()
3336 3336
3337 3337 def fastasc(self):
3338 3338 iterrange = xrange(self._start, self._end)
3339 3339 if self._hiddenrevs:
3340 3340 return self._iterfilter(iterrange)
3341 3341 return iter(iterrange)
3342 3342
3343 3343 def fastdesc(self):
3344 3344 iterrange = xrange(self._end - 1, self._start - 1, -1)
3345 3345 if self._hiddenrevs:
3346 3346 return self._iterfilter(iterrange)
3347 3347 return iter(iterrange)
3348 3348
3349 3349 def __contains__(self, rev):
3350 3350 hidden = self._hiddenrevs
3351 3351 return ((self._start <= rev < self._end)
3352 3352 and not (hidden and rev in hidden))
3353 3353
3354 3354 def __nonzero__(self):
3355 3355 for r in self:
3356 3356 return True
3357 3357 return False
3358 3358
3359 3359 def __len__(self):
3360 3360 if not self._hiddenrevs:
3361 3361 return abs(self._end - self._start)
3362 3362 else:
3363 3363 count = 0
3364 3364 start = self._start
3365 3365 end = self._end
3366 3366 for rev in self._hiddenrevs:
3367 3367 if (end < rev <= start) or (start <= rev < end):
3368 3368 count += 1
3369 3369 return abs(self._end - self._start) - count
3370 3370
3371 3371 def isascending(self):
3372 3372 return self._ascending
3373 3373
3374 3374 def isdescending(self):
3375 3375 return not self._ascending
3376 3376
3377 3377 def first(self):
3378 3378 if self._ascending:
3379 3379 it = self.fastasc
3380 3380 else:
3381 3381 it = self.fastdesc
3382 3382 for x in it():
3383 3383 return x
3384 3384 return None
3385 3385
3386 3386 def last(self):
3387 3387 if self._ascending:
3388 3388 it = self.fastdesc
3389 3389 else:
3390 3390 it = self.fastasc
3391 3391 for x in it():
3392 3392 return x
3393 3393 return None
3394 3394
3395 3395 def __repr__(self):
3396 3396 d = {False: '-', True: '+'}[self._ascending]
3397 3397 return '<%s%s %d:%d>' % (type(self).__name__, d,
3398 3398 self._start, self._end - 1)
3399 3399
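# Editor's note: an illustrative sketch, not part of upstream revset.py.  It
# shows spanset's direction handling, assuming a repository `repo` with at
# least six revisions and no hidden (filtered) revisions.
#
#   >>> list(spanset(repo, 0, 5))   # ascending, same as xrange(0, 5)
#   [0, 1, 2, 3, 4]
#   >>> list(spanset(repo, 5, 0))   # start > end: iterates descending
#   [5, 4, 3, 2, 1]
#   >>> 3 in spanset(repo, 5, 0)    # membership ignores direction
#   True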
3400 3400 class fullreposet(spanset):
3401 3401 """a set containing all revisions in the repo
3402 3402
3403 3403     This class exists to host the special optimizations and magic needed to
3404 3404     handle virtual revisions such as "null".
3405 3405 """
3406 3406
3407 3407 def __init__(self, repo):
3408 3408 super(fullreposet, self).__init__(repo)
3409 3409
3410 3410 def __contains__(self, rev):
3411 3411 # assumes the given rev is valid
3412 3412 hidden = self._hiddenrevs
3413 3413 return not (hidden and rev in hidden)
3414 3414
3415 3415 def __and__(self, other):
3416 3416         """As self contains the whole repo, everything in the other set is also
3417 3417         in self. Therefore `self & other = other`.
3418 3418 
3419 3419         This boldly assumes that the other set contains only valid revs.
3420 3420 """
3421 3421         # other is not a smartset; make it so
3422 3422 if not util.safehasattr(other, 'isascending'):
3423 3423             # filter out hidden revisions
3424 3424             # (this boldly assumes all smartsets are pure)
3425 3425             #
3426 3426             # `other` was used with "&", so let's assume it is a set-like
3427 3427             # object.
3428 3428 other = baseset(other - self._hiddenrevs)
3429 3429
3430 3430 other.sort(reverse=self.isdescending())
3431 3431 return other
3432 3432
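# Editor's note: an illustrative sketch, not part of upstream revset.py.
# Because fullreposet contains every revision, intersecting with it reduces to
# reordering the other operand instead of testing each revision.  Assuming a
# repository `repo` whose revisions include 3, 5 and 7:
#
#   >>> list(fullreposet(repo) & baseset([7, 3, 5]))
#   [3, 5, 7]
#
# The default fullreposet is ascending; after fullreposet(repo).reverse(), the
# same intersection would come back as [7, 5, 3].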
3433 3433 def prettyformatset(revs):
3434 3434 lines = []
3435 3435 rs = repr(revs)
3436 3436 p = 0
3437 3437 while p < len(rs):
3438 3438 q = rs.find('<', p + 1)
3439 3439 if q < 0:
3440 3440 q = len(rs)
3441 3441 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3442 3442 assert l >= 0
3443 3443 lines.append((l, rs[p:q].rstrip()))
3444 3444 p = q
3445 3445 return '\n'.join(' ' * l + s for l, s in lines)
3446 3446
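# Editor's note: an illustrative sketch, not part of upstream revset.py.
# prettyformatset splits a nested smartset repr at each '<' and indents every
# fragment by its nesting depth.  Assuming a combining set whose repr embeds
# its two operands (such as addset, whose two-operand __repr__ appears earlier
# in this file), e.g. '<addset+ <spanset+ 0:4>, <spanset+ 5:9>>', the output
# would be:
#
#   <addset+
#    <spanset+ 0:4>,
#    <spanset+ 5:9>>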
3447 3447 # tell hggettext to extract docstrings from these functions:
3448 3448 i18nfunctions = symbols.values()