##// END OF EJS Templates
revset: eliminate temporary reference to subset in limit() and last()
Yuya Nishihara -
r26636:ff6baf32 default
parent child Browse files
Show More
@@ -1,3768 +1,3766
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a generatorset (descending) of the ancestors of ``revs``.
    When ``followfirst`` is true, only first parents are walked.
    """
    # slice bound for parentrevs(): [:1] keeps only the first parent,
    # [:None] keeps both
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        revs.sort(reverse=True)
        irevs = iter(revs)
        # max-heap of pending revisions, emulated by pushing negated revnums
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # refill the heap from the (descending) input as we pass each
            # input revision, so the heap always holds the frontier
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a generatorset (ascending) of the descendants of ``revs``.
    When ``followfirst`` is true, only first-parent links are followed.
    """
    # slice bound for parentrevs(): [:1] keeps only the first parent
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            seen = set(revs)
            # scan forward: any rev whose (relevant) parent was already
            # seen is a descendant, and becomes a parent candidate itself
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    # NOTE(review): the early return is a list while later returns are a
    # baseset/set; callers only iterate the result, so this is tolerated.
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            # without includepath we only need the roots themselves, so
            # there is no reason to walk past one
            if not includepath:
                continue
        parents = parentrevs(rev)
        # 'seen' doubles as a rev -> parents cache for the second pass
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending rev order): pull in every visited rev that has
    # a reachable parent, i.e. the full <roots>::<heads> path set
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
127 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    rootlist = list(roots)
    headlist = list(heads)
    # use the C implementation on the changelog when available, otherwise
    # fall back to the pure-Python graph walk
    try:
        found = repo.changelog.reachableroots(minroot, headlist, rootlist,
                                              includepath)
    except AttributeError:
        found = _reachablerootspure(repo, minroot, rootlist, headlist,
                                    includepath)
    result = baseset(found)
    result.sort()
    return result
144 144
# Parser table for revset operators. Each entry maps a token type to its
# binding strength plus the handlers used when the token appears as a
# primary expression, in prefix position, in infix position, or as a suffix.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
172 172
# words that are operators, never symbol names
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (bytes > 127 are allowed so multi-byte encoded names pass through;
# xrange is Python 2)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    # tokens are (type, value, position) triples; a final ('end', None, pos)
    # token is always emitted
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are kept raw (no escape processing)
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    # re-tokenize the dash-joined name as alternating
                    # symbol and minus tokens
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # the scan above stopped one past the symbol; the shared
            # 'pos += 1' below compensates
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
300 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    args = inst.args
    if len(args) <= 1:
        return args[0]
    # a position was recorded alongside the message
    return _('at %s: %s') % (args[1], args[0])
308 308
309 309 # helpers
310 310
def getstring(x, err):
    # both quoted strings and bare symbols are acceptable string arguments
    if x:
        kind = x[0]
        if kind == 'string' or kind == 'symbol':
            return x[1]
    raise error.ParseError(err)
315 315
def getlist(x):
    # flatten the left-nested 'list' parse nodes into a Python list,
    # iteratively rather than by recursion
    items = []
    while x:
        if x[0] != 'list':
            items.append(x)
            break
        items.append(x[2])
        x = x[1]
    items.reverse()
    return items
322 322
def getargs(x, min, max, err):
    # unwrap the argument node and enforce the expected arity; a negative
    # 'max' means "no upper bound"
    args = getlist(x)
    nargs = len(args)
    if nargs < min:
        raise error.ParseError(err)
    if max >= 0 and nargs > max:
        raise error.ParseError(err)
    return args
328 328
def getargsdict(x, funcname, keys):
    # map the positional/keyword revset arguments onto the declared key names
    keynames = keys.split()
    arglist = getlist(x)
    return parser.buildargsdict(arglist, funcname, keynames,
                                keyvaluenode='keyvalue', keynode='symbol')
332 332
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
337 337
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
344 344
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
349 349
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    namenode = tree[1]
    return getsymbol(namenode)
356 356
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # no argument node at all
        return []
    return getlist(tree[2])
366 366
def getset(repo, subset, x):
    # evaluate parse node 'x' against 'subset' and return a smartset
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # anything with 'isascending' already is a smartset; pass it through
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    # wrap legacy list results so callers always get a smartset
    return baseset(s)
381 381
def _getrevsource(repo, r):
    # scan the extra fields written by graft, transplant and rebase for a
    # recorded source changeset; return its revnum, or None if unresolvable
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        srcnode = extra.get(label)
        if srcnode is None:
            continue
        try:
            return repo[srcnode].rev()
        except error.RepoLookupError:
            pass
    return None
391 391
392 392 # operator methods
393 393
def stringset(repo, subset, x):
    # resolve the symbol/string to a revision number first
    rev = repo[x].rev()
    if rev in subset:
        return baseset([rev])
    # the null revision is implicitly a member of a fullreposet
    if rev == node.nullrev and isinstance(subset, fullreposet):
        return baseset([rev])
    return baseset()
400 400
def rangeset(repo, subset, x, y):
    # 'x:y' - resolve both endpoints against the full repo, then clamp
    # the resulting span to 'subset'
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # the working directory pseudo-rev cannot live inside a spanset;
        # append it explicitly
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # m > n: a descending span
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
425 425
def dagrange(repo, subset, x, y):
    # 'x::y' - every revision on a path from x to y
    everything = fullreposet(repo)
    roots = getset(repo, everything, x)
    heads = getset(repo, everything, y)
    between = reachableroots(repo, roots, heads, includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return between & subset
433 433
def andset(repo, subset, x, y):
    # evaluate the left operand, then narrow it by the right operand
    lhs = getset(repo, subset, x)
    return getset(repo, lhs, y)
436 436
def orset(repo, subset, *xs):
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    # union the operands divide-and-conquer style so the addition tree
    # stays balanced
    mid = len(xs) // 2
    left = orset(repo, subset, *xs[:mid])
    right = orset(repo, subset, *xs[mid:])
    return left + right
445 445
def notset(repo, subset, x):
    # everything in subset that is not selected by x
    excluded = getset(repo, subset, x)
    return subset - excluded
448 448
def listset(repo, subset, a, b):
    # a bare "a, b" list is only meaningful inside a function call; reaching
    # this method means it appeared elsewhere in the expression
    raise error.ParseError(_("can't use a list in this context"))
451 451
def keyvaluepair(repo, subset, k, v):
    # "key=value" is only meaningful as a function argument; reaching this
    # method means it appeared elsewhere in the expression
    raise error.ParseError(_("can't use a key-value pair in this context"))
454 454
def func(repo, subset, a, b):
    # dispatch a parsed function call to its registered predicate
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # unknown name: suggest only documented (i.e. public) predicates
    syms = [s for (s, fn) in symbols.items()
            if getattr(fn, '__doc__', None) is not None]
    raise error.UnknownIdentifier(a[1], syms)
463 463
464 464 # functions
465 465
def _mergedefaultdest(repo, subset, x):
    # ``_mergedefaultdest()``

    # default destination for merge.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    if repo._activebookmark:
        # an active bookmark: merge with the other head of that bookmark,
        # aborting unless there are exactly two bookmark heads
        bmheads = repo.bookmarkheads(repo._activebookmark)
        curhead = repo[repo._activebookmark].node()
        if len(bmheads) == 2:
            # NOTE: the local 'node' below shadows the imported 'node'
            # module; harmless here since the module is not used after this
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise error.Abort(_("multiple matching bookmarks to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise error.Abort(_("no matching bookmark to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
    else:
        # no active bookmark: merge with the other non-bookmarked head of
        # the current branch, with detailed aborts for every ambiguity
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise error.Abort(_("branch '%s' has %d heads - "
                                "please merge with an explicit rev")
                              % (branch, len(bheads)),
                              hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            if len(bheads) > 1:
                raise error.Abort(_("heads are bookmarked - "
                                    "please merge with an explicit rev"),
                                  hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise error.Abort(_("branch '%s' has one head - "
                                    "please merge with an explicit rev")
                                  % branch,
                                  hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise error.Abort(msg, hint=hint)

        if parent not in bheads:
            raise error.Abort(_('working directory not at a head revision'),
                              hint=_("use 'hg update' or merge with an "
                                     "explicit revision"))
        # pick the head that is not the working directory parent
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]
    return subset & baseset([repo[node].rev()])
526 526
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pattern = getstring(x, _("adds requires a pattern"))
    # status field index 1 holds the added files
    return checkstatus(repo, subset, pattern, 1)
538 538
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # fold every revision of every argument set (in order) into a single
    # running greatest-common-ancestor context
    for rev in (r for arg in l for r in getset(repo, rl, arg)):
        if anc is None:
            anc = repo[rev]
        else:
            anc = anc.ancestor(repo[rev])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
563 563
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation of ancestors() and _firstancestors()
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    ancs = _revancestors(repo, heads, followfirst)
    return subset & ancs
570 570
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x, followfirst=False)
576 576
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (deliberately no docstring, to keep it out of the help output)
    return _ancestors(repo, subset, x, True)
581 581
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    cl = repo.changelog
    ancs = set()
    for rev in getset(repo, fullreposet(repo), x):
        # walk n steps along the first-parent chain
        for _step in range(n):
            rev = cl.parentrevs(rev)[0]
        ancs.add(rev)
    return subset & ancs
598 598
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    needle = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(needle)
    def matches(r):
        return matcher(encoding.lower(repo[r].user()))
    return subset.filter(matches)
607 607
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    wanted = getstring(x, _("bisect requires a string"))
    state = set(hbisect.get(repo, wanted.lower()))
    return subset & state
624 624
625 625 # Backward-compatibility
626 626 # - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # deprecated alias of bisect(); deliberately has no docstring so it is
    # not advertised in the help output
    return bisect(repo, subset, x)
629 629
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # direct lookup of a single bookmark name
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern (e.g. 're:') lookup against all bookmark names
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # a bookmark may point at the null revision; never return it
    bms -= set([node.nullrev])
    return subset & bms
667 667
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            # regex (or other) pattern: filter by branch name match
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # argument is a revset: select every changeset on any branch that the
    # argument revisions belong to (plus the argument revisions themselves)
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
703 703
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    revs = obsmod.getrevs(repo, 'bumped')
    return subset & revs
714 714
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    # only a bundlerepo changelog carries the 'bundlerevs' attribute
    cl = repo.changelog
    if not util.safehasattr(cl, 'bundlerevs'):
        raise error.Abort(_("no bundle provided - specify with -R"))
    return subset & cl.bundlerevs
726 726
def checkstatus(repo, subset, pat, field):
    # Filter 'subset' to changesets whose status (repo.status() tuple entry
    # 'field', e.g. 1 = added, 0 = modified) contains a file matching 'pat'.
    hasset = matchmod.patkind(pat) == 'set'

    # one-element list used as a mutable cache for the matcher; fileset
    # patterns depend on the changectx and cannot be cached
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        # single literal filename: use cheap membership tests
        if not m.anypats() and len(m.files()) == 1:
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True
        # falls through returning None (falsy) when nothing matched

    return subset.filter(matches)
758 758
def _children(repo, narrow, parentset):
    # collect the members of 'narrow' that have at least one parent in
    # 'parentset'
    if not parentset:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    minrev = parentset.min()
    found = set()
    for rev in narrow:
        # a child is always numbered higher than all of its parents
        if rev <= minrev:
            continue
        if any(p in parentset for p in parentrevs(rev)):
            found.add(rev)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(found)
774 774
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    parents = getset(repo, fullreposet(repo), x)
    return subset & _children(repo, subset, parents)
782 782
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    def isclosed(r):
        return repo[r].closesbranch()
    return subset.filter(isclosed)
790 790
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(rev):
        ctx = repo[rev]
        if matchmod.patkind(pat):
            # a real pattern: scan the whole manifest through a matcher
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
            return any(m(f) for f in ctx.manifest())
        # plain path: cheap manifest membership test
        cpath = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        return cpath in ctx

    return subset.filter(matches)
817 817
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    # i18n: "converted" is a keyword
    args = getargs(x, 0, 1, _('converted takes one or no arguments'))
    rev = None
    if args:
        # i18n: "converted" is a keyword
        rev = getstring(args[0], _('converted requires a revision'))

    def matches(r):
        source = repo[r].extra().get('convert_revision', None)
        if source is None:
            return False
        return rev is None or source.startswith(rev)

    return subset.filter(matches)
839 839
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    spec = getstring(x, _("date requires a string"))
    matchfn = util.matchdate(spec)
    def indate(r):
        return matchfn(repo[r].date()[0])
    return subset.filter(indate)
848 848
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = encoding.lower(getstring(x, _("desc requires a string")))

    return subset.filter(
        lambda r: needle in encoding.lower(repo[r].description()))
861 861
def _descendants(repo, subset, x, followfirst=False):
    # shared implementation of descendants() and _firstdescendants():
    # the argument revisions themselves are included in the result
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect so the result follows subset's order
        result = subset & result
    return result
880 880
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x, followfirst=False)
886 886
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (deliberately no docstring, to keep it out of the help output)
    return _descendants(repo, subset, x, True)
891 891
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        # 'lineage' accumulates the chain of candidate dests walked so far;
        # created lazily so revisions without a source allocate nothing
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
935 935
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # restrict subset to the precomputed set of divergent revisions
    return subset & obsmod.getrevs(repo, 'divergent')
944 944
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # restrict subset to the precomputed set of extinct revisions
    return subset & obsmod.getrevs(repo, 'extinct')
953 953
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None
    matcher = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def _matchvalue(r):
        # the label must be present; when a value pattern was supplied the
        # stored value must match it too (matcher is only consulted when
        # value is not None, so it is always defined when called)
        extra = repo[r].extra()
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(_matchvalue)
983 983
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: resolve it once, no directory walk needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: lazily match against the working copy's file list
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1098 1098
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # Identical semantics to limit(): first n members (default 1) of set.
    return limit(repo, subset, x)
1104 1104
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow()/_followfirst(): ancestors of the
    # working directory's first parent, optionally restricted to the history
    # of files matching a pattern, optionally following first parents only.
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        s = set()
        for fname in c:
            if matcher(fname):
                fctx = c[fname]
                s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
                # include the revision responsible for the most recent version
                s.add(fctx.introrev())
    else:
        # no pattern: plain revision ancestry of the working parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1124 1124
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # delegate to the shared implementation, following both parents
    return _follow(repo, subset, x, 'follow')
1132 1132
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    # Internal predicate: same shared implementation, first parents only.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1138 1138
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # intersect with the full span of real revisions
    return subset & spanset(repo) # drop "null" if any
1146 1146
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(x):
        # scan changed file names, user and description for the pattern
        ctx = repo[x]
        return any(gr.search(field)
                   for field in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1167 1167
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix followed by its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # keep revisions touching at least one file accepted by the matcher
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1224 1224
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain 'p:' pattern argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1237 1237
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    hs = set()
    # collect the revision number of every head of every named branch
    for branch, nodes in repo.branchmap().iteritems():
        for h in nodes:
            hs.add(cl.rev(h))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1253 1253
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is any member that is not a parent of another member
    return getset(repo, subset, x) - parents(repo, subset, x)
1261 1261
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # revisions filtered out of the 'visible' view are the hidden ones
    return subset & repoview.filterrevs(repo, 'visible')
1270 1270
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring search over files, user and description
        c = repo[r]
        for field in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(field):
                return True
        return False

    return subset.filter(matches)
1285 1285
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # This block contained unresolved diff pre-image lines ('ss = subset',
    # 'elif y in ss:'); this is the resolved post-image: the temporary alias
    # of subset is eliminated and subset is used directly.
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    # take up to 'lim' members of the argument set, keeping only those that
    # are also in subset, preserving the argument set's order
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1311 1310
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # This block contained unresolved diff pre-image lines ('ss = subset',
    # 'elif y in ss:'); this is the resolved post-image: the temporary alias
    # of subset is eliminated and subset is used directly.
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    os = getset(repo, fullreposet(repo), l[0])
    # iterate the argument set from the end to pick the last members first
    os.reverse()
    result = []
    it = iter(os)
    for x in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1338 1336
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        top = candidates.max()
    except ValueError:
        # max() on an empty smartset raises ValueError, like python's max()
        return baseset()
    if top in subset:
        return baseset([top])
    return baseset()
1353 1351
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non-null) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1362 1360
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts children of revision (baserev + i); only
    # revisions >= baserev can be in subset, so the table starts there
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    # a branchpoint is a revision counted as parent more than once
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1381 1379
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        low = candidates.min()
    except ValueError:
        # min() on an empty smartset raises ValueError, like python's min()
        return baseset()
    if low in subset:
        return baseset([low])
    return baseset()
1396 1394
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 selects modified files
    return checkstatus(repo, subset, pat, 0)
1408 1406
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    # collect every namespace object whose name matches the argument
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # gather all revisions bound to a non-deprecated name in those namespaces
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1446 1444
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-digit hex nodeid: look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: resolve it unambiguously via the changelog
        rn = None
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    result = baseset([rn])
    return result & subset
1470 1468
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # restrict subset to the precomputed set of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1478 1476
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # one-argument form: exclude every head that is neither in the
        # argument set nor a descendant of it
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if not rev in descendants and not rev in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1504 1502
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions.  Omitting the optional set is the
    same as passing all().  If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # walk the source chain back to the first (original) source, or
        # None if rev has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = set([_firstsrc(r) for r in dests])
    o -= set([None])
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1535 1533
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery chatter while talking to the remote peer
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1562 1560
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: first parent of the working directory
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1581 1579
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # no argument: second parent of the working directory, if any
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # the working directory has a single parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1] for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1604 1602
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working directory's parents come from the dirstate,
                # not the changelog
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1623 1621
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    phasesets = repo._phasecache._phasesets
    if not phasesets:
        # no precomputed sets available: test each revision's phase
        phase = repo._phasecache.phase
        return subset.filter(lambda r: phase(repo, r) == target, cache=False)
    revs = baseset(phasesets[target] - repo.changelog.filteredrevs)
    revs.sort() # set are non ordered, so we enforce ascending
    return subset & revs
1636 1634
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1644 1642
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1652 1650
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n arrives as a parsed token; its payload must be 0, 1 or 2
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            # only add a second parent when the revision actually has one
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1677 1675
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revisions are tolerated: just return an empty set
        return baseset()
1691 1689
1692 1690 # for internal use
def _notpublic(repo, subset, x):
    # Internal predicate: all revisions in any non-public phase.
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union the precomputed sets of every non-public phase
        # (index 0 is public, so skip it)
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: test the phase of each revision individually
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1708 1706
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    return subset.filter(ispublic, cache=False)
1718 1716
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the branch currently checked out
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # ask the remote peer to resolve the identifier to a node
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1753 1751
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # field index 2 — presumably the 'removed' status field; see checkstatus
    return checkstatus(repo, subset, pattern, 2)
1765 1763
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is valid even though it is never "in" the changelog
    if r != node.nullrev and r not in repo.changelog:
        return baseset()
    return subset & baseset([r])
1781 1779
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # the revisions whose field values serve as the match targets
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
                  'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if ALL selected fields equal those of at
        # least one target revision
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1893 1891
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # evaluate the set, then flip its iteration order in place
    revs = getset(repo, subset, x)
    revs.reverse()
    return revs
1901 1899
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no non-null parent that is itself inside the set
        return not any(0 <= p and p in s for p in parentrevs(r))
    return subset & s.filter(isroot)
1914 1912
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # complement each byte so that lexicographic ordering of the
        # result is the reverse of the ordering of the input string
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths: plain revision-number ordering needs no decoration
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # decorate-sort-undecorate: build a sort tuple per revision, with the
    # revision number appended last as a stable tie-breaker (and as the
    # value recovered by e[-1] below)
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1977 1975
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    # NOTE: 'pat' is only bound when a pattern argument is given; the
    # closures below only reference it after matches() has checked
    # len(args), so no NameError can occur
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only changes touching .hgsubstate can affect subrepo state
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the subrepo names that match the user-supplied pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the first parent and this revision
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare the state of each matching subrepo across the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2020 2018
def _substringmatcher(pattern):
    """Like util.stringmatcher(), but a 'literal' kind matches substrings."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        # replace exact-equality matching with substring containment
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
2026 2024
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if not args:
        # no name given: every tagged revision, excluding the implicit 'tip'
        tagrevs = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
        return subset & tagrevs
    pattern = getstring(args[0],
                        # i18n: "tag" is a keyword
                        _('the argument to tag must be a string'))
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        # pattern match: scan the full tag list
        tagrevs = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
        return subset & tagrevs
    # literal name: avoid resolving all tags
    tn = repo._tagscache.tags.get(pattern, None)
    if tn is None:
        raise error.RepoLookupError(_("tag '%s' does not exist")
                                    % pattern)
    return subset & set([repo[tn].rev()])
2055 2053
def tagged(repo, subset, x):
    # compatibility alias for tag(); NOTE(review): no docstring here,
    # presumably so it does not produce a duplicate help entry — confirm
    return tag(repo, subset, x)
2058 2056
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # intersect with the repo-wide set of unstable revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2067 2065
2068 2066
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # 'user' is a synonym of 'author'; delegate to it
    return author(repo, subset, x)
2078 2076
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo-revision is part of a fullreposet by
    # definition, or may have been placed in the subset explicitly
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2086 2084
# for internal use
def _list(repo, subset, x):
    # Resolve a '\0'-separated list of revision symbols, preserving the
    # listed order and keeping only revisions present in subset.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # not a plain revision number: resolve as a symbol
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is only accepted when the subset spans the whole repo
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2115 2113
# for internal use
def _intlist(repo, subset, x):
    # Select the revisions given as a '\0'-separated list of integer
    # revision numbers, in the listed order, keeping only those present
    # in subset.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split('\0')]
    # no temporary alias of subset (consistent with limit()/last() cleanup)
    return baseset([r for r in ls if r in subset])
2124 2122
# for internal use
def _hexlist(repo, subset, x):
    # Select the revisions given as a '\0'-separated list of binary-hex
    # node ids, in the listed order, keeping only those present in subset.
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
    # no temporary alias of subset (consistent with limit()/last() cleanup)
    return baseset([r for r in ls if r in subset])
2134 2132
# table mapping revset predicate names to their implementation functions;
# names starting with '_' are internal-only helpers injected by optimize()
# or by the parser
symbols = {
    "_mergedefaultdest": _mergedefaultdest,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2210 2208
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2284 2282
# dispatch table mapping parse-tree node types to their evaluation
# functions; presumably consumed by the tree-evaluation entry point
# (getset) defined elsewhere in this module — confirm
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2300 2298
def optimize(x, small):
    """Rewrite parse tree 'x' into an equivalent, cheaper-to-evaluate tree.

    Returns a (weight, tree) pair where the weight is a heuristic estimate
    of the evaluation cost.  'small' hints that the surrounding context
    expects a small result set, which makes single-revision expressions
    comparatively cheaper (see 'smallbonus').
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # desugar shorthand operators into their canonical function/range forms
    # before recursing
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # fold the pending run of trivial string/symbol operands in
            # 'ss' into a single _list() call
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses are purely syntactic; drop the node
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        # weight known predicates by their rough evaluation cost
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2433 2431
2434 2432 _aliasarg = ('func', ('symbol', '_aliasarg'))
2435 2433 def _getaliasarg(tree):
2436 2434 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2437 2435 return X, None otherwise.
2438 2436 """
2439 2437 if (len(tree) == 3 and tree[:2] == _aliasarg
2440 2438 and tree[2][0] == 'string'):
2441 2439 return tree[2][1]
2442 2440 return None
2443 2441
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child node
    for subtree in tree:
        _checkaliasarg(subtree, known)
2454 2452
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# (allows '$' so alias arguments such as $1 tokenize as symbols; see
# _tokenizealias)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2459 2457
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same tokenizer as regular revsets, with the widened initial-letter set
    return tokenize(program, syminitletters=_aliassyminitletters,
                    lookup=lookup)
2469 2467
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must be consumed by the parser
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # each argument must be a plain symbol (no nesting, no strings)
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2540 2538
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        # token-stream filter: rewrite argument symbols as the token
        # sequence of a _aliasarg('ARGNAME') call, pass others through
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the whole definition must be consumed by the parser
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2605 2603
class revsetalias(object):
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse the declaration ('name' side); self.error holds a
        # human-readable message when the declaration is malformed
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # parse the definition ('value' side) with argument
            # placeholders substituted
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2630 2628
2631 2629 def _getalias(aliases, tree):
2632 2630 """If tree looks like an unexpanded alias, return it. Return None
2633 2631 otherwise.
2634 2632 """
2635 2633 if isinstance(tree, tuple) and tree:
2636 2634 if tree[0] == 'symbol' and len(tree) == 2:
2637 2635 name = tree[1]
2638 2636 alias = aliases.get(name)
2639 2637 if alias and alias.args is None and alias.tree == tree:
2640 2638 return alias
2641 2639 if tree[0] == 'func' and len(tree) > 1:
2642 2640 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2643 2641 name = tree[1][1]
2644 2642 alias = aliases.get(name)
2645 2643 if alias and alias.args is not None and alias.tree == tree[:2]:
2646 2644 return alias
2647 2645 return None
2648 2646
2649 2647 def _expandargs(tree, args):
2650 2648 """Replace _aliasarg instances with the substitution value of the
2651 2649 same name in args, recursively.
2652 2650 """
2653 2651 if not tree or not isinstance(tree, tuple):
2654 2652 return tree
2655 2653 arg = _getaliasarg(tree)
2656 2654 if arg is not None:
2657 2655 return args[arg]
2658 2656 return tuple(_expandargs(t, args) for t in tree)
2659 2657
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded; it is
    used to detect (and refuse) recursive alias definitions.  'cache'
    memoizes the expanded replacement tree per alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise error.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        # expand the alias body itself only once per name
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # function-style alias: expand each actual argument (with a
            # fresh expanding stack) and substitute the placeholders
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias use: recurse into every child node
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2693 2691
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured revset aliases found in `tree`.

    Aliases are read from the [revsetalias] config section.  When
    `showwarning` is given, it is invoked once per broken alias that has
    not been warned about yet, whether or not the alias was referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for declname, defn in ui.configitems('revsetalias'):
        parsed = revsetalias(declname, defn)
        aliases[parsed.name] = parsed
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, alias in sorted(aliases.iteritems()):
            if alias.error and not alias.warned:
                showwarning(_('warning: %s\n') % (alias.error))
                alias.warned = True
    return expanded
2708 2706
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # Flatten the (possibly nested) '_concat' chain depth-first, collecting
    # the leaf values in source order.
    parts = []
    stack = [tree]
    while stack:
        item = stack.pop()
        kind = item[0]
        if kind == '_concat':
            stack.extend(reversed(item[1:]))
        elif kind in ('string', 'symbol'):
            parts.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (kind)
            raise error.ParseError(msg)
    return ('string', ''.join(parts))
2729 2727
def parse(spec, lookup=None):
    """Parse a revset specification into a simplified parse tree.

    `lookup` is an optional callable passed to the tokenizer to decide
    whether a name refers to an existing revision.
    """
    tokens = tokenize(spec, lookup=lookup)
    tree, pos = parser.parser(elements).parse(tokens)
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2736 2734
def posttreebuilthook(tree, repo):
    """Callback invoked once the optimized revset tree has been built.

    The default implementation is a no-op; per the comment below,
    extensions may wrap this to inspect `tree` before evaluation.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2740 2738
def match(ui, spec, repo=None):
    """Create a matcher for a single revision specification."""
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2749 2747
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher can never match anything
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # combine every spec under a single top-level 'or' node
        tree = ('or',) + tuple(parse(s, lookup) for s in specs)
    return _makematcher(ui, tree, repo)
2767 2765
def _makematcher(ui, tree, repo):
    """Turn a parse tree into a matcher callable.

    The returned function takes (repo, subset=None) and evaluates the
    revset against `subset` (the whole repo when None).
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # wrap plain iterables so getset always sees a smartset
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2783 2781
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quoted, escaped form understood by the revset tokenizer
        return repr(str(s))

    def argtype(c, arg):
        # render one scalar argument according to its format character
        if c == 'd':
            return str(int(arg))
        if c == 's':
            return quote(arg)
        if c == 'r':
            parse(arg)  # make sure syntax errors are confined
            return '(%s)' % arg
        if c == 'n':
            return quote(node.hex(arg))
        if c == 'b':
            return quote(arg.branch())

    def listexp(values, typ):
        # render a list argument; homogeneous scalar types collapse into
        # the internal _list/_intlist/_hexlist helpers
        size = len(values)
        if size == 0:
            return "_list('')"
        if size == 1:
            return argtype(typ, values[0])
        if typ == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in values)
        if typ == 's':
            return "_list('%s')" % "\0".join(values)
        if typ == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in values)
        if typ == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in values)
        # revset ('r') or other types: binary split joined by 'or'
        half = size // 2
        return '(%s or %s)' % (listexp(values[:half], typ),
                               listexp(values[half:], typ))

    output = ''
    i = 0
    argindex = 0
    while i < len(expr):
        ch = expr[i]
        if ch != '%':
            output += ch
            i += 1
            continue
        i += 1
        code = expr[i]
        if code == '%':
            output += code
        elif code in 'dsnbr':
            output += argtype(code, args[argindex])
            argindex += 1
        elif code == 'l':
            # 'l' prefixes a list of the following type character
            i += 1
            code = expr[i]
            output += listexp(list(args[argindex]), code)
            argindex += 1
        else:
            raise error.Abort('unexpected revspec format character %s' % code)
        i += 1

    return output
2877 2875
def prettyformat(tree):
    """Return a pretty-printed rendering of a revset parse tree.

    Delegates to the generic parser formatter; ('string', 'symbol') are
    the leaf node types.
    """
    return parser.prettyformat(tree, ('string', 'symbol'))
2880 2878
def depth(tree):
    """Return the nesting depth of a parse tree (0 for a leaf value)."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2886 2884
def funcsused(tree):
    """Return the set of function names referenced anywhere in `tree`."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    names = set()
    for child in tree[1:]:
        names.update(funcsused(child))
    if tree[0] == 'func':
        names.add(tree[1][1])
    return names
2897 2895
class abstractsmartset(object):
    """Base class describing the protocol every smartset implements."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is None:
            return min(self)
        for r in self.fastasc():
            return r
        raise ValueError('arg is an empty sequence')

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is None:
            return max(self)
        for r in self.fastdesc():
            return r
        raise ValueError('arg is an empty sequence')

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(lambda r: r not in other, cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtins have no func_code and cannot (and need not) be memoized
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
3004 3002
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # `data` may be any iterable; a list is stored as-is and a set is
        # additionally retained for O(1) membership tests.
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            data = list(data)
        self._list = data
        # None: insertion order, True: ascending, False: descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # sorted copy shared by both sorted iteration directions
        ordered = self._list[:]
        ordered.sort()
        return ordered

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # no known order: just flip the raw list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3101 3099
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # bind the predicate once outside the loop
        predicate = self._condition
        for rev in it:
            if predicate(rev):
                yield rev

    @property
    def fastasc(self):
        fast = self._subset.fastasc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    @property
    def fastdesc(self):
        fast = self._subset.fastdesc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    def __nonzero__(self):
        # prefer a fast iterator if one exists, in either direction
        fast = self.fastasc or self.fastdesc
        if fast is not None:
            probe = fast()
        else:
            probe = self
        for rev in probe:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset(rev for rev in self))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for rev in self:
            return rev
        return None

    def last(self):
        # when the order is known, read a single element off the reversed
        # fast iterator instead of walking the whole set
        if self.isascending():
            backward = self.fastdesc
        elif self.isdescending():
            backward = self.fastasc
        else:
            backward = None
        if backward is not None:
            for rev in backward():
                return rev
            return None #empty case
        final = None
        for final in self:
            pass
        return final
3194 3192
3195 3193 def _iterordered(ascending, iter1, iter2):
3196 3194 """produce an ordered iteration from two iterators with the same order
3197 3195
3198 3196 The ascending is used to indicated the iteration direction.
3199 3197 """
3200 3198 choice = max
3201 3199 if ascending:
3202 3200 choice = min
3203 3201
3204 3202 val1 = None
3205 3203 val2 = None
3206 3204 try:
3207 3205 # Consume both iterators in an ordered way until one is empty
3208 3206 while True:
3209 3207 if val1 is None:
3210 3208 val1 = iter1.next()
3211 3209 if val2 is None:
3212 3210 val2 = iter2.next()
3213 3211 next = choice(val1, val2)
3214 3212 yield next
3215 3213 if val1 == next:
3216 3214 val1 = None
3217 3215 if val2 == next:
3218 3216 val2 = None
3219 3217 except StopIteration:
3220 3218 # Flush any remaining values and consume the other one
3221 3219 it = iter2
3222 3220 if val1 is not None:
3223 3221 yield val1
3224 3222 it = iter1
3225 3223 elif val2 is not None:
3226 3224 # might have been equality and both are empty
3227 3225 yield val2
3228 3226 for val in it:
3229 3227 yield val
3230 3228
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        # the two wrapped smartsets
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: arbitrary order, True/False: ascending/descending
        self._ascending = ascending
        # lazily-built baseset of all yielded values (arbitrary order)
        self._genlist = None
        # lazily-built sorted list of all yielded values
        self._asclist = None

    def __len__(self):
        # forces full materialization via the _list property
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # once everything is materialized, iterating the sorted cache is
        # cheaper than merging the two components again
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # a merged fast iteration needs both sides to be fast
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # arbitrary order: flip the materialized list in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # reuse first() on the temporarily reversed set
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3430 3428
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: when not None, declares that `gen` yields values in
        ascending (True) or descending (False) order, enabling faster
        iteration and membership tests.
        """
        self._gen = gen
        self._asclist = None
        self._cache = {}
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): this instance-level __contains__ override is
                # honored by explicit .__contains__() calls but bypassed by
                # the `in` operator, which looks special methods up on the
                # type -- verify callers rely on explicit calls.
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # first consumer to exhaust the generator: freeze the results
            # and switch to fast list-based iteration for everyone
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again; _consumegen() installs
            # fastasc/fastdesc, so the recursion terminates.  (Recursing
            # into first() here was a bug: it returned the wrong end of
            # the set.)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3604 3602
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behaves almost like xrange(x, y) except for two
    notable points:
    - when x > y the span is automatically descending,
    - revisions filtered by the repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end: first revision excluded (last + 1)
             (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to a forward half-open range [start, end); the
            # direction is remembered only in _ascending
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # only the direction flag changes; the range itself is immutable
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        """Yield revisions of iterrange that are not filtered (hidden)."""
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        """Iterate the visible span in ascending revision order."""
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        """Iterate the visible span in descending revision order."""
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # non-empty iff at least one revision survives the hidden filter
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            # __init__ normalizes to start <= end, so a single half-open
            # test finds every hidden rev inside the span (the former
            # 'end < rev <= start' disjunct was unreachable)
            for rev in self._hiddenrevs:
                if start <= rev < end:
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3713 3711
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other is not a smartset: make it one
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3752 3750
def prettyformatset(revs):
    """Return repr(revs) reflowed with one '<...>' node per line.

    Each chunk (up to the next '<') is indented by its nesting depth,
    computed from the unbalanced '<' seen so far.
    """
    rs = repr(revs)
    out = []
    pos = 0
    total = len(rs)
    while pos < total:
        # chunk runs up to, but not including, the next '<' after pos
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        out.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(out)
3766 3764
# tell hggettext to extract docstrings from these functions:
# (symbols is the revset predicate table populated earlier in this module)
i18nfunctions = symbols.values()
General Comments 0
You need to be logged in to leave comments. Login now