##// END OF EJS Templates
revset: delete _updatedefaultdest as it has no users...
Pierre-Yves David -
r26571:a024e2db default
parent child Browse files
Show More
@@ -1,3781 +1,3768
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 destutil,
16 15 encoding,
17 16 error,
18 17 hbisect,
19 18 match as matchmod,
20 19 node,
21 20 obsolete as obsmod,
22 21 parser,
23 22 pathutil,
24 23 phases,
25 24 repoview,
26 25 util,
27 26 )
28 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst.

    Returns a lazily-evaluated generatorset yielding ancestor revisions
    in descending order.  When ``followfirst`` is true only first parents
    are followed.
    """
    # cut=1 restricts the parentrevs() slice below to the first parent
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Walk ancestors in descending revision order by merging the input
        # revs with discovered parents through a max-heap (heapq is a
        # min-heap, so revisions are stored negated).
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # pull the next input rev onto the heap once we reach it, so
            # input revs and discovered ancestors stay merged in order
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
61 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst.

    Returns a lazily-evaluated generatorset yielding descendant revisions
    in ascending order.  When ``followfirst`` is true only first-parent
    links count.
    """
    # cut=1 restricts the parentrevs() slice below to the first parent
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single forward sweep: a rev is a descendant iff one of its
            # (possibly cut) parents was already marked as seen
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
90 89
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    Pure-Python fallback used when the C implementation on the changelog
    is unavailable (see reachableroots below).

    NOTE(review): the return type is inconsistent across paths — ``[]`` for
    no roots, ``baseset()`` for nothing reachable, and a plain ``set``
    otherwise.  The caller normalizes with ``baseset(revs)``; confirm before
    relying on the type here.
    """
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot bounds the walk: nothing below it can be a root
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # second pass (ascending): propagate reachability forward so that
    # every rev on a path from a root to a head is included
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
128 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>).

    ``roots`` must be a smartset (``roots.min()`` is called); the result is
    a sorted baseset.
    """
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # fast path: C implementation provided by the changelog
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    # normalize: the pure fallback may return a list or plain set
    revs = baseset(revs)
    revs.sort()
    return revs
145 144
# Operator table for the revset expression parser.  Each entry maps a
# token type to a 5-tuple consumed by the generic parser module:
#   token-type: binding-strength, primary, prefix, infix, suffix
elements = {
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}

# bare words that are operators, not symbols
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
183 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    Yields ``(type, value, position)`` tuples, ending with an ``end``
    token.  ``lookup``, when given, is a callable used to recognize
    existing names (tags, bookmarks) so that legacy identifiers
    containing metacharacters still tokenize as symbols.

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    # general single-pass scanner; pos always points at the character
    # being examined and is advanced once more at the bottom of the loop
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are raw: no escape decoding
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # compensate for the unconditional pos += 1 below
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
301 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    if len(inst.args) <= 1:
        # no position information recorded
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
309 308
310 309 # helpers
311 310
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' node, else raise err."""
    if not x or x[0] not in ('string', 'symbol'):
        raise error.ParseError(err)
    return x[1]
316 315
def getlist(x):
    """Flatten a parsed 'list' tree into a Python list of child nodes."""
    if not x:
        return []
    if x[0] != 'list':
        # a single non-list node is a one-element list
        return [x]
    return getlist(x[1]) + [x[2]]
323 322
def getargs(x, min, max, err):
    """Return the argument list of node ``x``, checking its length.

    Raises ParseError(err) unless min <= len(args) <= max; a negative
    ``max`` disables the upper bound.  NOTE: the parameter names shadow
    the ``min``/``max`` builtins; kept for interface compatibility.
    """
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l
329 328
def getargsdict(x, funcname, keys):
    """Map the arguments of ``x`` onto the space-separated ``keys``.

    Delegates to parser.buildargsdict, treating 'keyvalue' nodes as
    explicit key=value arguments and 'symbol' nodes as key names.
    """
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
333 332
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
338 337
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    name = tree[1]
    return name
345 344
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    if tree[0] != 'func' or len(tree) <= 1:
        return False
    return isvalidsymbol(tree[1])
350 349
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # the function name is the symbol node in slot 1
    return getsymbol(tree[1])
357 356
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    if len(tree) <= 2:
        # no argument node present
        return []
    return getlist(tree[2])
367 366
def getset(repo, subset, x):
    """Evaluate parsed tree ``x`` against ``subset`` and return a smartset.

    Dispatches on the node type through the module-level ``methods`` table.
    Results lacking the smartset interface are wrapped in a baseset, with
    an optional devel warning for predicates that still return plain lists.
    """
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # anything with 'isascending' is treated as already being a smartset
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
        or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
382 381
def _getrevsource(repo, r):
    """Return the rev of the graft/transplant/rebase source of ``r``.

    Looks the source node up in the changeset's extra metadata; returns
    None when no source is recorded or it cannot be resolved.
    """
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                # recorded source no longer resolvable; try the next label
                pass
    return None
392 391
393 392 # operator methods
394 393
def stringset(repo, subset, x):
    """Resolve a bare revision identifier ``x`` within ``subset``."""
    x = repo[x].rev()
    # nullrev is admitted only against the full repo set
    if (x in subset
        or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
401 400
def rangeset(repo, subset, x, y):
    """Implement the ``x:y`` range operator.

    Takes the first rev of ``x`` and the last of ``y`` and spans between
    them (descending when m > n), handling the working-directory
    pseudo-rev at either end.
    """
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # range ending at the working directory pseudo-rev
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # range starting at the working directory pseudo-rev (descending)
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
426 425
def dagrange(repo, subset, x, y):
    """Implement the ``x::y`` DAG range operator via reachableroots."""
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
434 433
def andset(repo, subset, x, y):
    # intersection: evaluate y within the result of evaluating x
    return getset(repo, getset(repo, subset, x), y)
437 436
def orset(repo, subset, *xs):
    # union of any number of operands, combined divide-and-conquer so the
    # addition tree stays balanced
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b
446 445
def notset(repo, subset, x):
    # complement of x within subset
    return subset - getset(repo, subset, x)
449 448
def listset(repo, subset, a, b):
    # bare 'a, b' lists are only valid inside function calls
    raise error.ParseError(_("can't use a list in this context"))
452 451
def keyvaluepair(repo, subset, k, v):
    # 'key=value' is only valid as a function argument
    raise error.ParseError(_("can't use a key-value pair in this context"))
455 454
def func(repo, subset, a, b):
    """Dispatch a 'func' node to the matching predicate in ``symbols``.

    Unknown names raise UnknownIdentifier carrying the list of public
    (documented) predicates for suggestion purposes.
    """
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # only advertise predicates that carry a docstring (public ones)
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
464 463
465 464 # functions
466 465
def _mergedefaultdest(repo, subset, x):
    # ``_mergedefaultdest()``

    # default destination for merge.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    #
    # Returns a one-element set (within subset) holding the revision that
    # a bare 'hg merge' would pick, or aborts when the choice is ambiguous.
    # Fix: the chosen node used to be stored in a local named 'node', which
    # shadowed the module-level 'node' import used elsewhere in this file;
    # renamed to 'mergenode' to remove the hazard.
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    if repo._activebookmark:
        # active bookmark: merge with the other head of the same bookmark
        bmheads = repo.bookmarkheads(repo._activebookmark)
        curhead = repo[repo._activebookmark].node()
        if len(bmheads) == 2:
            if curhead == bmheads[0]:
                mergenode = bmheads[1]
            else:
                mergenode = bmheads[0]
        elif len(bmheads) > 2:
            raise util.Abort(_("multiple matching bookmarks to merge - "
                               "please merge with an explicit rev or bookmark"),
                             hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise util.Abort(_("no matching bookmark to merge - "
                               "please merge with an explicit rev or bookmark"),
                             hint=_("run 'hg heads' to see all heads"))
    else:
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        # heads carrying a bookmark are not candidates for a default merge
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            # not enough unbookmarked heads: explain why and abort
            if len(bheads) > 1:
                raise util.Abort(_("heads are bookmarked - "
                                   "please merge with an explicit rev"),
                                 hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise util.Abort(msg, hint=hint)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # merge with whichever unbookmarked head is not the parent
        if parent == nbhs[0]:
            mergenode = nbhs[-1]
        else:
            mergenode = nbhs[0]
    return subset & baseset([repo[mergenode].rev()])
527 526
def _updatedefaultdest(repo, subset, x):
    # ``_updatedefaultdest()``

    # default destination for update.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    #
    # NOTE(review): per the commit header this predicate is slated for
    # deletion (no users); it merely wraps destutil.destupdate().
    getargs(x, 0, 0, _("_updatedefaultdest takes no arguments"))
    rev = destutil.destupdate(repo)
    return subset & baseset([rev])
538
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # status field 1 holds added files
    return checkstatus(repo, subset, pat, 1)
550 538
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # fold pairwise: gca(a, b, c) == gca(gca(a, b), c)
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
575 563
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation for ancestors()/_firstancestors(): intersect
    # the lazy ancestor walk of the heads in ``x`` with ``subset``.
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
582 570
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    # thin public wrapper over the shared helper
    return _ancestors(repo, subset, x)
588 576
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    # (comment, not docstring: private predicates stay out of the help)
    return _ancestors(repo, subset, x, followfirst=True)
593 581
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parsed token tuple; slot 1 holds the text
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
610 598
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # case-insensitive substring/regex match against the user field
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
619 607
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    # hbisect.get resolves the status keyword to the matching revisions
    state = set(hbisect.get(repo, status))
    return subset & state
636 624
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    # legacy alias for bisect()
    return bisect(repo, subset, x)
641 629
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: missing bookmark is a hard error
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            # pattern match: collect every bookmark the matcher accepts
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
679 667
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # branchinfo(rev) -> (branchname, closed); cached on the repo
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: select every changeset on any branch touched by the set
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
715 703
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    return subset & obsmod.getrevs(repo, 'bumped')
726 714
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # bundlerevs only exists on bundlerepo changelogs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
738 726
def checkstatus(repo, subset, pat, field):
    # Filter subset down to changesets whose status list ``field``
    # (0=modified, 1=added, 2=removed as used by callers) contains a file
    # matching ``pat``.  NOTE(review): field semantics inferred from the
    # adds() caller passing 1 — confirm against repo.status().
    hasset = matchmod.patkind(pat) == 'set'

    # one-element cache so the matcher is built once unless it is a
    # fileset (which depends on the changectx)
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: plain single-file pattern
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
770 758
def _children(repo, narrow, parentset):
    # Return the members of ``narrow`` having a parent in ``parentset``.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # children are always numbered above their parents
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
786 774
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
794 782
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    # a changeset is closed when it closes its branch
    return subset.filter(lambda r: repo[r].closesbranch())
802 790
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test (fast)
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # pattern: scan the whole manifest (slow)
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
829 817
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match against the recorded source revision
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
851 839
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate compiles the interval into a timestamp predicate
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
860 848
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))
    # case-insensitive substring match against each description
    return subset.filter(
        lambda r: ds in encoding.lower(repo[r].description()))
873 861
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants()/_firstdescendants(): the
    # result includes the roots themselves plus their descendants.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: fall back to an intersection to keep its order
        result = subset & result
    return result
892 880
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # thin public wrapper over the shared helper
    return _descendants(repo, subset, x)
898 886
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    # (comment, not docstring: private predicates stay out of the help)
    return _descendants(repo, subset, x, followfirst=True)
903 891
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
947 935
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # Obsolescence machinery precomputes the divergent revision set.
    return subset & obsmod.getrevs(repo, 'divergent')
956 944
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # Obsolescence machinery precomputes the extinct revision set.
    return subset & obsmod.getrevs(repo, 'extinct')
965 953
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    if 'value' in args:
        # i18n: "extra" is a keyword
        rawvalue = getstring(args['value'], _('second argument to extra must be '
                                              'a string'))
        kind, value, matcher = util.stringmatcher(rawvalue)
    else:
        # No value argument: match on label presence alone.
        value = None

    def _hasextra(r):
        metadata = repo[r].extra()
        if label not in metadata:
            return False
        return value is None or matcher(metadata[label])

    return subset.filter(_hasextra)
995 983
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: a single exact file, no directory walk needed
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        # pattern: lazily match against the working copy file list
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    # build the filerev -> head cache once, on first need
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1110 1098
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # Pure alias: limit() implements the whole behavior.
    return limit(repo, subset, x)
1116 1104
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation for follow() and _followfirst().  'name' is the
    # user-visible predicate name, used only in error messages;
    # 'followfirst' restricts the ancestor walk to first parents.
    l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    c = repo['.']
    if l:
        x = getstring(l[0], _("%s expected a pattern") % name)
        matcher = matchmod.match(repo.root, repo.getcwd(), [x],
                                 ctx=repo[None], default='path')

        s = set()
        for fname in c:
            if matcher(fname):
                fctx = c[fname]
                # NOTE: the genexp's 'c' shadows the outer changectx 'c'
                # but does not leak out of the generator expression.
                s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
                # include the revision responsible for the most recent version
                s.add(fctx.introrev())
    else:
        # no pattern: ancestors of the working directory's first parent
        s = _revancestors(repo, baseset([c.rev()]), followfirst)

    return subset & s
1136 1124
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # All the work happens in the shared _follow() helper.
    return _follow(repo, subset, x, 'follow', followfirst=False)
1144 1132
def _followfirst(repo, subset, x):
    # ``followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revisions or files revisions.
    return _follow(repo, subset, x, '_followfirst', True)
1150 1138
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # Intersecting with the full span drops "null" if the subset had it.
    everything = spanset(repo)
    return subset & everything
1158 1146
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    # i18n: "grep" is a keyword
    pattern = getstring(x, _("grep requires a string"))
    try:
        rx = re.compile(pattern)
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # search file names, the user and the description
        fields = ctx.files() + [ctx.user(), ctx.description()]
        return any(rx.search(field) for field in fields)

    return subset.filter(matches)
1179 1167
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    # rev=None means "match against the working directory"
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix followed by its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # keep revisions touching at least one matched file
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1236 1224
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # Delegate to _matchfiles with a single regular 'p:' pattern argument.
    arg = ('string', 'p:' + pat)
    return _matchfiles(repo, subset, arg)
1249 1237
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    cl = repo.changelog
    headrevs = set()
    for branch, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(cl.rev(n))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(headrevs) & subset
1265 1253
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    parentset = parents(repo, subset, x)
    # A head is any member that is not the parent of another member.
    return members - parentset
1273 1261
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # Everything filtered out of the 'visible' view is, by definition, hidden.
    return subset & repoview.filterrevs(repo, 'visible')
1282 1270
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        # scan changed files, then the user and the description
        for text in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(text):
                return True
        return False

    return subset.filter(matches)
1297 1285
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    args = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(args[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    # Walk the argument set in its own order, and keep the entries that are
    # also in 'subset'.  At most 'lim' entries of the argument set are
    # examined, matching the historical behavior.  Note: the loop variable
    # must not be named 'x' -- the original code shadowed the parse-tree
    # parameter, and 'os' shadowed the conventional stdlib module name.
    candidates = getset(repo, fullreposet(repo), args[0])
    result = []
    it = iter(candidates)
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1323 1311
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    args = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(args) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(args[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    # Walk the argument set from its end, keeping entries that are also in
    # 'subset'.  At most 'lim' entries are examined, and the result stays in
    # reversed (last-first) order, matching the historical behavior.  Note:
    # the loop variable must not be named 'x' -- the original code shadowed
    # the parse-tree parameter.
    candidates = getset(repo, fullreposet(repo), args[0])
    candidates.reverse()
    result = []
    it = iter(candidates)
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        if y in subset:
            result.append(y)
    return baseset(result)
1350 1338
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    revs = getset(repo, fullreposet(repo), x)
    try:
        biggest = revs.max()
    except ValueError:
        # max() raises ValueError on an empty collection, like python's max().
        return baseset()
    if biggest in subset:
        return baseset([biggest])
    return baseset()
1365 1353
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs

    def ismerge(r):
        # a merge has a real (non -1) second parent
        return parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1374 1362
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # parentscount[i] counts how many revisions have (baserev + i) as a
    # parent; only revisions >= baserev can be in subset, so the array
    # covers exactly the candidates.
    parentscount = [0]*(len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                parentscount[p - baserev] += 1
    # a branchpoint is any subset member with two or more children
    return subset.filter(lambda r: parentscount[r - baserev] > 1)
1393 1381
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    revs = getset(repo, fullreposet(repo), x)
    try:
        smallest = revs.min()
    except ValueError:
        # min() raises ValueError on an empty collection, like python's min().
        return baseset()
    if smallest in subset:
        return baseset([smallest])
    return baseset()
1408 1396
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 holds the list of modified files
    return checkstatus(repo, subset, pat, 0)
1420 1408
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        # NOTE: 'ns' below rebinds the argument string to a namespace
        # object; the original string is no longer needed past this point.
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    # collect the revisions of every non-deprecated name in every
    # selected namespace
    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names
1458 1446
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    args = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    hexprefix = getstring(args[0], _("id requires a string"))
    cl = repo.changelog
    found = None
    if len(hexprefix) == 40:
        # full hash: convert to binary and look it up directly
        try:
            found = cl.rev(node.bin(hexprefix))
        except (LookupError, TypeError):
            found = None
    else:
        # shorter prefix: let the changelog resolve it unambiguously
        candidate = cl._partialmatch(hexprefix)
        if candidate is not None:
            found = cl.rev(candidate)

    if found is None:
        return baseset()
    return baseset([found]) & subset
1482 1470
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # Obsolescence machinery precomputes the obsolete revision set.
    return subset & obsmod.getrevs(repo, 'obsolete')
1490 1478
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 2:
        # explicit second set: exclude its ancestors
        exclude = getset(repo, fullreposet(repo), args[1])
    else:
        if not include:
            return baseset()
        # single argument: exclude ancestors of every other head
        reachable = set(_revdescendants(repo, include, False))
        exclude = [h for h in cl.headrevs()
                   if h not in reachable and h not in include]

    missing = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & missing
1516 1504
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        # Walk the source chain back to the very first source, or None
        # when 'rev' has no recorded source at all.
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        prev = _getrevsource(repo, src)
        while prev is not None:
            src = prev
            prev = _getrevsource(repo, src)
        return src

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1547 1535
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # resolve through [paths]; 'default-push' wins over 'default'
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # discovery chats on the ui; buffer so the revset stays silent
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1574 1562
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        prev = repo[x].p1().rev()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    cl = repo.changelog
    firstparents = set(cl.parentrevs(r)[0]
                       for r in getset(repo, fullreposet(repo), x))
    firstparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & firstparents
1593 1581
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        wparents = repo[x].parents()
        if len(wparents) < 2:
            # no second parent (equivalent to the original IndexError path)
            return baseset()
        prev = wparents[1].rev()
        if prev >= 0:
            return subset & baseset([prev])
        return baseset()

    cl = repo.changelog
    secondparents = set(cl.parentrevs(r)[1]
                        for r in getset(repo, fullreposet(repo), x))
    secondparents.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & secondparents
1616 1604
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        # hoist attribute lookups out of the loop
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the working-directory pseudo-rev has no changelog entry;
                # fetch its parents from the context instead
                up(p.rev() for p in repo[r].parents())
            else:
                up(parentrevs(r))
    ps -= set([node.nullrev])
    return subset & ps
1635 1623
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache exposes precomputed per-phase rev sets
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: query the phase of each revision individually;
        # cache=False because phases may change between evaluations
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, cache=False)
1648 1636
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1656 1644
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1664 1652
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    cl = repo.changelog
    chosen = set()
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            chosen.add(r)
        elif n == 1:
            chosen.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only merges have a usable second parent
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                chosen.add(prevs[1])
    return subset & chosen
1689 1677
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        resolved = getset(repo, subset, x)
    except error.RepoLookupError:
        # swallow lookup failures: unknown revisions yield an empty set
        return baseset()
    return resolved
1703 1691
1704 1692 # for internal use
1705 1693 def _notpublic(repo, subset, x):
1706 1694 getargs(x, 0, 0, "_notpublic takes no arguments")
1707 1695 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1708 1696 if repo._phasecache._phasesets:
1709 1697 s = set()
1710 1698 for u in repo._phasecache._phasesets[1:]:
1711 1699 s.update(u)
1712 1700 s = baseset(s - repo.changelog.filteredrevs)
1713 1701 s.sort()
1714 1702 return subset & s
1715 1703 else:
1716 1704 phase = repo._phasecache.phase
1717 1705 target = phases.public
1718 1706 condition = lambda r: phase(repo, r) != target
1719 1707 return subset.filter(condition, cache=False)
1720 1708
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    # cache=False: phases may change between evaluations
    return subset.filter(ispublic, cache=False)
1730 1718
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the current local branch name, not the working parent
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contact the remote and resolve the identifier there
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1765 1753
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field index 2 == removed files
    return checkstatus(repo, subset, pattern, 2)
1777 1765
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # nullrev is always valid; anything else must exist in the changelog
    if revnum != node.nullrev and revnum not in repo.changelog:
        return baseset()
    return subset & baseset([revnum])
1793 1781
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    # the reference revisions to compare against
    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                # 'author' is a synonym for 'user'
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches if ALL selected fields equal those of at
        # least one reference revision
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1905 1893
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    result = getset(repo, subset, x)
    result.reverse()
    return result
1913 1901
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def isroot(r):
        # a root has no real (non-null) parent inside the set
        for p in parentrevs(r):
            if p >= 0 and p in s:
                return False
        return True
    return subset & s.filter(isroot)
1926 1914
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map each character to its complement so that an ascending sort of
        # the inverted string yields a descending sort of the original
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths for the common plain rev/-rev sorts
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    # general case: build a sort-key tuple per revision
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing rev breaks ties deterministically and is the value we
        # recover after sorting
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1989 1977
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    # NOTE: 'pat' is only bound when an argument is given; 'submatches'
    # below is only called in that case, so the reference is safe
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # only look at changes to the .hgsubstate file
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield subrepo names matching the user-supplied pattern
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        # status of .hgsubstate between the first parent and this revision
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any .hgsubstate change qualifies
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            # consider subrepos present on either side of the change
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2032 2020
def _substringmatcher(pattern):
    """Like util.stringmatcher(), but literal patterns match as substrings."""
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind != 'literal':
        return kind, pattern, matcher
    def substrmatch(s):
        return pattern in s
    return kind, pattern, substrmatch
2038 2026
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex/glob pattern: scan the whole tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2067 2055
def tagged(repo, subset, x):
    # backward-compatible alias for tag()
    return tag(repo, subset, x)
2070 2058
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    return subset & obsmod.getrevs(repo, 'unstable')
2079 2067
2080 2068
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" is just another name for "author"
    return author(repo, subset, x)
2090 2078
# experimental
def wdir(repo, subset, x):
    """Internal revset yielding the working-directory pseudo revision."""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2098 2086
# for internal use
def _list(repo, subset, x):
    # resolve a '\0'-separated list of revision identifiers, preserving
    # the order in which they first appear
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            # reject strings like '01' or out-of-range numbers so they fall
            # back to the generic symbol lookup below
            if str(r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            # slow path: resolve the token as a symbol (tag, bookmark, ...)
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev is only admitted when filtering against the full repo
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
            seen.add(r)
    return baseset(ls)
2127 2115
# for internal use
def _intlist(repo, subset, x):
    """Filter subset against a '\\0'-separated list of integer revisions."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    candidates = [int(piece) for piece in s.split('\0')]
    return baseset([r for r in candidates if r in subset])
2136 2124
# for internal use
def _hexlist(repo, subset, x):
    """Filter subset against a '\\0'-separated list of hex node ids."""
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    torev = repo.changelog.rev
    candidates = [torev(node.bin(h)) for h in s.split('\0')]
    return baseset([r for r in candidates if r in subset])
2146 2134
# map of revset predicate names to their implementations; names starting
# with "_" are internal-only and not part of the user-visible language.
# NOTE: "_updatedefaultdest" was removed from this table because the
# function it referenced was deleted (it had no users); keeping the entry
# would raise NameError at import time.
symbols = {
    "_mergedefaultdest": _mergedefaultdest,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2223 2210
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2297 2284
# map of parse-tree node types to the functions that evaluate them
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2313 2300
def optimize(x, small):
    """Optimize parsed revset tree ``x``, returning a (weight, tree) pair.

    The weight is a rough estimate of how expensive the (sub)tree is to
    evaluate; 'and' uses it to evaluate the cheaper operand first.  When
    ``small`` is true, single-revision operands get a bonus (lower weight).
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # rewrite syntactic sugar into its canonical equivalent before weighing
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper side first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                # collect consecutive trivial operands for batching
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # rough cost estimates per predicate
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2446 2433
2447 2434 _aliasarg = ('func', ('symbol', '_aliasarg'))
2448 2435 def _getaliasarg(tree):
2449 2436 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2450 2437 return X, None otherwise.
2451 2438 """
2452 2439 if (len(tree) == 3 and tree[:2] == _aliasarg
2453 2440 and tree[2][0] == 'string'):
2454 2441 return tree[2][1]
2455 2442 return None
2456 2443
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child node (tuple elements)
    for subtree in tree:
        _checkaliasarg(subtree, known)
2467 2454
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions (alphanumerics, '._@$' and any
# non-ASCII byte)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2472 2459
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2482 2469
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must be consumed by the parser
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            for arg in getfuncargs(tree):
                # arguments must be plain symbols (no nesting, no strings)
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2553 2540
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols are reserved for declared arguments
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the whole definition must be consumed by the parser
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2618 2605
class revsetalias(object):
    """A user-defined revset alias: parsed declaration plus definition."""

    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        # parse the left-hand side; on failure record the error and bail
        # out without touching the definition
        self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            # parse the right-hand side with argument placeholders injected
            self.replacement = _parsealiasdefn(value, self.args)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2643 2630
2644 2631 def _getalias(aliases, tree):
2645 2632 """If tree looks like an unexpanded alias, return it. Return None
2646 2633 otherwise.
2647 2634 """
2648 2635 if isinstance(tree, tuple) and tree:
2649 2636 if tree[0] == 'symbol' and len(tree) == 2:
2650 2637 name = tree[1]
2651 2638 alias = aliases.get(name)
2652 2639 if alias and alias.args is None and alias.tree == tree:
2653 2640 return alias
2654 2641 if tree[0] == 'func' and len(tree) > 1:
2655 2642 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2656 2643 name = tree[1][1]
2657 2644 alias = aliases.get(name)
2658 2645 if alias and alias.args is not None and alias.tree == tree[:2]:
2659 2646 return alias
2660 2647 return None
2661 2648
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[name]
2672 2659
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.  'expanding' tracks the chain of aliases
    currently being expanded, to detect cycles; 'cache' memoizes
    fully-expanded replacements by alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is None:
        # not an alias node: expand children instead
        return tuple(_expandaliases(aliases, subtree, expanding, cache)
                     for subtree in tree)
    if alias.error:
        raise util.Abort(alias.error)
    if alias in expanding:
        raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                 'detected') % alias.name)
    expanding.append(alias)
    if alias.name not in cache:
        cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                           expanding, cache)
    result = cache[alias.name]
    expanding.pop()
    if alias.args is not None:
        actuals = getlist(tree[2])
        if len(actuals) != len(alias.args):
            raise error.ParseError(
                _('invalid number of arguments: %s') % len(actuals))
        # arguments are expanded in a fresh context: they cannot extend
        # the current expansion chain
        actuals = [_expandaliases(aliases, a, [], cache) for a in actuals]
        result = _expandargs(result, dict(zip(alias.args, actuals)))
    return result
2706 2693
def findaliases(ui, tree, showwarning=None):
    """Expand [revsetalias] configuration entries found in tree.

    When showwarning is given, broken aliases are reported through it,
    each at most once (see revsetalias.warned).
    """
    _checkaliasarg(tree)
    aliases = {}
    for declname, value in ui.configitems('revsetalias'):
        alias = revsetalias(declname, value)
        aliases[alias.name] = alias
    tree = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for _name, alias in sorted(aliases.iteritems()):
            if not alias.error or alias.warned:
                continue
            showwarning(_('warning: %s\n') % (alias.error))
            alias.warned = True
    return tree
2721 2708
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        return tuple(foldconcat(subtree) for subtree in tree)
    # Flatten (possibly nested) '_concat' nodes with an explicit stack,
    # collecting the leaf texts left to right.
    stack = [tree]
    pieces = []
    while stack:
        item = stack.pop()
        if item[0] == '_concat':
            stack.extend(reversed(item[1:]))
        elif item[0] in ('string', 'symbol'):
            pieces.append(item[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (item[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2742 2729
def parse(spec, lookup=None):
    """Parse spec into a revset tree, simplifying infix 'or' chains.

    Raises error.ParseError if any of spec remains unconsumed.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2749 2736
def posttreebuilthook(tree, repo):
    """Intentionally empty extension point.

    Extensions may wrap this to inspect or act on the optimized revset
    tree before it is turned into a matcher.  'repo' may be None (see
    the match()/_makematcher() callers).
    """
    # hook for extensions to execute code on the optimized tree
    pass
2753 2740
def match(ui, spec, repo=None):
    """Create a matcher function for the single revset expression spec."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # a repo lets the tokenizer resolve ambiguous symbols against real revs
    lookup = repo.__contains__ if repo else None
    tree = parse(spec, lookup)
    return _makematcher(ui, tree, repo)
2762 2749
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: the matcher always yields the empty set
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    if len(specs) == 1:
        tree = parse(specs[0], lookup)
    else:
        # union of all specs, expressed as a single 'or' node
        tree = ('or',) + tuple(parse(spec, lookup) for spec in specs)
    return _makematcher(ui, tree, repo)
2780 2767
def _makematcher(ui, tree, repo):
    """Turn a parsed revset tree into a matcher callable.

    The returned function takes (repo, subset=None) and returns a
    smartset of the matching revisions within subset.
    """
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)

    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # plain collection: wrap it so getset always sees a smartset
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2796 2783
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # single-quote and escape via repr of the stringified value
        return repr(str(s))

    def fmtone(code, arg):
        # render one non-list argument; unknown codes yield None
        if code == 'd':
            return str(int(arg))
        elif code == 's':
            return quote(arg)
        elif code == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif code == 'n':
            return quote(node.hex(arg))
        elif code == 'b':
            return quote(arg.branch())

    def fmtlist(values, code):
        count = len(values)
        if count == 0:
            return "_list('')"
        elif count == 1:
            return fmtone(code, values[0])
        elif code == 'd':
            return "_intlist('%s')" % "\0".join(str(int(v)) for v in values)
        elif code == 's':
            return "_list('%s')" % "\0".join(values)
        elif code == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(v) for v in values)
        elif code == 'b':
            return "_list('%s')" % "\0".join(v.branch() for v in values)

        # no compact list form (e.g. 'r'): emit a balanced 'or' tree
        half = count // 2
        return ('(%s or %s)'
                % (fmtlist(values[:half], code), fmtlist(values[half:], code)))

    output = []
    pos = 0
    argindex = 0
    while pos < len(expr):
        ch = expr[pos]
        if ch == '%':
            pos += 1
            code = expr[pos]
            if code == '%':
                output.append(code)
            elif code in 'dsnbr':
                output.append(fmtone(code, args[argindex]))
                argindex += 1
            elif code == 'l':
                # a list of some type
                pos += 1
                code = expr[pos]
                output.append(fmtlist(list(args[argindex]), code))
                argindex += 1
            else:
                raise util.Abort('unexpected revspec format character %s'
                                 % code)
        else:
            output.append(ch)
        pos += 1

    return ''.join(output)
2890 2877
def prettyformat(tree):
    """Format a revset tree via parser.prettyformat, with 'string' and
    'symbol' nodes treated as leaves."""
    leaves = ('string', 'symbol')
    return parser.prettyformat(tree, leaves)
2893 2880
def depth(tree):
    """Return the nesting depth of tree: 0 for a leaf, otherwise one more
    than its deepest child."""
    if not isinstance(tree, tuple):
        return 0
    return max(depth(subtree) for subtree in tree) + 1
2899 2886
def funcsused(tree):
    """Return the set of function names invoked anywhere in tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    found = set()
    for subtree in tree[1:]:
        found.update(funcsused(subtree))
    if tree[0] == 'func':
        # tree[1] is the ('symbol', name) node naming the called function
        found.add(tree[1][1])
    return found
2910 2897
class abstractsmartset(object):
    """Abstract interface shared by the smartset classes.

    Concrete subclasses in this module (baseset, filteredset, addset,
    generatorset, ...) must implement the methods below that raise
    NotImplementedError.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    # NOTE(review): default reverse=True here differs from the concrete
    # implementations below (reverse=False) -- confirm which is intended.
    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # presumably fullreposet contains every rev, so intersecting
            # with it is a no-op
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
3017 3004
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # adopt a list argument as-is; convert anything else, keeping a
        # pre-built set around for fast membership tests
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            data = list(data)
        self._list = data
        # None: insertion order; True/False: ascending/descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        return sorted(self._list)

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        if self._ascending:
            return iter(self._asclist)
        return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        if self._ascending is None:
            # no declared order: physically reverse the backing list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return bool(self._ascending)

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is False

    def first(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[0]
        if self._ascending:
            return self._asclist[0]
        return self._asclist[-1]

    def last(self):
        if not self:
            return None
        if self._ascending is None:
            return self._list[-1]
        if self._ascending:
            return self._asclist[-1]
        return self._asclist[0]

    def __repr__(self):
        prefix = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, prefix, self._list)
3114 3101
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        keep = self._condition
        for rev in it:
            if keep(rev):
                yield rev

    @property
    def fastasc(self):
        fast = self._subset.fastasc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    @property
    def fastdesc(self):
        fast = self._subset.fastdesc
        if fast is None:
            return None
        return lambda: self._iterfilter(fast())

    def __nonzero__(self):
        # prefer whichever fast iterator exists; fall back to plain
        # iteration, stopping at the first surviving revision
        fast = self.fastasc
        if fast is None:
            fast = self.fastdesc
        it = fast() if fast is not None else self
        for _rev in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        return len(baseset(rev for rev in self))

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for rev in self:
            return rev
        return None

    def last(self):
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is None:
            # unordered subset: walk everything, keeping the final value
            last = None
            for last in self:
                pass
            return last
        for rev in it():
            return rev
        return None #empty case

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3207 3194
3208 3195 def _iterordered(ascending, iter1, iter2):
3209 3196 """produce an ordered iteration from two iterators with the same order
3210 3197
3211 3198 The ascending is used to indicated the iteration direction.
3212 3199 """
3213 3200 choice = max
3214 3201 if ascending:
3215 3202 choice = min
3216 3203
3217 3204 val1 = None
3218 3205 val2 = None
3219 3206 try:
3220 3207 # Consume both iterators in an ordered way until one is empty
3221 3208 while True:
3222 3209 if val1 is None:
3223 3210 val1 = iter1.next()
3224 3211 if val2 is None:
3225 3212 val2 = iter2.next()
3226 3213 next = choice(val1, val2)
3227 3214 yield next
3228 3215 if val1 == next:
3229 3216 val1 = None
3230 3217 if val2 == next:
3231 3218 val2 = None
3232 3219 except StopIteration:
3233 3220 # Flush any remaining values and consume the other one
3234 3221 it = iter2
3235 3222 if val1 is not None:
3236 3223 yield val1
3237 3224 it = iter1
3238 3225 elif val2 is not None:
3239 3226 # might have been equality and both are empty
3240 3227 yield val2
3241 3228 for val in it:
3242 3229 yield val
3243 3230
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None: no known order; True/False: ascending/descending
        self._ascending = ascending
        # cached baseset of all values, filled lazily by _list
        self._genlist = None
        # cached sorted copy of _genlist, filled by _trysetasclist
        self._asclist = None

    def __len__(self):
        # materializes the whole union via _list
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # a cached sorted list beats merging the two component iterators
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            # no fast ascending iteration unless both components have one
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            # no fast descending iteration unless both components have one
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            # no declared order: physically reverse the materialized list
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # first element of the reversed iteration is our last; flip the
        # order back afterwards so iteration direction is preserved
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3443 3430
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None if the generation order is unknown, True if gen
        yields ascending values, False if descending.  A known order
        enables early termination of membership tests and exposes the
        matching fast iterator.
        """
        self._gen = gen
        # sorted copy of _genlist, built once the generator is exhausted
        self._asclist = None
        # membership cache: value -> True/False, for values already decided
        self._cache = {}
        # values produced so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            if iterasc:
                self.fastasc = self._iterator
                # NOTE(review): an instance attribute does not override the
                # class-level __contains__ for the implicit 'in' operator
                # (special method lookup happens on the type); this only
                # affects explicit __contains__ calls -- confirm intended.
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # Do not use 'for r in self' because it will enforce the iteration
        # order (default ascending), possibly unrolling a whole descending
        # iterator.
        if self._genlist:
            return True
        for r in self._consumegen():
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x can no longer appear
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        consume = self._consumegen()
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    # use the builtin next() (not the Python 2-only .next()
                    # method) and stop explicitly, which is also correct
                    # under PEP 479 semantics
                    try:
                        yield next(consume)
                    except StopIteration:
                        return
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            # generator exhausted: freeze a sorted copy and switch to the
            # cheap list-backed fast iterators
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        return next(it(), None)

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # (bugfix: this used to recurse into self.first(), returning the
            # wrong end of the set for unordered, unconsumed generators)
            for x in self._consumegen():
                pass
            return self.last()
        return next(it(), None)

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s>' % (type(self).__name__, d)
3617 3604
class spanset(abstractsmartset):
    """A smartset over a contiguous range of revision numbers.

    Behaves much like ``xrange(x, y)`` while staying lazy, with two
    differences:
    - constructing with start > end yields a descending set,
    - revisions hidden by the current repoview filter are skipped.
    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included in the set (defaults to 0)
        end: first revision excluded, i.e. last + 1 (defaults to len(repo))

        The set is descending when `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so _start <= _end; direction lives in _ascending
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        """Select ascending order, or descending when ``reverse``."""
        self._ascending = not reverse

    def reverse(self):
        """Flip the iteration direction in place."""
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # lazily drop revisions hidden by the repoview filter
        hidden = self._hiddenrevs
        for rev in iterrange:
            if rev not in hidden:
                yield rev

    def __iter__(self):
        return self.fastasc() if self._ascending else self.fastdesc()

    def fastasc(self):
        span = xrange(self._start, self._end)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def fastdesc(self):
        span = xrange(self._end - 1, self._start - 1, -1)
        if not self._hiddenrevs:
            return iter(span)
        return self._iterfilter(span)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # true as soon as one unfiltered revision exists in the span
        for rev in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        # subtract the hidden revisions falling inside the span
        count = 0
        start = self._start
        end = self._end
        for rev in self._hiddenrevs:
            if (end < rev <= start) or (start <= rev < end):
                count += 1
        return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        """First unfiltered revision in the current order, or None."""
        it = self.fastasc if self._ascending else self.fastdesc
        for rev in it():
            return rev
        return None

    def last(self):
        """Last unfiltered revision in the current order, or None."""
        it = self.fastdesc if self._ascending else self.fastasc
        for rev in it():
            return rev
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3726 3713
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # `other` is not a smartset: make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revisions
            # (this boldly assumes all smartsets are pure)
            #
            # `other` was used with "&", let's assume this is a set-like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There are also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3765 3752
def prettyformatset(revs):
    """Render repr(revs) with one nested smartset per line, indented by
    its nesting depth (each '<' opens a level, each '>' closes one)."""
    text = repr(revs)
    pieces = []
    pos = 0
    total = len(text)
    while pos < total:
        cut = text.find('<', pos + 1)
        if cut < 0:
            cut = total
        # depth = unmatched '<' seen strictly before this piece
        depth = text.count('<', 0, pos) - text.count('>', 0, pos)
        assert depth >= 0
        pieces.append((depth, text[pos:cut].rstrip()))
        pos = cut
    return '\n'.join(' ' * depth + chunk for depth, chunk in pieces)
3779 3766
# tell hggettext to extract docstrings from these functions:
# NOTE(review): `symbols` is presumably the predicate-name -> function
# table defined earlier in this file -- confirm against the full module.
i18nfunctions = symbols.values()
# (end of file; trailing web-viewer footer text removed during cleanup)