##// END OF EJS Templates
revset: strip off "literal:" prefix from bookmark not found error...
Yuya Nishihara -
r26538:5c9ec1cc default
parent child Browse files
Show More
@@ -1,3818 +1,3818
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 encoding,
16 16 error,
17 17 hbisect,
18 18 match as matchmod,
19 19 node,
20 20 obsolete as obsmod,
21 21 parser,
22 22 pathutil,
23 23 phases,
24 24 repoview,
25 25 util,
26 26 )
27 27
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # followfirst restricts the walk to first parents only by slicing
    # parentrevs(...)[:1]; cut=None keeps both parents.
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog

    def iterate():
        # Lazily walk ancestors in descending revision order. heapq is a
        # min-heap, so revisions are negated on push to get max-heap order.
        revs.sort(reverse=True)
        irevs = iter(revs)
        h = []

        inputrev = next(irevs, None)
        if inputrev is not None:
            heapq.heappush(h, -inputrev)

        seen = set()
        while h:
            current = -heapq.heappop(h)
            # Feed further input revs into the heap as the walk reaches
            # their revision number, so all given heads are covered.
            if current == inputrev:
                inputrev = next(irevs, None)
                if inputrev is not None:
                    heapq.heappush(h, -inputrev)
            if current not in seen:
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
60 60
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # followfirst restricts descent to first-parent links only.
    if followfirst:
        cut = 1
    else:
        cut = None

    def iterate():
        cl = repo.changelog
        # XXX this should be 'parentset.min()' assuming 'parentset' is a
        # smartset (and if it is not, it should.)
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # Single ascending pass: a rev is a descendant iff one of its
            # (possibly cut) parents is already in 'seen'.
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
89 89
def _reachablerootspure(repo, minroot, roots, heads, includepath):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    # Pure-Python fallback used when the C extension does not provide
    # changelog.reachableroots (see reachableroots below).
    if not roots:
        return []
    parentrevs = repo.changelog.parentrevs
    roots = set(roots)
    visit = list(heads)
    reachable = set()
    seen = {}
    # prefetch all the things! (because python is slow)
    reached = reachable.add
    dovisit = visit.append
    nextvisit = visit.pop
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = nextvisit()
        if rev in roots:
            reached(rev)
            if not includepath:
                continue
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            # minroot prunes the walk: nothing below the smallest root
            # can be on a roots::heads path
            if parent >= minroot and parent not in seen:
                dovisit(parent)
    if not reachable:
        return baseset()
    if not includepath:
        return reachable
    # Second pass (ascending): pull in every rev between the reached
    # roots and the heads to form the full <roots>::<heads> path set.
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reached(rev)
    return reachable
127 127
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # Prefer the C-accelerated implementation when available.
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
144 144
# Parser table for the revset grammar, consumed by the generic Pratt
# parser in parser.py. Higher binding strength binds tighter.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "##": (20, None, None, ("_concat", 20), None),
    "~": (18, None, None, ("ancestor", 18), None),
    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, None, ("negate", 19), ("minus", 5), None),
    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, None, ("not", 10), None, None),
    "!": (10, None, ("not", 10), None, None),
    "and": (5, None, None, ("and", 5), None),
    "&": (5, None, None, ("and", 5), None),
    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, None, ("or", 4), None),
    "|": (4, None, None, ("or", 4), None),
    "+": (4, None, None, ("or", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "end": (0, None, None, None, None),
}
172 172
# Tokens that are operator keywords rather than symbols.
keywords = set(['and', 'or', 'not'])

# default set of valid characters for the initial letter of symbols
# (Python 2: xrange/chr over the full byte range; bytes > 127 are allowed
# so that non-ASCII encoded names can appear unquoted)
_syminitletters = set(c for c in [chr(i) for i in xrange(256)]
                      if c.isalnum() or c in '._@' or ord(c) > 127)

# default set of valid characters for non-initial letters of symbols
_symletters = set(c for c in [chr(i) for i in xrange(256)]
                  if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
def tokenize(program, lookup=None, syminitletters=None, symletters=None):
    '''
    Parse a revset statement into a stream of tokens

    ``syminitletters`` is the set of valid characters for the initial
    letter of symbols.

    By default, character ``c`` is recognized as valid for initial
    letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.

    ``symletters`` is the set of valid characters for non-initial
    letters of symbols.

    By default, character ``c`` is recognized as valid for non-initial
    letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''
    if syminitletters is None:
        syminitletters = _syminitletters
    if symletters is None:
        symletters = _symletters

    if program and lookup:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        parts = program.split(':', 1)
        if all(lookup(sym) for sym in parts if sym):
            if parts[0]:
                yield ('symbol', parts[0], 0)
            if len(parts) > 1:
                s = len(parts[0])
                yield (':', None, s)
                if parts[1]:
                    yield ('symbol', parts[1], s + 1)
            yield ('end', None, len(program))
            return

    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
            yield ('::', None, pos)
            pos += 1 # skip ahead
        elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None, pos)
            pos += 1 # skip ahead
        elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
            yield ('##', None, pos)
            pos += 1 # skip ahead
        elif c in "():=,-|&+!~^%": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            # r-prefixed strings are raw: no backslash decoding
            if c == 'r':
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = parser.unescapestr
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        # gather up a symbol/keyword
        elif c in syminitletters:
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if d not in symletters:
                    break
                if d == '.' and program[pos - 1] == '.': # special case for ..
                    pos -= 1
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            elif '-' in sym:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(sym):
                    # looks like a real symbol
                    yield ('symbol', sym, s)
                else:
                    # looks like an expression: re-emit the hyphenated run
                    # as alternating symbol and '-' (minus) tokens
                    parts = sym.split('-')
                    for p in parts[:-1]:
                        if p: # possible consecutive -
                            yield ('symbol', p, s)
                        s += len(p)
                        yield ('-', None, pos)
                        s += 1
                    if parts[-1]: # possible trailing -
                        yield ('symbol', parts[-1], s)
            else:
                yield ('symbol', sym, s)
            # back up one: the shared 'pos += 1' below re-advances past
            # the last consumed character
            pos -= 1
        else:
            raise error.ParseError(_("syntax error in revset '%s'") %
                                   program, pos)
        pos += 1
    yield ('end', None, pos)
300 300
def parseerrordetail(inst):
    """Compose error message from specified ParseError object
    """
    # A two-element args tuple carries (message, position); a bare
    # one-element tuple is just the message.
    if len(inst.args) <= 1:
        return inst.args[0]
    return _('at %s: %s') % (inst.args[1], inst.args[0])
308 308
309 309 # helpers
310 310
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Raises ParseError(err) for any other node, including None.
    """
    if not x or x[0] not in ('string', 'symbol'):
        raise error.ParseError(err)
    return x[1]
315 315
def getlist(x):
    """Flatten a left-nested 'list' parse node into a Python list.

    Returns [] for a false/None node and [x] for a non-list node.
    """
    if not x:
        return []
    # Iterative equivalent of the recursive definition: a 'list' node is
    # ('list', <rest>, <item>), so items are gathered in reverse order.
    items = []
    while x[0] == 'list':
        items.append(x[2])
        x = x[1]
    items.append(x)
    items.reverse()
    return items
322 322
def getargs(x, min, max, err):
    # Return the flattened argument list of node x, raising
    # ParseError(err) unless min <= len(args) <= max.
    # A negative max disables the upper bound.
    l = getlist(x)
    if len(l) < min or (max >= 0 and len(l) > max):
        raise error.ParseError(err)
    return l
328 328
def getargsdict(x, funcname, keys):
    # Map positional and key=value arguments of x onto the whitespace-
    # separated names in ``keys``; errors are reported against funcname.
    return parser.buildargsdict(getlist(x), funcname, keys.split(),
                                keyvaluenode='keyvalue', keynode='symbol')
332 332
def isvalidsymbol(tree):
    """Examine whether specified ``tree`` is valid ``symbol`` or not
    """
    # Keep the tag check first: it must run even for short tuples.
    if tree[0] != 'symbol':
        return False
    return len(tree) > 1
337 337
def getsymbol(tree):
    """Get symbol name from valid ``symbol`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidsymbol``.
    """
    _tag, name = tree[0], tree[1]
    return name
344 344
def isvalidfunc(tree):
    """Examine whether specified ``tree`` is valid ``func`` or not
    """
    # A valid func node is ('func', <symbol-node>, ...) where the symbol
    # node itself passes isvalidsymbol.
    return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
349 349
def getfuncname(tree):
    """Get function name from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # tree[1] is the ('symbol', name, ...) node holding the function name.
    return getsymbol(tree[1])
356 356
def getfuncargs(tree):
    """Get list of function arguments from valid ``func`` in ``tree``

    This assumes that ``tree`` is already examined by ``isvalidfunc``.
    """
    # A two-element func node means the call had no argument list at all.
    if len(tree) > 2:
        return getlist(tree[2])
    else:
        return []
366 366
def getset(repo, subset, x):
    # Evaluate parse node x against subset via the methods dispatch
    # table, coercing the result to a smartset if needed.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    # Smartsets are detected by their 'isascending' attribute.
    if util.safehasattr(s, 'isascending'):
        return s
    if (repo.ui.configbool('devel', 'all-warnings')
            or repo.ui.configbool('devel', 'old-revset')):
        # else case should not happen, because all non-func are internal,
        # ignoring for now.
        if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
            repo.ui.develwarn('revset "%s" use list instead of smartset, '
                              '(upgrade your code)' % x[1][1])
    return baseset(s)
381 381
def _getrevsource(repo, r):
    # Return the rev that r was grafted/transplanted/rebased from, or
    # None if r has no (resolvable) source recorded in its extras.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                # recorded source no longer exists in this repo
                pass
    return None
391 391
392 392 # operator methods
393 393
def stringset(repo, subset, x):
    # Resolve a bare string/symbol to a single revision and intersect it
    # with subset. nullrev is only surfaced against a fullreposet.
    x = repo[x].rev()
    if (x in subset
            or x == node.nullrev and isinstance(subset, fullreposet)):
        return baseset([x])
    return baseset()
400 400
def rangeset(repo, subset, x, y):
    # Evaluate 'x:y': the linear revision range from the first rev of x
    # to the last rev of y, clipped to subset.
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m == n:
        r = baseset([m])
    elif n == node.wdirrev:
        # ranges ending at the working directory pseudo-rev: span to the
        # repo tip, then append wdirrev explicitly
        r = spanset(repo, m, len(repo)) + baseset([n])
    elif m == node.wdirrev:
        # descending range starting at the working directory pseudo-rev
        r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
    elif m < n:
        r = spanset(repo, m, n + 1)
    else:
        # reversed range (m > n): spanset iterates it descending
        r = spanset(repo, m, n - 1)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    #
    # This has performance implication, carrying the sorting over when possible
    # would be more efficient.
    return r & subset
425 425
def dagrange(repo, subset, x, y):
    # Evaluate 'x::y': all revs on a DAG path from a rev in x to a rev
    # in y (includepath=True gives <roots>::<heads>).
    r = fullreposet(repo)
    xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
                        includepath=True)
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return xs & subset
433 433
def andset(repo, subset, x, y):
    # 'x and y': evaluate y against the result of x, i.e. set intersection.
    return getset(repo, getset(repo, subset, x), y)
436 436
def orset(repo, subset, *xs):
    # 'a or b or ...': union of all operands. Split recursively in
    # halves to keep the addset tree balanced for long 'or' chains.
    assert xs
    if len(xs) == 1:
        return getset(repo, subset, xs[0])
    p = len(xs) // 2
    a = orset(repo, subset, *xs[:p])
    b = orset(repo, subset, *xs[p:])
    return a + b
445 445
def notset(repo, subset, x):
    # 'not x': subset minus the revs matched by x.
    return subset - getset(repo, subset, x)
448 448
def listset(repo, subset, a, b):
    # A bare 'a, b' list is only valid inside function arguments.
    raise error.ParseError(_("can't use a list in this context"))
451 451
def keyvaluepair(repo, subset, k, v):
    # A bare 'key=value' pair is only valid inside function arguments.
    raise error.ParseError(_("can't use a key-value pair in this context"))
454 454
def func(repo, subset, a, b):
    # Dispatch a 'name(args)' call to the predicate registered in
    # symbols; unknown names raise with a suggestion list.
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)

    # Only advertise documented predicates in the "did you mean" list.
    keep = lambda fn: getattr(fn, '__doc__', None) is not None

    syms = [s for (s, fn) in symbols.items() if keep(fn)]
    raise error.UnknownIdentifier(a[1], syms)
463 463
464 464 # functions
465 465
def _mergedefaultdest(repo, subset, x):
    # ``_mergedefaultdest()``

    # default destination for merge.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    getargs(x, 0, 0, _("_mergedefaultdest takes no arguments"))
    if repo._activebookmark:
        # With an active bookmark: merge with the other head of that
        # bookmark; abort unless there are exactly two bookmark heads.
        bmheads = repo.bookmarkheads(repo._activebookmark)
        curhead = repo[repo._activebookmark].node()
        if len(bmheads) == 2:
            if curhead == bmheads[0]:
                node = bmheads[1]
            else:
                node = bmheads[0]
        elif len(bmheads) > 2:
            raise util.Abort(_("multiple matching bookmarks to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
        elif len(bmheads) <= 1:
            raise util.Abort(_("no matching bookmark to merge - "
                "please merge with an explicit rev or bookmark"),
                hint=_("run 'hg heads' to see all heads"))
    else:
        # No active bookmark: consider the heads of the current branch,
        # ignoring heads that carry bookmarks.
        branch = repo[None].branch()
        bheads = repo.branchheads(branch)
        nbhs = [bh for bh in bheads if not repo[bh].bookmarks()]

        if len(nbhs) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev")
                             % (branch, len(bheads)),
                             hint=_("run 'hg heads .' to see heads"))

        parent = repo.dirstate.p1()
        if len(nbhs) <= 1:
            # Nothing unambiguous to merge with: explain why and abort.
            if len(bheads) > 1:
                raise util.Abort(_("heads are bookmarked - "
                                   "please merge with an explicit rev"),
                                 hint=_("run 'hg heads' to see all heads"))
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev")
                                 % branch,
                                 hint=_("run 'hg heads' to see all heads"))
            msg, hint = _('nothing to merge'), None
            if parent != repo.lookup(branch):
                hint = _("use 'hg update' instead")
            raise util.Abort(msg, hint=hint)

        if parent not in bheads:
            raise util.Abort(_('working directory not at a head revision'),
                             hint=_("use 'hg update' or merge with an "
                                    "explicit revision"))
        # Pick the branch head that is not the working dir parent.
        if parent == nbhs[0]:
            node = nbhs[-1]
        else:
            node = nbhs[0]
    return subset & baseset([repo[node].rev()])
526 526
def _updatedefaultdest(repo, subset, x):
    # ``_updatedefaultdest()``

    # default destination for update.
    # # XXX: Currently private because I expect the signature to change.
    # # XXX: - taking rev as arguments,
    # # XXX: - bailing out in case of ambiguity vs returning all data.
    getargs(x, 0, 0, _("_updatedefaultdest takes no arguments"))
    # Here is where we should consider bookmarks, divergent bookmarks,
    # foreground changesets (successors), and tip of current branch;
    # but currently we are only checking the branch tips.
    node = None
    wc = repo[None]
    p1 = wc.p1()
    try:
        node = repo.branchtip(wc.branch())
    except error.RepoLookupError:
        if wc.branch() == 'default': # no default branch!
            node = repo.lookup('tip') # update to tip
        else:
            raise util.Abort(_("branch %s not found") % wc.branch())

    if p1.obsolete() and not p1.children():
        # allow updating to successors
        successors = obsmod.successorssets(repo, p1.node())

        # behavior of certain cases is as follows,
        #
        # divergent changesets: update to highest rev, similar to what
        #     is currently done when there are more than one head
        #     (i.e. 'tip')
        #
        # replaced changesets: same as divergent except we know there
        # is no conflict
        #
        # pruned changeset: no update is done; though, we could
        #     consider updating to the first non-obsolete parent,
        #     similar to what is current done for 'hg prune'

        if successors:
            # flatten the list here handles both divergent (len > 1)
            # and the usual case (len = 1)
            successors = [n for sub in successors for n in sub]

            # get the max revision for the given successors set,
            # i.e. the 'tip' of a set
            node = repo.revs('max(%ln)', successors).first()
    return subset & baseset([repo[node].rev()])
575 575
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of repo.status() is the list of added files
    return checkstatus(repo, subset, pat, 1)
587 587
def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = fullreposet(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    # Fold pairwise: gca(a, b, c) == gca(gca(a, b), c).
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
612 612
def _ancestors(repo, subset, x, followfirst=False):
    # Shared implementation for ancestors() and _firstancestors():
    # all ancestors of the revs in x, clipped to subset.
    heads = getset(repo, fullreposet(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s
619 619
def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)
625 625
def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)
630 630
def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        # n arrives as a parse node; its payload must be an integer
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # follow first parents n steps back
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps
647 647
def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    # Match case-insensitively: both pattern and user field are lowered.
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
656 656
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads``      : csets topologically good/bad
    - ``range``              : csets taking part in the bisection
    - ``pruned``             : csets that are goods, bads or skipped
    - ``untested``           : csets whose fate is yet unknown
    - ``ignored``            : csets ignored due to DAG topology
    - ``current``            : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state
673 673
# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    return bisect(repo, subset, x)
678 678
def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = util.stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                # report the bare name (any "literal:" prefix stripped by
                # stringmatcher), not the raw user input
                raise error.RepoLookupError(_("bookmark '%s' does not exist")
                                            % pattern)
            bms.add(repo[bmrev].rev())
        else:
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise error.RepoLookupError(_("no bookmarks exist"
                                              " that match '%s'") % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: all bookmarked revisions
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    # a bookmark pointing at nullrev is never a real revision
    bms -= set([node.nullrev])
    return subset & bms
716 716
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    getbi = repo.revbranchcache().branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = util.stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists and pattern kind is not specified explicitly
            if pattern in repo.branchmap():
                return subset.filter(lambda r: matcher(getbi(r)[0]))
            if b.startswith('literal:'):
                # explicit literal: no fallthrough, the branch must exist
                raise error.RepoLookupError(_("branch '%s' does not exist")
                                            % pattern)
        else:
            # regex (or other explicit) pattern: filter all branches
            return subset.filter(lambda r: matcher(getbi(r)[0]))

    # revspec case: all changesets on the branches of the given revs
    s = getset(repo, fullreposet(repo), x)
    b = set()
    for r in s:
        b.add(getbi(r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
752 752
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped
763 763
def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # only bundlerepo's changelog exposes bundlerevs
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs
775 775
def checkstatus(repo, subset, pat, field):
    # Filter subset to changesets whose status list ``field`` (0=modified,
    # 1=added, 2=removed in repo.status() order) has a file matching pat.
    hasset = matchmod.patkind(pat) == 'set'

    # cache the matcher across revisions; fileset patterns ('set:...')
    # depend on the context and must be rebuilt every time
    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # fast path: a single literal file name
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
807 807
def _children(repo, narrow, parentset):
    # Return the revs in ``narrow`` that have a parent in ``parentset``.
    if not parentset:
        return baseset()
    cs = set()
    pr = repo.changelog.parentrevs
    minrev = parentset.min()
    for r in narrow:
        # children always have a higher rev than any parent
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    return baseset(cs)
823 823
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs
831 831
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
839 839
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: direct manifest membership test (fast path)
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # patterned lookup: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)
866 866
def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # prefix match against the recorded convert_revision extra
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))
888 888
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    # date()[0] is the unix timestamp of the changeset
    return subset.filter(lambda x: dm(repo[x].date()[0]))
897 897
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    # lower both sides once for case-insensitive substring search
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)
910 910
def _descendants(repo, subset, x, followfirst=False):
    # Shared implementation for descendants() and _firstdescendants():
    # the roots themselves plus all their descendants, clipped to subset.
    roots = getset(repo, fullreposet(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        # unordered subset: re-intersect to restore its ordering
        result = subset & result
    return result
929 929
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    # follow both parents; _firstdescendants() restricts to first parents
    return _descendants(repo, subset, x, followfirst=False)
935 935
def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only first parents.
    return _descendants(repo, subset, x, True)
940 940
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, fullreposet(repo), x)
    else:
        sources = fullreposet(repo)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        # recorded graft/transplant/rebase source of r, if any
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
984 984
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    # obsstore-derived cache of divergent revisions
    return subset & obsmod.getrevs(repo, 'divergent')
993 993
def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    # obsstore-derived cache of extinct revisions
    return subset & obsmod.getrevs(repo, 'extinct')
1002 1002
def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """
    args = getargsdict(x, 'extra', 'label value')
    if 'label' not in args:
        # i18n: "extra" is a keyword
        raise error.ParseError(_('extra takes at least 1 argument'))
    # i18n: "extra" is a keyword
    label = getstring(args['label'], _('first argument to extra must be '
                                       'a string'))
    value = None

    if 'value' in args:
        # i18n: "extra" is a keyword
        value = getstring(args['value'], _('second argument to extra must be '
                                           'a string'))
        kind, value, matcher = util.stringmatcher(value)

    def matches(r):
        extra = repo[r].extra()
        if label not in extra:
            return False
        # 'matcher' only exists when a value argument was given; the
        # 'value is None' short-circuit keeps us from touching it otherwise
        return value is None or matcher(extra[label])

    return subset.filter(matches)
1032 1032
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: match against exactly one file, no directory walk
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: filerev -> changerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        # First pass: collect unfiltered linkrevs and child information.
        for fr in list(fl):
            rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
                if rev == lkr: # no shadowed entry found
                    # XXX This should never happen unless some manifest points
                    # to biggish file revisions (like a revision that uses a
                    # parent that never appears in the manifest ancestors)
                    continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
1147 1147
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # identical argument handling and semantics as limit()
    return limit(repo, subset, x)
1153 1153
def _follow(repo, subset, x, name, followfirst=False):
    # Shared implementation of follow() and _followfirst(); 'name' is only
    # used in error messages.
    args = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
    dotctx = repo['.']
    if args:
        pat = getstring(args[0], _("%s expected a pattern") % name)
        m = matchmod.match(repo.root, repo.getcwd(), [pat],
                           ctx=repo[None], default='path')

        s = set()
        for fname in dotctx:
            if not m(fname):
                continue
            fctx = dotctx[fname]
            for actx in fctx.ancestors(followfirst):
                s.add(actx.rev())
            # include the revision responsible for the most recent version
            s.add(fctx.introrev())
    else:
        s = _revancestors(repo, baseset([dotctx.rev()]), followfirst)

    return subset & s
1173 1173
def follow(repo, subset, x):
    """``follow([pattern])``
    An alias for ``::.`` (ancestors of the working directory's first parent).
    If pattern is specified, the histories of files matching given
    pattern is followed, including copies.
    """
    # shared implementation lives in _follow()
    return _follow(repo, subset, x, 'follow')
1181 1181
def _followfirst(repo, subset, x):
    # ``_followfirst([pattern])``
    # Like ``follow([pattern])`` but follows only the first parent of
    # every revision or file revision.
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
1187 1187
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    everything = spanset(repo)  # drop "null" if any
    return subset & everything
1195 1195
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error as e:
        raise error.ParseError(_('invalid match pattern: %s') % e)

    def matches(r):
        ctx = repo[r]
        # scan changed file names, the user name and the commit message
        return any(gr.search(t)
                   for t in ctx.files() + [ctx.user(), ctx.description()])

    return subset.filter(matches)
1216 1216
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is "<two-char prefix><payload>"
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            if value != '': # empty means working directory; leave rev as None
                rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    # a revision matches if any of its changed files matches
    def matches(x):
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1273 1273
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single plain-pattern ('p:') argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1286 1286
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    cl = repo.changelog
    for branch, hnodes in repo.branchmap().iteritems():
        for h in hnodes:
            hs.add(cl.rev(h))
    # XXX using a set to feed the baseset is wrong. Sets are not ordered.
    # This does not break because of other fullreposet misbehavior.
    # XXX We should combine with subset first: 'subset & baseset(...)'. This is
    # necessary to ensure we preserve the order in subset.
    return baseset(hs) & subset
1302 1302
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    members = getset(repo, subset, x)
    # a head is any member that is not a parent of another member
    return members - parents(repo, subset, x)
1310 1310
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # everything filtered out of the 'visible' view is hidden
    return subset & repoview.filterrevs(repo, 'visible')
1319 1319
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        ctx = repo[r]
        for t in ctx.files() + [ctx.user(), ctx.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1334 1334
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    # evaluate the argument set against the whole repo so it keeps its own
    # order, then take its first 'lim' members that also appear in 'subset'
    revs = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(revs)
    # loop variable renamed from 'x' (was shadowing the parse-tree argument)
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1360 1360
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    # walk the argument set from its end, collecting members of 'subset'
    revs = getset(repo, fullreposet(repo), l[0])
    revs.reverse()
    result = []
    it = iter(revs)
    # loop variable renamed from 'x' (was shadowing the parse-tree argument)
    for _i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in subset:
            result.append(y)
    return baseset(result)
1387 1387
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.max()
    except ValueError:
        # max() throws a ValueError when the collection is empty,
        # same as python's max().
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1402 1402
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    parentrevs = repo.changelog.parentrevs
    # a merge has a real (non -1) second parent
    return subset.filter(lambda r: parentrevs(r)[1] != -1)
1411 1411
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
    # (and if it is not, it should.)
    baserev = min(subset)
    # count children of every rev >= baserev in a flat array indexed by offset
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1430 1430
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    try:
        m = candidates.min()
    except ValueError:
        # min() throws a ValueError when the collection is empty,
        # same as python's min().
        return baseset()
    if m in subset:
        return baseset([m])
    return baseset()
1445 1445
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 == modified files
    return checkstatus(repo, subset, pat, 0)
1457 1457
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = util.stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            # report the pattern with any "literal:" prefix stripped off
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % pattern)
        namespaces.add(repo.names[pattern])
    else:
        # loop variable renamed (was shadowing the query string 'ns')
        for name, namespace in repo.names.iteritems():
            if matcher(name):
                namespaces.add(namespace)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that match '%s'") % pattern)

    names = set()
    for namespace in namespaces:
        for name in namespace.listnames(repo):
            if name not in namespace.deprecated:
                names.update(repo[n].rev() for n in namespace.nodes(repo, name))

    # 'null' is never part of a namespace's members
    names -= set([node.nullrev])
    return subset & names
1495 1495
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # full 40-digit hex id: convert to binary and look it up directly
        try:
            rn = repo.changelog.rev(node.bin(n))
        except (LookupError, TypeError):
            rn = None
    else:
        # shorter prefix: let the changelog resolve it unambiguously
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1519 1519
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # obsstore-derived cache of obsolete revisions
    return subset & obsmod.getrevs(repo, 'obsolete')
1527 1527
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 2:
        exclude = getset(repo, fullreposet(repo), args[1])
    else:
        if not include:
            return baseset()

        # exclude every head that neither is in nor descends from 'include'
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results
1553 1553
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is None:
        dests = fullreposet(repo)
    else:
        dests = getset(repo, fullreposet(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to its very first link
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    o = set(_firstsrc(r) for r in dests)
    o.discard(None)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & o
1584 1584
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured push/pull paths when no path is given
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # buffer any ui output produced while talking to the peer
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1611 1611
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[0]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1630 1630
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # the working directory has only one parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set(cl.parentrevs(r)[1]
             for r in getset(repo, fullreposet(repo), x))
    ps.discard(node.nullrev)
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & ps
1653 1653
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = set(p.rev() for p in repo[x].parents())
    else:
        ps = set()
        cl = repo.changelog
        for r in getset(repo, fullreposet(repo), x):
            if r == node.wdirrev:
                # the changelog has no entry for the working directory
                ps.update(p.rev() for p in repo[r].parents())
            else:
                ps.update(cl.parentrevs(r))
    ps.discard(node.nullrev)
    return subset & ps
1672 1672
def _phase(repo, subset, target):
    """helper to select all rev in phase <target>"""
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: the phase cache keeps a precomputed set per phase
        s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
        s = baseset(s)
        s.sort() # set are non ordered, so we enforce ascending
        return subset & s
    else:
        # slow path: look up the phase of every candidate revision
        phase = repo._phasecache.phase
        condition = lambda r: phase(repo, r) == target
        return subset.filter(condition, cache=False)
1685 1685
def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    return _phase(repo, subset, phases.draft)
1693 1693
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    return _phase(repo, subset, phases.secret)
1701 1701
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 selects the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        else:
            # n == 2: only record a second parent when one exists
            prevs = cl.parentrevs(r)
            if len(prevs) > 1:
                ps.add(prevs[1])
    return subset & ps
1726 1726
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revision: degrade to an empty result instead of aborting
        return baseset()
1740 1740
# for internal use
def _notpublic(repo, subset, x):
    # selects every revision that is not in the public phase
    getargs(x, 0, 0, "_notpublic takes no arguments")
    repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
    if repo._phasecache._phasesets:
        # fast path: union every non-public phase set from the cache
        s = set()
        for u in repo._phasecache._phasesets[1:]:
            s.update(u)
        s = baseset(s - repo.changelog.filteredrevs)
        s.sort()
        return subset & s
    else:
        # slow path: test the phase of each candidate revision
        phase = repo._phasecache.phase
        target = phases.public
        condition = lambda r: phase(repo, r) != target
        return subset.filter(condition, cache=False)
1757 1757
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase
    return subset.filter(lambda r: phase(repo, r) == phases.public,
                         cache=False)
1767 1767
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' is documented above as a synonym for the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    # contact the remote repository and resolve the identifier there
    other = hg.peer(repo, {}, dest)
    n = other.lookup(q)
    # only yield a result if the remote node also exists locally and is
    # inside the subset being filtered
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1802 1802
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pattern = getstring(x, _("removes requires a pattern"))
    # status field index 2 selects removed files
    return checkstatus(repo, subset, pattern, 2)
1814 1814
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    args = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        r = int(getstring(args[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # unknown revisions (other than the virtual null revision) give an
    # empty result rather than an error
    if r not in repo.changelog and r != node.nullrev:
        return baseset()
    return subset & baseset([r])
1830 1830
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True))
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
                    # fields are ordered cheapest-first, so stop before
                    # computing costlier fields (e.g. diff) needlessly
                    break
            if match:
                return True
        return False

    return subset.filter(matches)
1942 1942
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # smartsets reverse in place, so evaluate first, flip, then return
    result = getset(repo, subset, x)
    result.reverse()
    return result
1950 1950
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    candidates = getset(repo, fullreposet(repo), x)
    parentrevs = repo.changelog.parentrevs
    def hasnoparentinset(r):
        # null parents (negative revision numbers) never count as members
        return not any(p >= 0 and p in candidates for p in parentrevs(r))
    return subset & candidates.filter(hasnoparentinset)
1963 1963
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # complement every byte so that lexicographic ordering of the
        # result is the reverse of the original string's ordering
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        # fast path: sorting by revision number needs no key extraction
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            # tuple membership, not substring: "k in 'user author'" used to
            # accept any substring (e.g. 'r', 'use') as a user sort instead
            # of raising the unknown-key error below
            elif k in ('user', 'author'):
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # append the revision itself so ties break deterministically by rev
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
2026 2026
def subrepo(repo, subset, x):
    """``subrepo([pattern])``
    Changesets that add, modify or remove the given subrepo.  If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
    if len(args) != 0:
        pat = getstring(args[0], _("subrepo requires a pattern"))

    # subrepo state is recorded in .hgsubstate, so only changesets that
    # touch that file can affect a subrepo
    m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])

    def submatches(names):
        # yield the names matching 'pat'; only reached when a pattern
        # argument was given, so 'pat' is always bound here
        k, p, m = util.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if len(args) == 0:
            # no pattern: any change to .hgsubstate qualifies
            return s.added or s.modified or s.removed

        if s.added:
            # .hgsubstate was created: every subrepo in it counts as added
            return any(submatches(c.substate.keys()))

        if s.modified:
            # compare both sides so renamed/retargeted subrepos are caught
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            # .hgsubstate was deleted: every former subrepo counts as removed
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches)
2069 2069
def _substringmatcher(pattern):
    # like util.stringmatcher(), except that a literal pattern matches
    # any substring instead of requiring an exact match
    kind, pattern, matcher = util.stringmatcher(pattern)
    if kind == 'literal':
        def matcher(s):
            return pattern in s
    return kind, pattern, matcher
2075 2075
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        # stringmatcher() strips any 'literal:'/'re:' prefix from 'pattern'
        kind, pattern, matcher = util.stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                # 'pattern' is the bare tag name here (prefix already gone),
                # so the error shows what the user actually meant to look up
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: all tagged revisions, minus the synthetic 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
2104 2104
def tagged(repo, subset, x):
    # backwards-compatibility alias for tag()
    return tag(repo, subset, x)
2107 2107
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # obsmod.getrevs() yields the repository's 'unstable' revisions
    return subset & obsmod.getrevs(repo, 'unstable')
2116 2116
2117 2117
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # "user" and "author" are synonyms sharing one implementation
    return author(repo, subset, x)
2127 2127
# experimental
def wdir(repo, subset, x):
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _("wdir takes no arguments"))
    # the working-directory pseudo revision is a member of every fullreposet
    if isinstance(subset, fullreposet) or node.wdirrev in subset:
        return baseset([node.wdirrev])
    return baseset()
2135 2135
# for internal use
def _list(repo, subset, x):
    # x is a '\0'-separated string of revision specifiers; produced by the
    # optimizer's fast path for 'a + b + c' style expressions
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split('\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if str(r) != t or r not in cl:
                # not a plain decimal revision number (e.g. '01' or a hash
                # prefix); fall back to full symbol resolution below
                raise ValueError
            revs = [r]
        except ValueError:
            # resolve as a symbol (hash, tag, bookmark, ...)
            revs = stringset(repo, subset, t)

        for r in revs:
            if r in seen:
                continue
            # nullrev only counts as present for a fullreposet
            if (r in subset
                or r == node.nullrev and isinstance(subset, fullreposet)):
                ls.append(r)
                seen.add(r)
    return baseset(ls)
2164 2164
# for internal use
def _intlist(repo, subset, x):
    # x is a '\0'-separated string of integer revisions (machine-generated)
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    wanted = [int(t) for t in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2173 2173
# for internal use
def _hexlist(repo, subset, x):
    # x is a '\0'-separated string of hexadecimal node ids
    spec = getstring(x, "internal error")
    if not spec:
        return baseset()
    torev = repo.changelog.rev
    wanted = [torev(node.bin(h)) for h in spec.split('\0')]
    return baseset([r for r in wanted if r in subset])
2183 2183
# symbol table: maps each revset predicate name, as written in a revset
# expression, to the function implementing it (names starting with '_'
# are internal-only helpers, not part of the user-visible language)
symbols = {
    "_mergedefaultdest": _mergedefaultdest,
    "_updatedefaultdest": _updatedefaultdest,
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "named": named,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "_notpublic": _notpublic,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "subrepo": subrepo,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "wdir": wdir,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
2260 2260
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE(review): entries here should stay a subset of the 'symbols' table
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "_notpublic",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "wdir",
    "_list",
    "_intlist",
    "_hexlist",
    ])
2334 2334
# maps parse-tree node types (operators produced by the revset parser)
# to the function that evaluates a node of that type
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": stringset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "keyvalue": keyvaluepair,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
2350 2350
def optimize(x, small):
    """Optimize parse tree 'x', returning a (weight, tree) pair.

    'weight' is a rough relative cost estimate used to reorder 'and'
    operands so the cheaper side is evaluated first; 'tree' is the
    rewritten parse tree.  'small' hints that the subexpression is
    expected to select few revisions.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # 'a - b' is evaluated as 'a and not b'
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'onlypost':
        return optimize(('func', ('symbol', 'only'), x[1]), small)
    elif op == 'dagrangepre':
        # '::x' is 'ancestors(x)'
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        # 'x::' is 'descendants(x)'
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangeall':
        return optimize(('range', ('string', '0'), ('string', 'tip')), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        # NOTE(review): substring membership test -- fine for the fixed set
        # of parser-produced node types, but it would also match any
        # substring such as 'str'; tuple membership would be safer
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs is not None
                and revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases is not None
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # put the cheaper operand first so it narrows the candidate set
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        # fast path for machine-generated expression, that is likely to have
        # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
        ws, ts, ss = [], [], []
        def flushss():
            # collapse the pending run of trivial operands into one _list()
            if not ss:
                return
            if len(ss) == 1:
                w, t = ss[0]
            else:
                s = '\0'.join(t[1] for w, t in ss)
                y = ('func', ('symbol', '_list'), ('string', s))
                w, t = optimize(y, False)
            ws.append(w)
            ts.append(t)
            del ss[:]
        for y in x[1:]:
            w, t = optimize(y, False)
            if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
                ss.append((w, t))
                continue
            flushss()
            ws.append(w)
            ts.append(t)
        flushss()
        if len(ts) == 1:
            return ws[0], ts[0] # 'or' operation is fully optimized out
        # we can't reorder trees by weight because it would change the order.
        # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
        # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
        return max(ws), (op,) + tuple(ts)
    elif op == 'not':
        # Optimize not public() to _notpublic() because we have a fast version
        if x[1] == ('func', ('symbol', 'public'), None):
            newsym = ('func', ('symbol', '_notpublic'), None)
            o = optimize(newsym, not small)
            return o[0], o[1]
        else:
            o = optimize(x[1], not small)
            return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        # parentheses only group; they carry no weight of their own
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # weight classes reflect how expensive each predicate tends to be
        # (NOTE(review): these also use substring membership, see above)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2483 2483
2484 2484 _aliasarg = ('func', ('symbol', '_aliasarg'))
2485 2485 def _getaliasarg(tree):
2486 2486 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2487 2487 return X, None otherwise.
2488 2488 """
2489 2489 if (len(tree) == 3 and tree[:2] == _aliasarg
2490 2490 and tree[2][0] == 'string'):
2491 2491 return tree[2][1]
2492 2492 return None
2493 2493
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    # non-tuples are leaves (strings etc.) and cannot hold placeholders
    if not isinstance(tree, tuple):
        return
    arg = _getaliasarg(tree)
    if arg is not None and (not known or arg not in known):
        raise error.UnknownIdentifier('_aliasarg', [])
    # recurse into every child of this node
    for subtree in tree:
        _checkaliasarg(subtree, known)
2504 2504
# the set of valid characters for the initial letter of symbols in
# alias declarations and definitions
# ('$' is included for backward-compatible alias argument names, and
# every byte > 127 so non-ASCII identifiers keep working)
_aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
                           if c.isalnum() or c in '._@$' or ord(c) > 127)
2509 2509
def _tokenizealias(program, lookup=None):
    """Parse alias declaration/definition into a stream of tokens

    This allows symbol names to use also ``$`` as an initial letter
    (for backward compatibility), and callers of this function should
    examine whether ``$`` is used also for unexpected symbols or not.
    """
    # same as the regular tokenizer except for the widened set of
    # characters allowed to start a symbol
    return tokenize(program, lookup=lookup,
                    syminitletters=_aliassyminitletters)
2519 2519
def _parsealiasdecl(decl):
    """Parse alias declaration ``decl``

    This returns ``(name, tree, args, errorstr)`` tuple:

    - ``name``: of declared alias (may be ``decl`` itself at error)
    - ``tree``: parse result (or ``None`` at error)
    - ``args``: list of alias argument names (or None for symbol declaration)
    - ``errorstr``: detail about detected error (or None)

    >>> _parsealiasdecl('foo')
    ('foo', ('symbol', 'foo'), None, None)
    >>> _parsealiasdecl('$foo')
    ('$foo', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo::bar')
    ('foo::bar', None, None, 'invalid format')
    >>> _parsealiasdecl('foo bar')
    ('foo bar', None, None, 'at 4: invalid token')
    >>> _parsealiasdecl('foo()')
    ('foo', ('func', ('symbol', 'foo')), [], None)
    >>> _parsealiasdecl('$foo()')
    ('$foo()', None, None, "'$' not for alias arguments")
    >>> _parsealiasdecl('foo($1, $2)')
    ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
    >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
    ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
    >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
    ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo(bar($1, $2))')
    ('foo(bar($1, $2))', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo("string")')
    ('foo("string")', None, None, 'invalid argument list')
    >>> _parsealiasdecl('foo($1, $2')
    ('foo($1, $2', None, None, 'at 10: unexpected token: end')
    >>> _parsealiasdecl('foo("string')
    ('foo("string', None, None, 'at 5: unterminated string')
    >>> _parsealiasdecl('foo($1, $2, $1)')
    ('foo', None, None, 'argument names collide with each other')
    """
    p = parser.parser(elements)
    try:
        tree, pos = p.parse(_tokenizealias(decl))
        # the whole declaration must be consumed; trailing junk is an error
        if (pos != len(decl)):
            raise error.ParseError(_('invalid token'), pos)

        if isvalidsymbol(tree):
            # "name = ...." style
            name = getsymbol(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            return (name, ('symbol', name), None, None)

        if isvalidfunc(tree):
            # "name(arg, ....) = ...." style
            name = getfuncname(tree)
            if name.startswith('$'):
                return (decl, None, None, _("'$' not for alias arguments"))
            args = []
            # every argument must be a plain symbol (no nesting, no strings)
            for arg in getfuncargs(tree):
                if not isvalidsymbol(arg):
                    return (decl, None, None, _("invalid argument list"))
                args.append(getsymbol(arg))
            if len(args) != len(set(args)):
                return (name, None, None,
                        _("argument names collide with each other"))
            return (name, ('func', ('symbol', name)), args, None)

        return (decl, None, None, _("invalid format"))
    except error.ParseError as inst:
        return (decl, None, None, parseerrordetail(inst))
2590 2590
def _parsealiasdefn(defn, args):
    """Parse alias definition ``defn``

    This function also replaces alias argument references in the
    specified definition by ``_aliasarg(ARGNAME)``.

    ``args`` is a list of alias argument names, or None if the alias
    is declared as a symbol.

    This returns "tree" as parsing result.

    >>> args = ['$1', '$2', 'foo']
    >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$1'))
      (func
        ('symbol', '_aliasarg')
        ('string', 'foo')))
    >>> try:
    ...     _parsealiasdefn('$1 or $bar', args)
    ... except error.ParseError, inst:
    ...     print parseerrordetail(inst)
    at 6: '$' not for alias arguments
    >>> args = ['$1', '$10', 'foo']
    >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
    (or
      (func
        ('symbol', '_aliasarg')
        ('string', '$10'))
      ('symbol', 'foobar'))
    >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
    (or
      ('string', '$1')
      ('string', 'foo'))
    """
    def tokenizedefn(program, lookup=None):
        if args:
            argset = set(args)
        else:
            argset = set()

        for t, value, pos in _tokenizealias(program, lookup=lookup):
            if t == 'symbol':
                if value in argset:
                    # emulate tokenization of "_aliasarg('ARGNAME')":
                    # "_aliasarg()" is an unknown symbol only used separate
                    # alias argument placeholders from regular strings.
                    yield ('symbol', '_aliasarg', pos)
                    yield ('(', None, pos)
                    yield ('string', value, pos)
                    yield (')', None, pos)
                    continue
                elif value.startswith('$'):
                    # '$'-prefixed symbols that are not declared arguments
                    # are rejected outright
                    raise error.ParseError(_("'$' not for alias arguments"),
                                           pos)
            yield (t, value, pos)

    p = parser.parser(elements)
    tree, pos = p.parse(tokenizedefn(defn))
    # the whole definition must be consumed; trailing junk is an error
    if pos != len(defn):
        raise error.ParseError(_('invalid token'), pos)
    return parser.simplifyinfixops(tree, ('or',))
2655 2655
class revsetalias(object):
    # True once this alias's `error` message has been displayed; prevents
    # repeated warnings across successive `findaliases` calls.
    warned = False

    def __init__(self, name, value):
        '''Parse one [revsetalias] config entry, e.g.:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        decl = _parsealiasdecl(name)
        self.name, self.tree, self.args, self.error = decl
        if self.error:
            self.error = _('failed to parse the declaration of revset alias'
                           ' "%s": %s') % (self.name, self.error)
            return

        try:
            self.replacement = _parsealiasdefn(value, self.args)
            # reject user-supplied '_aliasarg' placeholders smuggled into
            # the definition
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError as inst:
            self.error = _('failed to parse the definition of revset alias'
                           ' "%s": %s') % (self.name, parseerrordetail(inst))
2680 2680
2681 2681 def _getalias(aliases, tree):
2682 2682 """If tree looks like an unexpanded alias, return it. Return None
2683 2683 otherwise.
2684 2684 """
2685 2685 if isinstance(tree, tuple) and tree:
2686 2686 if tree[0] == 'symbol' and len(tree) == 2:
2687 2687 name = tree[1]
2688 2688 alias = aliases.get(name)
2689 2689 if alias and alias.args is None and alias.tree == tree:
2690 2690 return alias
2691 2691 if tree[0] == 'func' and len(tree) > 1:
2692 2692 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2693 2693 name = tree[1][1]
2694 2694 alias = aliases.get(name)
2695 2695 if alias and alias.args is not None and alias.tree == tree[:2]:
2696 2696 return alias
2697 2697 return None
2698 2698
def _expandargs(tree, args):
    """Recursively substitute ``_aliasarg('NAME')`` nodes in ``tree`` with
    the replacement stored under ``NAME`` in the ``args`` mapping.
    """
    if not (tree and isinstance(tree, tuple)):
        return tree
    name = _getaliasarg(tree)
    if name is None:
        # not a placeholder: rebuild the node with expanded children
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[name]
2709 2709
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used to
    detect infinite recursion; 'cache' memoizes expanded alias bodies by
    alias name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(alias.error)
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            # actual arguments are expanded with a fresh stack: they are
            # independent of the alias body currently being expanded
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        # not an alias reference: expand children and rebuild the node
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2743 2743
def findaliases(ui, tree, showwarning=None):
    """Expand every [revsetalias] config entry referenced by ``tree``.

    When ``showwarning`` is given, it is invoked once for each broken
    alias definition, whether or not the alias was referenced.
    """
    _checkaliasarg(tree)
    aliases = {}
    for key, value in ui.configitems('revsetalias'):
        entry = revsetalias(key, value)
        aliases[entry.name] = entry
    expanded = _expandaliases(aliases, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(aliases.iteritems()):
            if entry.error and not entry.warned:
                showwarning(_('warning: %s\n') % (entry.error))
                entry.warned = True
    return expanded
2758 2758
def foldconcat(tree):
    """Fold elements to be concatenated by `##`

    Adjacent 'string'/'symbol' operands of a '_concat' node are collapsed
    into a single 'string' node; any other node is rebuilt with its
    children folded recursively.

    Raises error.ParseError when a '_concat' operand is neither a string
    nor a symbol.
    """
    # guard against an empty tuple explicitly (consistent with
    # _expandargs) so it is returned unchanged instead of raising
    # IndexError on tree[0]
    if (not tree or not isinstance(tree, tuple)
        or tree[0] in ('string', 'symbol')):
        return tree
    if tree[0] == '_concat':
        pending = [tree]
        l = []
        while pending:
            e = pending.pop()
            if e[0] == '_concat':
                # flatten nested '_concat' nodes, preserving operand order
                pending.extend(reversed(e[1:]))
            elif e[0] in ('string', 'symbol'):
                l.append(e[1])
            else:
                msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
                raise error.ParseError(msg)
        return ('string', ''.join(l))
    else:
        return tuple(foldconcat(t) for t in tree)
2779 2779
def parse(spec, lookup=None):
    """Parse the revset string ``spec`` into a syntax tree.

    ``lookup`` is an optional callable passed through to the tokenizer.
    Raises error.ParseError when ``spec`` is not fully consumed.
    """
    tree, pos = parser.parser(elements).parse(tokenize(spec, lookup=lookup))
    if pos != len(spec):
        raise error.ParseError(_("invalid token"), pos)
    return parser.simplifyinfixops(tree, ('or',))
2786 2786
def posttreebuilthook(tree, repo):
    """Hook point invoked once the optimized revset tree is built.

    Core does nothing here and ignores the return value; extensions may
    wrap this function to inspect or act on the final tree.
    """
    # hook for extensions to execute code on the optimized tree
    pass
2790 2790
def match(ui, spec, repo=None):
    """Create a matcher for a single revision spec ``spec``."""
    if not spec:
        raise error.ParseError(_("empty query"))
    # let the tokenizer check whether a name exists in the repo
    lookup = repo.__contains__ if repo else None
    return _makematcher(ui, parse(spec, lookup), repo)
2799 2799
def matchany(ui, specs, repo=None):
    """Create a matcher that will include any revisions matching one of the
    given specs"""
    if not specs:
        # no specs at all: matcher that selects nothing
        def mfunc(repo, subset=None):
            return baseset()
        return mfunc
    if not all(specs):
        raise error.ParseError(_("empty query"))
    lookup = repo.__contains__ if repo else None
    trees = [parse(s, lookup) for s in specs]
    if len(trees) == 1:
        tree = trees[0]
    else:
        # several specs are combined with a top-level 'or'
        tree = ('or',) + tuple(trees)
    return _makematcher(ui, tree, repo)
2817 2817
def _makematcher(ui, tree, repo):
    """Turn a parsed revset ``tree`` into a callable matcher function."""
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    posttreebuilthook(tree, repo)
    def mfunc(repo, subset=None):
        if subset is None:
            subset = fullreposet(repo)
        if not util.safehasattr(subset, 'isascending'):
            # plain collections must be wrapped into a smartset first
            subset = baseset(subset)
        return getset(repo, subset, tree)
    return mfunc
2833 2833
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields a single-quoted, backslash-escaped literal
        return repr(str(s))

    def argtype(c, arg):
        # render a single argument according to its format character;
        # returns None for unrecognized characters (callers pass only
        # characters from 'dsnbr')
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render list 's' of type 't'; multi-element lists of known types
        # use the _list()/_intlist()/_hexlist() revset helpers
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        # other types (e.g. %lr): split recursively and combine with 'or'
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    # scan 'expr' character by character, substituting %-sequences with
    # the corresponding escaped argument
    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2927 2927
def prettyformat(tree):
    """Return an indented, multi-line rendering of a revset parse tree."""
    return parser.prettyformat(tree, ('string', 'symbol'))
2930 2930
def depth(tree):
    """Return the nesting depth of a parse tree; a leaf counts as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(subtree) for subtree in tree)
2936 2936
def funcsused(tree):
    """Return the set of function names referenced anywhere in ``tree``."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    funcs = set()
    for subtree in tree[1:]:
        funcs.update(funcsused(subtree))
    if tree[0] == 'func':
        # a 'func' node is ('func', ('symbol', NAME), args...)
        funcs.add(tree[1][1])
    return funcs
2947 2947
class abstractsmartset(object):
    """Base class defining the mandatory API shared by all smartsets."""

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    @util.cachefunc
    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first value of the ascending iterator is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    @util.cachefunc
    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first value of the descending iterator is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=False):
        """get the set to iterate in an ascending or descending order

        The default used to be reverse=True here, which contradicted every
        concrete implementation (baseset, filteredset, addset, generatorset
        all declare reverse=False); the abstract signature now matches them.
        """
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        if isinstance(other, fullreposet):
            # intersecting with the whole repo changes nothing
            return self
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin functions have no func_code and cannot be wrapped by
        # util.cachefunc; they do not need memoization anyway
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
3054 3054
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        # accepts a list (used as-is), a set (kept as the membership set
        # and copied into a list), or any other iterable
        if not isinstance(data, list):
            if isinstance(data, set):
                self._set = data
            data = list(data)
        self._list = data
        # tri-state iteration order: None = insertion order,
        # True = ascending, False = descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily built membership set (may be pre-seeded by __init__)
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily built sorted copy backing the ordered iterators
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # cache the bound method of the underlying set for fast lookups
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            # unsorted: flip the stored insertion order in place
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None

    def __repr__(self):
        # '', '-' or '+' encodes the current iteration order
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r>' % (type(self).__name__, d, self._list)
3151 3151
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition

    def __contains__(self, x):
        return x in self._subset and self._condition(x)

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # generator yielding the values of 'it' that pass the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        # fast iteration is only available if the wrapped subset has it
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        # prefer a fast iterator in either direction to probe emptiness
        fast = self.fastasc
        if fast is None:
            fast = self.fastdesc
        if fast is not None:
            it = fast()
        else:
            it = self

        for r in it:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        # ordering is delegated to the wrapped subset
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # if a fast iterator exists in the opposite direction, the last
        # element is its first hit; otherwise exhaust a full iteration
        it = None
        if self.isascending():
            it = self.fastdesc
        elif self.isdescending():
            it = self.fastasc
        if it is not None:
            for x in it():
                return x
            return None #empty case
        else:
            x = None
            for x in self:
                pass
            return x

    def __repr__(self):
        return '<%s %r>' % (type(self).__name__, self._subset)
3244 3244
3245 3245 def _iterordered(ascending, iter1, iter2):
3246 3246 """produce an ordered iteration from two iterators with the same order
3247 3247
3248 3248 The ascending is used to indicated the iteration direction.
3249 3249 """
3250 3250 choice = max
3251 3251 if ascending:
3252 3252 choice = min
3253 3253
3254 3254 val1 = None
3255 3255 val2 = None
3256 3256 try:
3257 3257 # Consume both iterators in an ordered way until one is empty
3258 3258 while True:
3259 3259 if val1 is None:
3260 3260 val1 = iter1.next()
3261 3261 if val2 is None:
3262 3262 val2 = iter2.next()
3263 3263 next = choice(val1, val2)
3264 3264 yield next
3265 3265 if val1 == next:
3266 3266 val1 = None
3267 3267 if val2 == next:
3268 3268 val2 = None
3269 3269 except StopIteration:
3270 3270 # Flush any remaining values and consume the other one
3271 3271 it = iter2
3272 3272 if val1 is not None:
3273 3273 yield val1
3274 3274 it = iter1
3275 3275 elif val2 is not None:
3276 3276 # might have been equality and both are empty
3277 3277 yield val2
3278 3278 for val in it:
3279 3279 yield val
3280 3280
class addset(abstractsmartset):
    """Represent the addition of two sets

    Wrapper structure for lazily adding two structures without losing much
    performance on the __contains__ method

    If the ascending attribute is set, that means the two structures are
    ordered in either an ascending or descending way. Therefore, we can add
    them maintaining the order by iterating over both at the same time

    >>> xs = baseset([0, 3, 2])
    >>> ys = baseset([5, 2, 4])

    >>> rs = addset(xs, ys)
    >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
    (True, True, False, True, 0, 4)
    >>> rs = addset(xs, baseset([]))
    >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
    (True, True, False, 0, 2)
    >>> rs = addset(baseset([]), baseset([]))
    >>> bool(rs), 0 in rs, rs.first(), rs.last()
    (False, False, None, None)

    iterate unsorted:
    >>> rs = addset(xs, ys)
    >>> [x for x in rs] # without _genlist
    [0, 3, 2, 5, 4]
    >>> assert not rs._genlist
    >>> len(rs)
    5
    >>> [x for x in rs] # with _genlist
    [0, 3, 2, 5, 4]
    >>> assert rs._genlist

    iterate ascending:
    >>> rs = addset(xs, ys, ascending=True)
    >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastasc()]
    ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
    >>> assert rs._asclist

    iterate descending:
    >>> rs = addset(xs, ys, ascending=False)
    >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert not rs._asclist
    >>> len(rs)
    5
    >>> [x for x in rs], [x for x in rs.fastdesc()]
    ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
    >>> assert rs._asclist

    iterate ascending without fastasc:
    >>> rs = addset(xs, generatorset(ys), ascending=True)
    >>> assert rs.fastasc is None
    >>> [x for x in rs]
    [0, 2, 3, 4, 5]

    iterate descending without fastdesc:
    >>> rs = addset(generatorset(xs), ys, ascending=False)
    >>> assert rs.fastdesc is None
    >>> [x for x in rs]
    [5, 4, 3, 2, 0]
    """
    def __init__(self, revs1, revs2, ascending=None):
        self._r1 = revs1
        self._r2 = revs2
        self._iter = None
        # None = arbitrary order; True/False = ascending/descending
        self._ascending = ascending
        # lazily-filled caches: full iteration result and its sorted copy
        self._genlist = None
        self._asclist = None

    def __len__(self):
        # forces materialization of the whole union
        return len(self._list)

    def __nonzero__(self):
        return bool(self._r1) or bool(self._r2)

    @util.propertycache
    def _list(self):
        if not self._genlist:
            self._genlist = baseset(iter(self))
        return self._genlist

    def __iter__(self):
        """Iterate over both collections without repeating elements

        If the ascending attribute is not set, iterate over the first one and
        then over the second one checking for membership on the first one so we
        dont yield any duplicates.

        If the ascending attribute is set, iterate over both collections at the
        same time, yielding only one value at a time in the given order.
        """
        if self._ascending is None:
            if self._genlist:
                return iter(self._genlist)
            def arbitraryordergen():
                for r in self._r1:
                    yield r
                inr1 = self._r1.__contains__
                for r in self._r2:
                    if not inr1(r):
                        yield r
            return arbitraryordergen()
        # try to use our own fast iterator if it exists
        self._trysetasclist()
        if self._ascending:
            attr = 'fastasc'
        else:
            attr = 'fastdesc'
        it = getattr(self, attr)
        if it is not None:
            return it()
        # maybe half of the component supports fast
        # get iterator for _r1
        iter1 = getattr(self._r1, attr)
        if iter1 is None:
            # let's avoid side effect (not sure it matters)
            iter1 = iter(sorted(self._r1, reverse=not self._ascending))
        else:
            iter1 = iter1()
        # get iterator for _r2
        iter2 = getattr(self._r2, attr)
        if iter2 is None:
            # let's avoid side effect (not sure it matters)
            iter2 = iter(sorted(self._r2, reverse=not self._ascending))
        else:
            iter2 = iter2()
        return _iterordered(self._ascending, iter1, iter2)

    def _trysetasclist(self):
        """populate the _asclist attribute if possible and necessary"""
        if self._genlist is not None and self._asclist is None:
            self._asclist = sorted(self._genlist)

    @property
    def fastasc(self):
        # prefer the cached sorted list; otherwise merge the two fast
        # ascending iterators if both components provide one
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__iter__
        iter1 = self._r1.fastasc
        iter2 = self._r2.fastasc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(True, iter1(), iter2())

    @property
    def fastdesc(self):
        self._trysetasclist()
        if self._asclist is not None:
            return self._asclist.__reversed__
        iter1 = self._r1.fastdesc
        iter2 = self._r2.fastdesc
        if None in (iter1, iter2):
            return None
        return lambda: _iterordered(False, iter1(), iter2())

    def __contains__(self, x):
        return x in self._r1 or x in self._r2

    def sort(self, reverse=False):
        """Sort the added set

        For this we use the cached list with all the generated values and if we
        know they are ascending or descending we can sort them in a smart way.
        """
        self._ascending = not reverse

    def isascending(self):
        return self._ascending is not None and self._ascending

    def isdescending(self):
        return self._ascending is not None and not self._ascending

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        # temporarily flip the direction so 'first' of the reversed order
        # is the last element, then restore it
        self.reverse()
        val = self.first()
        self.reverse()
        return val

    def __repr__(self):
        d = {None: '', False: '-', True: '+'}[self._ascending]
        return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3480 3480
3481 3481 class generatorset(abstractsmartset):
3482 3482 """Wrap a generator for lazy iteration
3483 3483
3484 3484 Wrapper structure for generators that provides lazy membership and can
3485 3485 be iterated more than once.
3486 3486 When asked for membership it generates values until either it finds the
3487 3487 requested one or has gone through all the elements in the generator
3488 3488 """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: if not None, declares that ``gen`` yields in ascending
        (True) or descending (False) order, enabling a fast directional
        iterator and an early-exit membership test.
        """
        self._gen = gen
        self._asclist = None          # sorted values, set once exhausted
        self._cache = {}              # value -> membership result
        self._genlist = []            # values produced so far, in order
        self._finished = False        # True once the generator is drained
        self._ascending = True
        if iterasc is not None:
            # install instance attributes shadowing the class-level
            # fastasc/fastdesc defaults (None) and __contains__.
            # NOTE(review): the 'in' operator on new-style classes looks
            # special methods up on the type, not the instance — verify
            # these instance-level __contains__ overrides take effect for
            # the intended call sites.
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains
3506 3506
3507 3507 def __nonzero__(self):
3508 3508 # Do not use 'for r in self' because it will enforce the iteration
3509 3509 # order (default ascending), possibly unrolling a whole descending
3510 3510 # iterator.
3511 3511 if self._genlist:
3512 3512 return True
3513 3513 for r in self._consumegen():
3514 3514 return True
3515 3515 return False
3516 3516
3517 3517 def __contains__(self, x):
3518 3518 if x in self._cache:
3519 3519 return self._cache[x]
3520 3520
3521 3521 # Use new values only, as existing values would be cached.
3522 3522 for l in self._consumegen():
3523 3523 if l == x:
3524 3524 return True
3525 3525
3526 3526 self._cache[x] = False
3527 3527 return False
3528 3528
3529 3529 def _asccontains(self, x):
3530 3530 """version of contains optimised for ascending generator"""
3531 3531 if x in self._cache:
3532 3532 return self._cache[x]
3533 3533
3534 3534 # Use new values only, as existing values would be cached.
3535 3535 for l in self._consumegen():
3536 3536 if l == x:
3537 3537 return True
3538 3538 if l > x:
3539 3539 break
3540 3540
3541 3541 self._cache[x] = False
3542 3542 return False
3543 3543
3544 3544 def _desccontains(self, x):
3545 3545 """version of contains optimised for descending generator"""
3546 3546 if x in self._cache:
3547 3547 return self._cache[x]
3548 3548
3549 3549 # Use new values only, as existing values would be cached.
3550 3550 for l in self._consumegen():
3551 3551 if l == x:
3552 3552 return True
3553 3553 if l < x:
3554 3554 break
3555 3555
3556 3556 self._cache[x] = False
3557 3557 return False
3558 3558
    def __iter__(self):
        # fastasc/fastdesc may be instance attributes installed by
        # __init__ or by _consumegen once the generator is exhausted
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code; this terminates because _consumegen has
        # now installed both fastasc and fastdesc on the instance
        return iter(self)
3571 3571
    def _iterator(self):
        """Return an iterator yielding revisions in generation order.

        Values already materialized in ``_genlist`` (possibly by another
        iterator over the same set) are replayed first; further values
        are pulled from the shared ``_consumegen()`` consumer.
        """
        if self._finished:
            # fully consumed already: the materialized list is authoritative
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        # py2 generator method: each call advances the shared consumer,
        # which appends the new value to genlist as a side effect
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    # another iterator already produced this value
                    yield genlist[i]
                else:
                    # raises StopIteration when the source is exhausted
                    yield nextrev()
                i += 1
        return gen()
3594 3594
    def _consumegen(self):
        """Yield values from the wrapped generator, caching each one.

        Every produced value is recorded in ``_cache`` (for membership
        tests) and appended to ``_genlist`` (so concurrent iterators can
        replay it).  Once the generator is exhausted, a sorted copy is
        built and the fast iteration paths are installed.
        """
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            # build the sorted cache and enable fast asc/desc iteration
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__
3609 3609
3610 3610 def __len__(self):
3611 3611 for x in self._consumegen():
3612 3612 pass
3613 3613 return len(self._genlist)
3614 3614
    def sort(self, reverse=False):
        # lazy: only flips the iteration-order flag, nothing is materialized
        self._ascending = not reverse
3617 3617
    def reverse(self):
        # flip the current iteration direction in place
        self._ascending = not self._ascending
3620 3620
    def isascending(self):
        # True when iteration currently runs in ascending revision order
        return self._ascending
3623 3623
    def isdescending(self):
        # True when iteration currently runs in descending revision order
        return not self._ascending
3626 3626
3627 3627 def first(self):
3628 3628 if self._ascending:
3629 3629 it = self.fastasc
3630 3630 else:
3631 3631 it = self.fastdesc
3632 3632 if it is None:
3633 3633 # we need to consume all and try again
3634 3634 for x in self._consumegen():
3635 3635 pass
3636 3636 return self.first()
3637 3637 return next(it(), None)
3638 3638
3639 3639 def last(self):
3640 3640 if self._ascending:
3641 3641 it = self.fastdesc
3642 3642 else:
3643 3643 it = self.fastasc
3644 3644 if it is None:
3645 3645 # we need to consume all and try again
3646 3646 for x in self._consumegen():
3647 3647 pass
3648 3648 return self.first()
3649 3649 return next(it(), None)
3650 3650
3651 3651 def __repr__(self):
3652 3652 d = {False: '-', True: '+'}[self._ascending]
3653 3653 return '<%s%s>' % (type(self).__name__, d)
3654 3654
class spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x > y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
               (default to 0)
        end:   first revision excluded (last+1)
               (default to len(repo)

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize so that start <= end always holds; the original
            # direction is remembered in _ascending.  Bounds shift by one
            # because the half-open side of the range flips.
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        # lazy: only flips the direction flag, bounds are untouched
        self._ascending = not reverse

    def reverse(self):
        # flip the current iteration direction in place
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # yield only revisions not hidden by the repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        # ascending iteration over the half-open span [start, end)
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        # descending iteration over the same bounds
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        # in the span, and not filtered out by the repoview
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        # py2 truth protocol: non-empty iff one visible revision exists
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            # subtract hidden revisions falling inside the span.
            # NOTE(review): __init__ normalizes start <= end, which makes
            # the first disjunct below look unreachable - confirm.
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        # first visible revision in the current order, or None if empty
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        # iterate opposite to the current order and take its first element
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None

    def __repr__(self):
        d = {False: '-', True: '+'}[self._ascending]
        return '<%s%s %d:%d>' % (type(self).__name__, d,
                                 self._start, self._end - 1)
3763 3763
class fullreposet(spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization and magic to handle virtual
    revisions such as "null".
    """

    def __init__(self, repo):
        # span over every revision: [0, len(repo))
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make is so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # XXX As fullreposet is also used as bootstrap, this is wrong.
        #
        # With a giveme312() revset returning [3,1,2], this makes
        # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
        # We cannot just drop it because other usage still need to sort it:
        # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
        #
        # There is also some faulty revset implementations that rely on it
        # (eg: children as of its state in e8075329c5fb)
        #
        # When we fix the two points above we can move this into the if clause
        other.sort(reverse=self.isdescending())
        return other
3802 3802
def prettyformatset(revs):
    """Render ``repr(revs)`` with one space of indent per nesting depth.

    Every '<' in the repr opens a nested smartset; a segment's indent is
    the number of '<' not yet matched by '>' before it starts.
    """
    rs = repr(revs)
    total = len(rs)
    out = []
    pos = 0
    while pos < total:
        # segment runs up to (not including) the next '<', or end of string
        nxt = rs.find('<', pos + 1)
        if nxt < 0:
            nxt = total
        depth = rs.count('<', 0, pos) - rs.count('>', 0, pos)
        assert depth >= 0
        out.append(' ' * depth + rs[pos:nxt].rstrip())
        pos = nxt
    return '\n'.join(out)
3816 3816
# tell hggettext to extract docstrings from these functions:
# (the revset predicate docstrings are user-visible help text)
i18nfunctions = symbols.values()
@@ -1,767 +1,770
1 1 $ hg init
2 2
3 3 no bookmarks
4 4
5 5 $ hg bookmarks
6 6 no bookmarks set
7 7
8 8 $ hg bookmarks -Tjson
9 9 [
10 10 ]
11 11
12 12 bookmark rev -1
13 13
14 14 $ hg bookmark X
15 15
16 16 list bookmarks
17 17
18 18 $ hg bookmarks
19 19 * X -1:000000000000
20 20
21 21 list bookmarks with color
22 22
23 23 $ hg --config extensions.color= --config color.mode=ansi \
24 24 > bookmarks --color=always
25 25 \x1b[0;32m * \x1b[0m\x1b[0;32mX\x1b[0m\x1b[0;32m -1:000000000000\x1b[0m (esc)
26 26
27 27 $ echo a > a
28 28 $ hg add a
29 29 $ hg commit -m 0
30 30
31 31 bookmark X moved to rev 0
32 32
33 33 $ hg bookmarks
34 34 * X 0:f7b1eb17ad24
35 35
36 36 look up bookmark
37 37
38 38 $ hg log -r X
39 39 changeset: 0:f7b1eb17ad24
40 40 bookmark: X
41 41 tag: tip
42 42 user: test
43 43 date: Thu Jan 01 00:00:00 1970 +0000
44 44 summary: 0
45 45
46 46
47 47 second bookmark for rev 0, command should work even with ui.strict on
48 48
49 49 $ hg --config ui.strict=1 bookmark X2
50 50
51 51 bookmark rev -1 again
52 52
53 53 $ hg bookmark -r null Y
54 54
55 55 list bookmarks
56 56
57 57 $ hg bookmarks
58 58 X 0:f7b1eb17ad24
59 59 * X2 0:f7b1eb17ad24
60 60 Y -1:000000000000
61 61
62 62 $ echo b > b
63 63 $ hg add b
64 64 $ hg commit -m 1
65 65
66 66 $ hg bookmarks -Tjson
67 67 [
68 68 {
69 69 "active": false,
70 70 "bookmark": "X",
71 71 "node": "f7b1eb17ad24730a1651fccd46c43826d1bbc2ac",
72 72 "rev": 0
73 73 },
74 74 {
75 75 "active": true,
76 76 "bookmark": "X2",
77 77 "node": "925d80f479bb026b0fb3deb27503780b13f74123",
78 78 "rev": 1
79 79 },
80 80 {
81 81 "active": false,
82 82 "bookmark": "Y",
83 83 "node": "0000000000000000000000000000000000000000",
84 84 "rev": -1
85 85 }
86 86 ]
87 87
88 88 bookmarks revset
89 89
90 90 $ hg log -r 'bookmark()'
91 91 changeset: 0:f7b1eb17ad24
92 92 bookmark: X
93 93 user: test
94 94 date: Thu Jan 01 00:00:00 1970 +0000
95 95 summary: 0
96 96
97 97 changeset: 1:925d80f479bb
98 98 bookmark: X2
99 99 tag: tip
100 100 user: test
101 101 date: Thu Jan 01 00:00:00 1970 +0000
102 102 summary: 1
103 103
104 104 $ hg log -r 'bookmark(Y)'
105 105 $ hg log -r 'bookmark(X2)'
106 106 changeset: 1:925d80f479bb
107 107 bookmark: X2
108 108 tag: tip
109 109 user: test
110 110 date: Thu Jan 01 00:00:00 1970 +0000
111 111 summary: 1
112 112
113 113 $ hg log -r 'bookmark("re:X")'
114 114 changeset: 0:f7b1eb17ad24
115 115 bookmark: X
116 116 user: test
117 117 date: Thu Jan 01 00:00:00 1970 +0000
118 118 summary: 0
119 119
120 120 changeset: 1:925d80f479bb
121 121 bookmark: X2
122 122 tag: tip
123 123 user: test
124 124 date: Thu Jan 01 00:00:00 1970 +0000
125 125 summary: 1
126 126
127 127 $ hg log -r 'bookmark("literal:X")'
128 128 changeset: 0:f7b1eb17ad24
129 129 bookmark: X
130 130 user: test
131 131 date: Thu Jan 01 00:00:00 1970 +0000
132 132 summary: 0
133 133
134 134
135 135 $ hg log -r 'bookmark(unknown)'
136 136 abort: bookmark 'unknown' does not exist!
137 137 [255]
138 $ hg log -r 'bookmark("literal:unknown")'
139 abort: bookmark 'unknown' does not exist!
140 [255]
138 141 $ hg log -r 'bookmark("re:unknown")'
139 142 abort: no bookmarks exist that match 'unknown'!
140 143 [255]
141 144 $ hg log -r 'present(bookmark("literal:unknown"))'
142 145 $ hg log -r 'present(bookmark("re:unknown"))'
143 146
144 147 $ hg help revsets | grep 'bookmark('
145 148 "bookmark([name])"
146 149
147 150 bookmarks X and X2 moved to rev 1, Y at rev -1
148 151
149 152 $ hg bookmarks
150 153 X 0:f7b1eb17ad24
151 154 * X2 1:925d80f479bb
152 155 Y -1:000000000000
153 156
154 157 bookmark rev 0 again
155 158
156 159 $ hg bookmark -r 0 Z
157 160
158 161 $ hg update X
159 162 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
160 163 (activating bookmark X)
161 164 $ echo c > c
162 165 $ hg add c
163 166 $ hg commit -m 2
164 167 created new head
165 168
166 169 bookmarks X moved to rev 2, Y at rev -1, Z at rev 0
167 170
168 171 $ hg bookmarks
169 172 * X 2:db815d6d32e6
170 173 X2 1:925d80f479bb
171 174 Y -1:000000000000
172 175 Z 0:f7b1eb17ad24
173 176
174 177 rename nonexistent bookmark
175 178
176 179 $ hg bookmark -m A B
177 180 abort: bookmark 'A' does not exist
178 181 [255]
179 182
180 183 rename to existent bookmark
181 184
182 185 $ hg bookmark -m X Y
183 186 abort: bookmark 'Y' already exists (use -f to force)
184 187 [255]
185 188
186 189 force rename to existent bookmark
187 190
188 191 $ hg bookmark -f -m X Y
189 192
190 193 list bookmarks
191 194
192 195 $ hg bookmark
193 196 X2 1:925d80f479bb
194 197 * Y 2:db815d6d32e6
195 198 Z 0:f7b1eb17ad24
196 199
197 200 bookmarks from a revset
198 201 $ hg bookmark -r '.^1' REVSET
199 202 $ hg bookmark -r ':tip' TIP
200 203 $ hg up -q TIP
201 204 $ hg bookmarks
202 205 REVSET 0:f7b1eb17ad24
203 206 * TIP 2:db815d6d32e6
204 207 X2 1:925d80f479bb
205 208 Y 2:db815d6d32e6
206 209 Z 0:f7b1eb17ad24
207 210
208 211 $ hg bookmark -d REVSET
209 212 $ hg bookmark -d TIP
210 213
211 214 rename without new name or multiple names
212 215
213 216 $ hg bookmark -m Y
214 217 abort: new bookmark name required
215 218 [255]
216 219 $ hg bookmark -m Y Y2 Y3
217 220 abort: only one new bookmark name allowed
218 221 [255]
219 222
220 223 delete without name
221 224
222 225 $ hg bookmark -d
223 226 abort: bookmark name required
224 227 [255]
225 228
226 229 delete nonexistent bookmark
227 230
228 231 $ hg bookmark -d A
229 232 abort: bookmark 'A' does not exist
230 233 [255]
231 234
232 235 bookmark name with spaces should be stripped
233 236
234 237 $ hg bookmark ' x y '
235 238
236 239 list bookmarks
237 240
238 241 $ hg bookmarks
239 242 X2 1:925d80f479bb
240 243 Y 2:db815d6d32e6
241 244 Z 0:f7b1eb17ad24
242 245 * x y 2:db815d6d32e6
243 246
244 247 look up stripped bookmark name
245 248
246 249 $ hg log -r '"x y"'
247 250 changeset: 2:db815d6d32e6
248 251 bookmark: Y
249 252 bookmark: x y
250 253 tag: tip
251 254 parent: 0:f7b1eb17ad24
252 255 user: test
253 256 date: Thu Jan 01 00:00:00 1970 +0000
254 257 summary: 2
255 258
256 259
257 260 reject bookmark name with newline
258 261
259 262 $ hg bookmark '
260 263 > '
261 264 abort: bookmark names cannot consist entirely of whitespace
262 265 [255]
263 266
264 267 $ hg bookmark -m Z '
265 268 > '
266 269 abort: bookmark names cannot consist entirely of whitespace
267 270 [255]
268 271
269 272 bookmark with reserved name
270 273
271 274 $ hg bookmark tip
272 275 abort: the name 'tip' is reserved
273 276 [255]
274 277
275 278 $ hg bookmark .
276 279 abort: the name '.' is reserved
277 280 [255]
278 281
279 282 $ hg bookmark null
280 283 abort: the name 'null' is reserved
281 284 [255]
282 285
283 286
284 287 bookmark with existing name
285 288
286 289 $ hg bookmark X2
287 290 abort: bookmark 'X2' already exists (use -f to force)
288 291 [255]
289 292
290 293 $ hg bookmark -m Y Z
291 294 abort: bookmark 'Z' already exists (use -f to force)
292 295 [255]
293 296
294 297 bookmark with name of branch
295 298
296 299 $ hg bookmark default
297 300 abort: a bookmark cannot have the name of an existing branch
298 301 [255]
299 302
300 303 $ hg bookmark -m Y default
301 304 abort: a bookmark cannot have the name of an existing branch
302 305 [255]
303 306
304 307 bookmark with integer name
305 308
306 309 $ hg bookmark 10
307 310 abort: cannot use an integer as a name
308 311 [255]
309 312
310 313 incompatible options
311 314
312 315 $ hg bookmark -m Y -d Z
313 316 abort: --delete and --rename are incompatible
314 317 [255]
315 318
316 319 $ hg bookmark -r 1 -d Z
317 320 abort: --rev is incompatible with --delete
318 321 [255]
319 322
320 323 $ hg bookmark -r 1 -m Z Y
321 324 abort: --rev is incompatible with --rename
322 325 [255]
323 326
324 327 force bookmark with existing name
325 328
326 329 $ hg bookmark -f X2
327 330
328 331 force bookmark back to where it was, should deactivate it
329 332
330 333 $ hg bookmark -fr1 X2
331 334 $ hg bookmarks
332 335 X2 1:925d80f479bb
333 336 Y 2:db815d6d32e6
334 337 Z 0:f7b1eb17ad24
335 338 x y 2:db815d6d32e6
336 339
337 340 forward bookmark to descendant without --force
338 341
339 342 $ hg bookmark Z
340 343 moving bookmark 'Z' forward from f7b1eb17ad24
341 344
342 345 list bookmarks
343 346
344 347 $ hg bookmark
345 348 X2 1:925d80f479bb
346 349 Y 2:db815d6d32e6
347 350 * Z 2:db815d6d32e6
348 351 x y 2:db815d6d32e6
349 352
350 353 revision but no bookmark name
351 354
352 355 $ hg bookmark -r .
353 356 abort: bookmark name required
354 357 [255]
355 358
356 359 bookmark name with whitespace only
357 360
358 361 $ hg bookmark ' '
359 362 abort: bookmark names cannot consist entirely of whitespace
360 363 [255]
361 364
362 365 $ hg bookmark -m Y ' '
363 366 abort: bookmark names cannot consist entirely of whitespace
364 367 [255]
365 368
366 369 invalid bookmark
367 370
368 371 $ hg bookmark 'foo:bar'
369 372 abort: ':' cannot be used in a name
370 373 [255]
371 374
372 375 $ hg bookmark 'foo
373 376 > bar'
374 377 abort: '\n' cannot be used in a name
375 378 [255]
376 379
377 380 the bookmark extension should be ignored now that it is part of core
378 381
379 382 $ echo "[extensions]" >> $HGRCPATH
380 383 $ echo "bookmarks=" >> $HGRCPATH
381 384 $ hg bookmarks
382 385 X2 1:925d80f479bb
383 386 Y 2:db815d6d32e6
384 387 * Z 2:db815d6d32e6
385 388 x y 2:db815d6d32e6
386 389
387 390 test summary
388 391
389 392 $ hg summary
390 393 parent: 2:db815d6d32e6 tip
391 394 2
392 395 branch: default
393 396 bookmarks: *Z Y x y
394 397 commit: (clean)
395 398 update: 1 new changesets, 2 branch heads (merge)
396 399 phases: 3 draft
397 400
398 401 test id
399 402
400 403 $ hg id
401 404 db815d6d32e6 tip Y/Z/x y
402 405
403 406 test rollback
404 407
405 408 $ echo foo > f1
406 409 $ hg bookmark tmp-rollback
407 410 $ hg ci -Amr
408 411 adding f1
409 412 $ hg bookmarks
410 413 X2 1:925d80f479bb
411 414 Y 2:db815d6d32e6
412 415 Z 2:db815d6d32e6
413 416 * tmp-rollback 3:2bf5cfec5864
414 417 x y 2:db815d6d32e6
415 418 $ hg rollback
416 419 repository tip rolled back to revision 2 (undo commit)
417 420 working directory now based on revision 2
418 421 $ hg bookmarks
419 422 X2 1:925d80f479bb
420 423 Y 2:db815d6d32e6
421 424 Z 2:db815d6d32e6
422 425 * tmp-rollback 2:db815d6d32e6
423 426 x y 2:db815d6d32e6
424 427 $ hg bookmark -f Z -r 1
425 428 $ hg rollback
426 429 repository tip rolled back to revision 2 (undo bookmark)
427 430 $ hg bookmarks
428 431 X2 1:925d80f479bb
429 432 Y 2:db815d6d32e6
430 433 Z 2:db815d6d32e6
431 434 * tmp-rollback 2:db815d6d32e6
432 435 x y 2:db815d6d32e6
433 436 $ hg bookmark -d tmp-rollback
434 437
435 438 activate bookmark on working dir parent without --force
436 439
437 440 $ hg bookmark --inactive Z
438 441 $ hg bookmark Z
439 442
440 443 test clone
441 444
442 445 $ hg bookmark -r 2 -i @
443 446 $ hg bookmark -r 2 -i a@
444 447 $ hg bookmarks
445 448 @ 2:db815d6d32e6
446 449 X2 1:925d80f479bb
447 450 Y 2:db815d6d32e6
448 451 * Z 2:db815d6d32e6
449 452 a@ 2:db815d6d32e6
450 453 x y 2:db815d6d32e6
451 454 $ hg clone . cloned-bookmarks
452 455 updating to bookmark @
453 456 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
454 457 $ hg -R cloned-bookmarks bookmarks
455 458 * @ 2:db815d6d32e6
456 459 X2 1:925d80f479bb
457 460 Y 2:db815d6d32e6
458 461 Z 2:db815d6d32e6
459 462 a@ 2:db815d6d32e6
460 463 x y 2:db815d6d32e6
461 464
462 465 test clone with pull protocol
463 466
464 467 $ hg clone --pull . cloned-bookmarks-pull
465 468 requesting all changes
466 469 adding changesets
467 470 adding manifests
468 471 adding file changes
469 472 added 3 changesets with 3 changes to 3 files (+1 heads)
470 473 updating to bookmark @
471 474 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
472 475 $ hg -R cloned-bookmarks-pull bookmarks
473 476 * @ 2:db815d6d32e6
474 477 X2 1:925d80f479bb
475 478 Y 2:db815d6d32e6
476 479 Z 2:db815d6d32e6
477 480 a@ 2:db815d6d32e6
478 481 x y 2:db815d6d32e6
479 482
480 483 delete multiple bookmarks at once
481 484
482 485 $ hg bookmark -d @ a@
483 486
484 487 test clone with a bookmark named "default" (issue3677)
485 488
486 489 $ hg bookmark -r 1 -f -i default
487 490 $ hg clone . cloned-bookmark-default
488 491 updating to branch default
489 492 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
490 493 $ hg -R cloned-bookmark-default bookmarks
491 494 X2 1:925d80f479bb
492 495 Y 2:db815d6d32e6
493 496 Z 2:db815d6d32e6
494 497 default 1:925d80f479bb
495 498 x y 2:db815d6d32e6
496 499 $ hg -R cloned-bookmark-default parents -q
497 500 2:db815d6d32e6
498 501 $ hg bookmark -d default
499 502
500 503 test clone with a specific revision
501 504
502 505 $ hg clone -r 925d80 . cloned-bookmarks-rev
503 506 adding changesets
504 507 adding manifests
505 508 adding file changes
506 509 added 2 changesets with 2 changes to 2 files
507 510 updating to branch default
508 511 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
509 512 $ hg -R cloned-bookmarks-rev bookmarks
510 513 X2 1:925d80f479bb
511 514
512 515 test clone with update to a bookmark
513 516
514 517 $ hg clone -u Z . ../cloned-bookmarks-update
515 518 updating to branch default
516 519 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
517 520 $ hg -R ../cloned-bookmarks-update bookmarks
518 521 X2 1:925d80f479bb
519 522 Y 2:db815d6d32e6
520 523 * Z 2:db815d6d32e6
521 524 x y 2:db815d6d32e6
522 525
523 526 create bundle with two heads
524 527
525 528 $ hg clone . tobundle
526 529 updating to branch default
527 530 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
528 531 $ echo x > tobundle/x
529 532 $ hg -R tobundle add tobundle/x
530 533 $ hg -R tobundle commit -m'x'
531 534 $ hg -R tobundle update -r -2
532 535 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
533 536 $ echo y > tobundle/y
534 537 $ hg -R tobundle branch test
535 538 marked working directory as branch test
536 539 (branches are permanent and global, did you want a bookmark?)
537 540 $ hg -R tobundle add tobundle/y
538 541 $ hg -R tobundle commit -m'y'
539 542 $ hg -R tobundle bundle tobundle.hg
540 543 searching for changes
541 544 2 changesets found
542 545 $ hg unbundle tobundle.hg
543 546 adding changesets
544 547 adding manifests
545 548 adding file changes
546 549 added 2 changesets with 2 changes to 2 files (+1 heads)
547 550 (run 'hg heads' to see heads, 'hg merge' to merge)
548 551
549 552 update to active bookmark if it's not the parent
550 553
551 554 $ hg summary
552 555 parent: 2:db815d6d32e6
553 556 2
554 557 branch: default
555 558 bookmarks: *Z Y x y
556 559 commit: 1 added, 1 unknown (new branch head)
557 560 update: 2 new changesets (update)
558 561 phases: 5 draft
559 562 $ hg update
560 563 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
561 564 updating bookmark Z
562 565 $ hg bookmarks
563 566 X2 1:925d80f479bb
564 567 Y 2:db815d6d32e6
565 568 * Z 3:125c9a1d6df6
566 569 x y 2:db815d6d32e6
567 570
568 571 pull --update works the same as pull && update
569 572
570 573 $ hg bookmark -r3 Y
571 574 moving bookmark 'Y' forward from db815d6d32e6
572 575 $ cp -r ../cloned-bookmarks-update ../cloned-bookmarks-manual-update
573 576
574 577 (manual version)
575 578
576 579 $ hg -R ../cloned-bookmarks-manual-update update Y
577 580 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
578 581 (activating bookmark Y)
579 582 $ hg -R ../cloned-bookmarks-manual-update pull .
580 583 pulling from .
581 584 searching for changes
582 585 adding changesets
583 586 adding manifests
584 587 adding file changes
585 588 added 2 changesets with 2 changes to 2 files (+1 heads)
586 589 updating bookmark Y
587 590 updating bookmark Z
588 591 (run 'hg heads' to see heads, 'hg merge' to merge)
589 592
 590 593 (# tests strange but with --date crashing when bookmarks have to move)
591 594
592 595 $ hg -R ../cloned-bookmarks-manual-update update -d 1986
593 596 abort: revision matching date not found
594 597 [255]
595 598 $ hg -R ../cloned-bookmarks-manual-update update
596 599 updating to active bookmark Y
597 600 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
598 601 (activating bookmark Y)
599 602
600 603 (all in one version)
601 604
602 605 $ hg -R ../cloned-bookmarks-update update Y
603 606 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
604 607 (activating bookmark Y)
605 608 $ hg -R ../cloned-bookmarks-update pull --update .
606 609 pulling from .
607 610 searching for changes
608 611 adding changesets
609 612 adding manifests
610 613 adding file changes
611 614 added 2 changesets with 2 changes to 2 files (+1 heads)
612 615 updating bookmark Y
613 616 updating bookmark Z
614 617 updating to active bookmark Y
615 618 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
616 619
 617 620 test wrongly formatted bookmark
618 621
619 622 $ echo '' >> .hg/bookmarks
620 623 $ hg bookmarks
621 624 X2 1:925d80f479bb
622 625 Y 3:125c9a1d6df6
623 626 * Z 3:125c9a1d6df6
624 627 x y 2:db815d6d32e6
625 628 $ echo "Ican'thasformatedlines" >> .hg/bookmarks
626 629 $ hg bookmarks
627 630 malformed line in .hg/bookmarks: "Ican'thasformatedlines"
628 631 X2 1:925d80f479bb
629 632 Y 3:125c9a1d6df6
630 633 * Z 3:125c9a1d6df6
631 634 x y 2:db815d6d32e6
632 635
633 636 test missing revisions
634 637
635 638 $ echo "925d80f479bc z" > .hg/bookmarks
636 639 $ hg book
637 640 no bookmarks set
638 641
639 642 test stripping a non-checked-out but bookmarked revision
640 643
641 644 $ hg log --graph
642 645 o changeset: 4:9ba5f110a0b3
643 646 | branch: test
644 647 | tag: tip
645 648 | parent: 2:db815d6d32e6
646 649 | user: test
647 650 | date: Thu Jan 01 00:00:00 1970 +0000
648 651 | summary: y
649 652 |
650 653 | @ changeset: 3:125c9a1d6df6
651 654 |/ user: test
652 655 | date: Thu Jan 01 00:00:00 1970 +0000
653 656 | summary: x
654 657 |
655 658 o changeset: 2:db815d6d32e6
656 659 | parent: 0:f7b1eb17ad24
657 660 | user: test
658 661 | date: Thu Jan 01 00:00:00 1970 +0000
659 662 | summary: 2
660 663 |
661 664 | o changeset: 1:925d80f479bb
662 665 |/ user: test
663 666 | date: Thu Jan 01 00:00:00 1970 +0000
664 667 | summary: 1
665 668 |
666 669 o changeset: 0:f7b1eb17ad24
667 670 user: test
668 671 date: Thu Jan 01 00:00:00 1970 +0000
669 672 summary: 0
670 673
671 674 $ hg book should-end-on-two
672 675 $ hg co --clean 4
673 676 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
674 677 (leaving bookmark should-end-on-two)
675 678 $ hg book four
676 679 $ hg --config extensions.mq= strip 3
677 680 saved backup bundle to * (glob)
678 681 should-end-on-two should end up pointing to revision 2, as that's the
679 682 tipmost surviving ancestor of the stripped revision.
680 683 $ hg log --graph
681 684 @ changeset: 3:9ba5f110a0b3
682 685 | branch: test
683 686 | bookmark: four
684 687 | tag: tip
685 688 | user: test
686 689 | date: Thu Jan 01 00:00:00 1970 +0000
687 690 | summary: y
688 691 |
689 692 o changeset: 2:db815d6d32e6
690 693 | bookmark: should-end-on-two
691 694 | parent: 0:f7b1eb17ad24
692 695 | user: test
693 696 | date: Thu Jan 01 00:00:00 1970 +0000
694 697 | summary: 2
695 698 |
696 699 | o changeset: 1:925d80f479bb
697 700 |/ user: test
698 701 | date: Thu Jan 01 00:00:00 1970 +0000
699 702 | summary: 1
700 703 |
701 704 o changeset: 0:f7b1eb17ad24
702 705 user: test
703 706 date: Thu Jan 01 00:00:00 1970 +0000
704 707 summary: 0
705 708
706 709 test non-linear update not clearing active bookmark
707 710
708 711 $ hg up 1
709 712 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
710 713 (leaving bookmark four)
711 714 $ hg book drop
712 715 $ hg up -C
713 716 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
714 717 (leaving bookmark drop)
715 718 $ hg sum
716 719 parent: 2:db815d6d32e6
717 720 2
718 721 branch: default
719 722 bookmarks: should-end-on-two
720 723 commit: 2 unknown (clean)
721 724 update: 1 new changesets, 2 branch heads (merge)
722 725 phases: 4 draft
723 726 $ hg book
724 727 drop 1:925d80f479bb
725 728 four 3:9ba5f110a0b3
726 729 should-end-on-two 2:db815d6d32e6
727 730 $ hg book -d drop
728 731 $ hg up four
729 732 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
730 733 (activating bookmark four)
731 734
732 735 test clearing divergent bookmarks of linear ancestors
733 736
734 737 $ hg bookmark Z -r 0
735 738 $ hg bookmark Z@1 -r 1
736 739 $ hg bookmark Z@2 -r 2
737 740 $ hg bookmark Z@3 -r 3
738 741 $ hg book
739 742 Z 0:f7b1eb17ad24
740 743 Z@1 1:925d80f479bb
741 744 Z@2 2:db815d6d32e6
742 745 Z@3 3:9ba5f110a0b3
743 746 * four 3:9ba5f110a0b3
744 747 should-end-on-two 2:db815d6d32e6
745 748 $ hg bookmark Z
746 749 moving bookmark 'Z' forward from f7b1eb17ad24
747 750 $ hg book
748 751 * Z 3:9ba5f110a0b3
749 752 Z@1 1:925d80f479bb
750 753 four 3:9ba5f110a0b3
751 754 should-end-on-two 2:db815d6d32e6
752 755
753 756 test clearing only a single divergent bookmark across branches
754 757
755 758 $ hg book foo -r 1
756 759 $ hg book foo@1 -r 0
757 760 $ hg book foo@2 -r 2
758 761 $ hg book foo@3 -r 3
759 762 $ hg book foo -r foo@3
760 763 $ hg book
761 764 * Z 3:9ba5f110a0b3
762 765 Z@1 1:925d80f479bb
763 766 foo 3:9ba5f110a0b3
764 767 foo@1 0:f7b1eb17ad24
765 768 foo@2 2:db815d6d32e6
766 769 four 3:9ba5f110a0b3
767 770 should-end-on-two 2:db815d6d32e6
General Comments 0
You need to be logged in to leave comments. Login now