##// END OF EJS Templates
revset: use localrepo revbranchcache for branch name filtering...
Mads Kiilerich -
r23787:678f5386 default
parent child Browse files
Show More
@@ -1,3107 +1,3112 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery, hbisect, phases
10 10 import node
11 11 import heapq
12 12 import match as matchmod
13 13 from i18n import _
14 14 import encoding
15 15 import obsolete as obsmod
16 16 import pathutil
17 17 import repoview
18 18
def _revancestors(repo, revs, followfirst):
    """Like revlog.ancestors(), but supports followfirst."""
    # slice parentrevs() results to [:1] when only first parents are wanted
    cut = followfirst and 1 or None
    cl = repo.changelog

    def iterate():
        # Merge the ancestry of all starting revs, yielding each ancestor
        # exactly once in descending revision order.  Revs are pushed
        # negated onto a min-heap to get max-heap behavior.
        revqueue, revsnode = None, None
        h = []

        revs.sort(reverse=True)
        revqueue = util.deque(revs)
        if revqueue:
            revsnode = revqueue.popleft()
            heapq.heappush(h, -revsnode)

        seen = set([node.nullrev])
        while h:
            current = -heapq.heappop(h)
            if current not in seen:
                if revsnode and current == revsnode:
                    # lazily feed the next starting rev into the heap
                    if revqueue:
                        revsnode = revqueue.popleft()
                        heapq.heappush(h, -revsnode)
                seen.add(current)
                yield current
                for parent in cl.parentrevs(current)[:cut]:
                    if parent != node.nullrev:
                        heapq.heappush(h, -parent)

    return generatorset(iterate(), iterasc=False)
49 49
def _revdescendants(repo, revs, followfirst):
    """Like revlog.descendants() but supports followfirst."""
    # slice parentrevs() results to [:1] when only first parents count
    cut = followfirst and 1 or None

    def iterate():
        cl = repo.changelog
        first = min(revs)
        nullrev = node.nullrev
        if first == nullrev:
            # Are there nodes with a null first parent and a non-null
            # second one? Maybe. Do we care? Probably not.
            for i in cl:
                yield i
        else:
            # single ascending sweep: a rev is a descendant iff one of its
            # (possibly cut) parents was already marked as seen
            seen = set(revs)
            for i in cl.revs(first + 1):
                for x in cl.parentrevs(i)[:cut]:
                    if x != nullrev and x in seen:
                        seen.add(i)
                        yield i
                        break

    return generatorset(iterate(), iterasc=True)
73 73
def _revsbetween(repo, roots, heads):
    """Return all paths between roots and heads, inclusive of both endpoint
    sets."""
    if not roots:
        return baseset()
    parentrevs = repo.changelog.parentrevs
    visit = list(heads)
    reachable = set()
    seen = {}
    # revs below the lowest root can never lie on a path, so prune there
    minroot = min(roots)
    roots = set(roots)
    # open-code the post-order traversal due to the tiny size of
    # sys.getrecursionlimit()
    while visit:
        rev = visit.pop()
        if rev in roots:
            reachable.add(rev)
        parents = parentrevs(rev)
        seen[rev] = parents
        for parent in parents:
            if parent >= minroot and parent not in seen:
                visit.append(parent)
    if not reachable:
        return baseset()
    # propagate reachability upward; ascending order guarantees a rev's
    # parents are decided before the rev itself is inspected
    for rev in sorted(seen):
        for parent in seen[rev]:
            if parent in reachable:
                reachable.add(rev)
    return baseset(sorted(reachable))
103 103
# Grammar table consumed by the generic parser (see the parser module):
# each token maps to (binding strength, prefix action(s), infix/suffix
# action(s)); higher strength binds tighter.
elements = {
    "(": (21, ("group", 1, ")"), ("func", 1, ")")),
    "##": (20, None, ("_concat", 20)),
    "~": (18, None, ("ancestor", 18)),
    "^": (18, None, ("parent", 18), ("parentpost", 18)),
    "-": (5, ("negate", 19), ("minus", 5)),
    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "%": (5, None, ("only", 5), ("onlypost", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words the tokenizer must emit as operator tokens rather than symbols
keywords = set(['and', 'or', 'not'])
131 131
def tokenize(program, lookup=None):
    '''
    Parse a revset statement into a stream of tokens

    Check that @ is a valid unquoted token character (issue3686):
    >>> list(tokenize("@::"))
    [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]

    '''

    idx, size = 0, len(program)
    while idx < size:
        ch = program[idx]
        if ch.isspace(): # whitespace between tokens carries no meaning
            pass
        elif ch == ':' and program[idx:idx + 2] == '::': # look ahead carefully
            yield ('::', None, idx)
            idx += 1 # consume the second character
        elif ch == '.' and program[idx:idx + 2] == '..': # look ahead carefully
            yield ('..', None, idx)
            idx += 1 # consume the second character
        elif ch == '#' and program[idx:idx + 2] == '##': # look ahead carefully
            yield ('##', None, idx)
            idx += 1 # consume the second character
        elif ch in "():,-|&+!~^%": # handle simple operators
            yield (ch, None, idx)
        elif (ch in '"\'' or ch == 'r' and
              program[idx:idx + 2] in ("r'", 'r"')): # handle quoted strings
            if ch == 'r':
                idx += 1
                ch = program[idx]
                unescape = lambda x: x # raw string: keep escapes verbatim
            else:
                unescape = lambda x: x.decode('string-escape')
            idx += 1
            begin = idx
            while idx < size: # scan for the matching closing quote
                nxt = program[idx]
                if nxt == '\\': # jump over escaped characters
                    idx += 2
                    continue
                if nxt == ch:
                    yield ('string', unescape(program[begin:idx]), begin)
                    break
                idx += 1
            else:
                raise error.ParseError(_("unterminated string"), begin)
        # accumulate a symbol or keyword
        elif ch.isalnum() or ch in '._@' or ord(ch) > 127:
            begin = idx
            idx += 1
            while idx < size: # scan to the end of the symbol
                nxt = program[idx]
                if not (nxt.isalnum() or nxt in "-._/@" or ord(nxt) > 127):
                    break
                if nxt == '.' and program[idx - 1] == '.': # special case for ..
                    idx -= 1
                    break
                idx += 1
            word = program[begin:idx]
            if word in keywords: # operator keywords
                yield (word, None, begin)
            elif '-' in word:
                # some jerk gave us foo-bar-baz, try to check if it's a symbol
                if lookup and lookup(word):
                    # looks like a real symbol
                    yield ('symbol', word, begin)
                else:
                    # looks like an expression
                    chunks = word.split('-')
                    for chunk in chunks[:-1]:
                        if chunk: # possible consecutive -
                            yield ('symbol', chunk, begin)
                            begin += len(chunk)
                        yield ('-', None, idx)
                        begin += 1
                    if chunks[-1]: # possible trailing -
                        yield ('symbol', chunks[-1], begin)
            else:
                yield ('symbol', word, begin)
            idx -= 1
        else:
            raise error.ParseError(_("syntax error"), idx)
        idx += 1
    yield ('end', None, idx)
217 217
218 218 # helpers
219 219
def getstring(x, err):
    """Return the payload of a 'string' or 'symbol' parse node.

    Raises ParseError(err) for any other node (or for no node at all).
    """
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)

def getlist(x):
    """Flatten a tree of nested 'list' parse nodes into a python list."""
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    return getlist(x[1]) + [x[2]]

def getargs(x, min, max, err):
    """Flatten an argument tree, enforcing an arity of min..max.

    A negative max means "no upper bound". Raises ParseError(err) when
    the count is out of range.
    """
    args = getlist(x)
    if len(args) < min:
        raise error.ParseError(err)
    if 0 <= max < len(args):
        raise error.ParseError(err)
    return args
237 237
def getset(repo, subset, x):
    # Evaluate parse tree x against subset, normalizing the result to a
    # smartset (anything exposing isascending) so set operations compose.
    if not x:
        raise error.ParseError(_("missing argument"))
    s = methods[x[0]](repo, subset, *x[1:])
    if util.safehasattr(s, 'isascending'):
        return s
    return baseset(s)

def _getrevsource(repo, r):
    # Return the rev that r was grafted/transplanted/rebased from, or
    # None when r records no source or the source is not known locally.
    extra = repo[r].extra()
    for label in ('source', 'transplant_source', 'rebase_source'):
        if label in extra:
            try:
                return repo[extra[label]].rev()
            except error.RepoLookupError:
                # recorded source changeset is absent from this repo
                pass
    return None
255 255
256 256 # operator methods
257 257
def stringset(repo, subset, x):
    # Resolve a string revision identifier and intersect it with subset.
    # nullrev (-1) is only kept when subset spans the whole repo.
    x = repo[x].rev()
    if x == -1 and len(subset) == len(repo):
        return baseset([-1])
    if x in subset:
        return baseset([x])
    return baseset()

def symbolset(repo, subset, x):
    # Bare symbol: reject predicate names used without parentheses,
    # otherwise treat it like a string revision identifier.
    if x in symbols:
        raise error.ParseError(_("can't use %s here") % x)
    return stringset(repo, subset, x)

def rangeset(repo, subset, x, y):
    # 'x:y' - resolve both endpoints against the full repo, then span
    # from the first rev of x to the last rev of y (either direction).
    m = getset(repo, fullreposet(repo), x)
    n = getset(repo, fullreposet(repo), y)

    if not m or not n:
        return baseset()
    m, n = m.first(), n.last()

    if m < n:
        r = spanset(repo, m, n + 1)
    else:
        # descending range, e.g. 'tip:0'
        r = spanset(repo, m, n - 1)
    return r & subset

def dagrange(repo, subset, x, y):
    # 'x::y' - every rev on a DAG path between the two endpoint sets
    r = spanset(repo)
    xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
    return xs & subset

def andset(repo, subset, x, y):
    # 'x and y' - evaluate y against the revs already matched by x
    return getset(repo, getset(repo, subset, x), y)

def orset(repo, subset, x, y):
    # 'x or y' - evaluate y only against revs x did not match
    xl = getset(repo, subset, x)
    yl = getset(repo, subset - xl, y)
    return xl + yl

def notset(repo, subset, x):
    # 'not x'
    return subset - getset(repo, subset, x)

def listset(repo, subset, a, b):
    # a bare 'a, b' list is only legal as a function argument
    raise error.ParseError(_("can't use a list in this context"))

def func(repo, subset, a, b):
    # 'name(args)' - dispatch to the predicate table
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](repo, subset, b)
    raise error.ParseError(_("not a function: %s") % a[1])
308 308
309 309 # functions
310 310
def adds(repo, subset, x):
    """``adds(pattern)``
    Changesets that add a file matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "adds" is a keyword
    pat = getstring(x, _("adds requires a pattern"))
    # field 1 of the repo.status() tuple holds added files
    return checkstatus(repo, subset, pat, 1)

def ancestor(repo, subset, x):
    """``ancestor(*changeset)``
    A greatest common ancestor of the changesets.

    Accepts 0 or more changesets.
    Will return empty list when passed no args.
    Greatest common ancestor of a single changeset is that changeset.
    """
    # i18n: "ancestor" is a keyword
    l = getlist(x)
    rl = spanset(repo)
    anc = None

    # (getset(repo, rl, i) for i in l) generates a list of lists
    for revs in (getset(repo, rl, i) for i in l):
        for r in revs:
            if anc is None:
                anc = repo[r]
            else:
                # fold pairwise: gca of the accumulator and the next rev
                anc = anc.ancestor(repo[r])

    if anc is not None and anc.rev() in subset:
        return baseset([anc.rev()])
    return baseset()
347 347
def _ancestors(repo, subset, x, followfirst=False):
    # shared implementation of ancestors() and _firstancestors()
    heads = getset(repo, spanset(repo), x)
    if not heads:
        return baseset()
    s = _revancestors(repo, heads, followfirst)
    return subset & s

def ancestors(repo, subset, x):
    """``ancestors(set)``
    Changesets that are ancestors of a changeset in set.
    """
    return _ancestors(repo, subset, x)

def _firstancestors(repo, subset, x):
    # ``_firstancestors(set)``
    # Like ``ancestors(set)`` but follows only the first parents.
    return _ancestors(repo, subset, x, followfirst=True)

def ancestorspec(repo, subset, x, n):
    """``set~n``
    Changesets that are the Nth ancestor (first parents only) of a changeset
    in set.
    """
    try:
        n = int(n[1])
    except (TypeError, ValueError):
        raise error.ParseError(_("~ expects a number"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        # walk n steps up the first-parent chain
        for i in range(n):
            r = cl.parentrevs(r)[0]
        ps.add(r)
    return subset & ps

def author(repo, subset, x):
    """``author(string)``
    Alias for ``user(string)``.
    """
    # i18n: "author" is a keyword
    n = encoding.lower(getstring(x, _("author requires a string")))
    kind, pattern, matcher = _substringmatcher(n)
    # case-insensitive match against each rev's committer field
    return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
391 391
def bisect(repo, subset, x):
    """``bisect(string)``
    Changesets marked in the specified bisect status:

    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
    - ``goods``, ``bads`` : csets topologically good/bad
    - ``range`` : csets taking part in the bisection
    - ``pruned`` : csets that are goods, bads or skipped
    - ``untested`` : csets whose fate is yet unknown
    - ``ignored`` : csets ignored due to DAG topology
    - ``current`` : the cset currently being bisected
    """
    # i18n: "bisect" is a keyword
    status = getstring(x, _("bisect requires a string")).lower()
    state = set(hbisect.get(repo, status))
    return subset & state

# Backward-compatibility
# - no help entry so that we do not advertise it any more
def bisected(repo, subset, x):
    return bisect(repo, subset, x)

def bookmark(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a bookmark that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "bookmark" is a keyword
    args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = getstring(args[0],
                       # i18n: "bookmark" is a keyword
                       _('the argument to bookmark must be a string'))
        kind, pattern, matcher = _stringmatcher(bm)
        bms = set()
        if kind == 'literal':
            # exact name: direct lookup, abort if absent
            bmrev = repo._bookmarks.get(pattern, None)
            if not bmrev:
                raise util.Abort(_("bookmark '%s' does not exist") % bm)
            bms.add(repo[bmrev].rev())
        else:
            # pattern ('re:'): collect every matching bookmark
            matchrevs = set()
            for name, bmrev in repo._bookmarks.iteritems():
                if matcher(name):
                    matchrevs.add(bmrev)
            if not matchrevs:
                raise util.Abort(_("no bookmarks exist that match '%s'")
                                 % pattern)
            for bmrev in matchrevs:
                bms.add(repo[bmrev].rev())
    else:
        # no argument: every bookmarked rev
        bms = set([repo[r].rev()
                   for r in repo._bookmarks.values()])
    bms -= set([node.nullrev])
    return subset & bms
450 450
def branch(repo, subset, x):
    """``branch(string or set)``
    All changesets belonging to the given branch or the branches of the given
    changesets.

    If `string` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a branch that actually starts with `re:`,
    use the prefix `literal:`.
    """
    import branchmap
    # Read branch names through the unfiltered repo's revbranchcache
    # instead of building a changectx per rev; a rev's branch never
    # changes, so the cache is valid under any repoview filter.
    urepo = repo.unfiltered()
    ucl = urepo.changelog
    getbi = branchmap.revbranchcache(urepo).branchinfo

    try:
        b = getstring(x, '')
    except error.ParseError:
        # not a string, but another revspec, e.g. tip()
        pass
    else:
        kind, pattern, matcher = _stringmatcher(b)
        if kind == 'literal':
            # note: falls through to the revspec case if no branch with
            # this name exists
            if pattern in repo.branchmap():
                # branchinfo()[0] is the branch name
                return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))
        else:
            return subset.filter(lambda r: matcher(getbi(ucl, r)[0]))

    # set argument: collect the branches of the given revs, then select
    # every rev on any of those branches (plus the given revs themselves)
    s = getset(repo, spanset(repo), x)
    b = set()
    for r in s:
        b.add(getbi(ucl, r)[0])
    c = s.__contains__
    return subset.filter(lambda r: c(r) or getbi(ucl, r)[0] in b)
def bumped(repo, subset, x):
    """``bumped()``
    Mutable changesets marked as successors of public changesets.

    Only non-public and non-obsolete changesets can be `bumped`.
    """
    # i18n: "bumped" is a keyword
    getargs(x, 0, 0, _("bumped takes no arguments"))
    bumped = obsmod.getrevs(repo, 'bumped')
    return subset & bumped

def bundle(repo, subset, x):
    """``bundle()``
    Changesets in the bundle.

    Bundle must be specified by the -R option."""

    try:
        # bundlerevs only exists on a bundle repository's changelog
        bundlerevs = repo.changelog.bundlerevs
    except AttributeError:
        raise util.Abort(_("no bundle provided - specify with -R"))
    return subset & bundlerevs

def checkstatus(repo, subset, pat, field):
    # Filter subset to csets whose status list selected by 'field' (an
    # index into the repo.status() tuple, e.g. 1 == added as used by
    # adds()) contains a file matching pat.
    hasset = matchmod.patkind(pat) == 'set'

    mcache = [None]
    def matches(x):
        c = repo[x]
        if not mcache[0] or hasset:
            # fileset patterns depend on the ctx, so rebuild per rev
            mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
        m = mcache[0]
        fname = None
        if not m.anypats() and len(m.files()) == 1:
            # single literal file: cheap containment checks suffice
            fname = m.files()[0]
        if fname is not None:
            if fname not in c.files():
                return False
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                return False
        files = repo.status(c.p1().node(), c.node())[field]
        if fname is not None:
            if fname in files:
                return True
        else:
            for f in files:
                if m(f):
                    return True

    return subset.filter(matches)
536 541
def _children(repo, narrow, parentset):
    # revs of 'narrow' that have at least one parent in 'parentset'
    cs = set()
    if not parentset:
        return baseset(cs)
    pr = repo.changelog.parentrevs
    # a child always has a higher rev than its parents, so anything at or
    # below the smallest parent can be skipped
    minrev = min(parentset)
    for r in narrow:
        if r <= minrev:
            continue
        for p in pr(r):
            if p in parentset:
                cs.add(r)
    return baseset(cs)

def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    s = getset(repo, fullreposet(repo), x)
    cs = _children(repo, subset, s)
    return subset & cs

def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    return subset.filter(lambda r: repo[r].closesbranch())
566 571
def contains(repo, subset, x):
    """``contains(pattern)``
    The revision's manifest contains a file matching pattern (but might not
    modify it). See :hg:`help patterns` for information about file patterns.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))

    def matches(x):
        if not matchmod.patkind(pat):
            # plain path: a direct manifest membership test is enough
            pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
            if pats in repo[x]:
                return True
        else:
            # real pattern: scan the whole manifest
            c = repo[x]
            m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
            for f in c.manifest():
                if m(f):
                    return True
        return False

    return subset.filter(matches)

def converted(repo, subset, x):
    """``converted([id])``
    Changesets converted from the given identifier in the old repository if
    present, or all converted changesets if no identifier is specified.
    """

    # There is exactly no chance of resolving the revision, so do a simple
    # string compare and hope for the best

    rev = None
    # i18n: "converted" is a keyword
    l = getargs(x, 0, 1, _('converted takes one or no arguments'))
    if l:
        # i18n: "converted" is a keyword
        rev = getstring(l[0], _('converted requires a revision'))

    def _matchvalue(r):
        # convert_revision holds the source identifier recorded by convert
        source = repo[r].extra().get('convert_revision', None)
        return source is not None and (rev is None or source.startswith(rev))

    return subset.filter(lambda r: _matchvalue(r))

def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    dm = util.matchdate(ds)
    return subset.filter(lambda x: dm(repo[x].date()[0]))
624 629
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    ds = encoding.lower(getstring(x, _("desc requires a string")))

    def matches(x):
        c = repo[x]
        return ds in encoding.lower(c.description())

    return subset.filter(matches)

def _descendants(repo, subset, x, followfirst=False):
    # shared implementation of descendants() and _firstdescendants()
    roots = getset(repo, spanset(repo), x)
    if not roots:
        return baseset()
    s = _revdescendants(repo, roots, followfirst)

    # Both sets need to be ascending in order to lazily return the union
    # in the correct order.
    base = subset & roots
    desc = subset & s
    result = base + desc
    if subset.isascending():
        result.sort()
    elif subset.isdescending():
        result.sort(reverse=True)
    else:
        result = subset & result
    return result

def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    return _descendants(repo, subset, x)

def _firstdescendants(repo, subset, x):
    # ``_firstdescendants(set)``
    # Like ``descendants(set)`` but follows only the first parents.
    return _descendants(repo, subset, x, followfirst=True)
667 672
def destination(repo, subset, x):
    """``destination([set])``
    Changesets that were created by a graft, transplant or rebase operation,
    with the given revisions specified as the source. Omitting the optional set
    is the same as passing all().
    """
    if x is not None:
        sources = getset(repo, spanset(repo), x)
    else:
        sources = getall(repo, spanset(repo), x)

    dests = set()

    # subset contains all of the possible destinations that can be returned, so
    # iterate over them and see if their source(s) were provided in the arg set.
    # Even if the immediate src of r is not in the arg set, src's source (or
    # further back) may be. Scanning back further than the immediate src allows
    # transitive transplants and rebases to yield the same results as transitive
    # grafts.
    for r in subset:
        src = _getrevsource(repo, r)
        lineage = None

        while src is not None:
            if lineage is None:
                lineage = list()

            lineage.append(r)

            # The visited lineage is a match if the current source is in the arg
            # set. Since every candidate dest is visited by way of iterating
            # subset, any dests further back in the lineage will be tested by a
            # different iteration over subset. Likewise, if the src was already
            # selected, the current lineage can be selected without going back
            # further.
            if src in sources or src in dests:
                dests.update(lineage)
                break

            r = src
            src = _getrevsource(repo, r)

    return subset.filter(dests.__contains__)
711 716
def divergent(repo, subset, x):
    """``divergent()``
    Final successors of changesets with an alternative set of final successors.
    """
    # i18n: "divergent" is a keyword
    getargs(x, 0, 0, _("divergent takes no arguments"))
    divergent = obsmod.getrevs(repo, 'divergent')
    return subset & divergent

def draft(repo, subset, x):
    """``draft()``
    Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _("draft takes no arguments"))
    phase = repo._phasecache.phase
    target = phases.draft
    condition = lambda r: phase(repo, r) == target
    # cache=False: phases can change between evaluations
    return subset.filter(condition, cache=False)

def extinct(repo, subset, x):
    """``extinct()``
    Obsolete changesets with obsolete descendants only.
    """
    # i18n: "extinct" is a keyword
    getargs(x, 0, 0, _("extinct takes no arguments"))
    extincts = obsmod.getrevs(repo, 'extinct')
    return subset & extincts

def extra(repo, subset, x):
    """``extra(label, [value])``
    Changesets with the given label in the extra metadata, with the given
    optional value.

    If `value` starts with `re:`, the remainder of the value is treated as
    a regular expression. To match a value that actually starts with `re:`,
    use the prefix `literal:`.
    """

    # i18n: "extra" is a keyword
    l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
    # i18n: "extra" is a keyword
    label = getstring(l[0], _('first argument to extra must be a string'))
    value = None

    if len(l) > 1:
        # i18n: "extra" is a keyword
        value = getstring(l[1], _('second argument to extra must be a string'))
        kind, value, matcher = _stringmatcher(value)

    def _matchvalue(r):
        extra = repo[r].extra()
        # with no value argument, mere presence of the label matches
        return label in extra and (value is None or matcher(extra[label]))

    return subset.filter(lambda r: _matchvalue(r))
766 771
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, visits only revisions mentioned in the file-level
    filelog, rather than filtering through all changesets (much faster, but
    doesn't include deletes or duplicate changes). For a slower, more accurate
    result, use ``file()``.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file exactly
    for efficiency.

    If some linkrev points to revisions filtered by the current repoview, we'll
    work around it to return a non-filtered value.
    """

    # i18n: "filelog" is a keyword
    pat = getstring(x, _("filelog requires a pattern"))
    s = set()
    cl = repo.changelog

    if not matchmod.patkind(pat):
        # plain path: visit exactly one filelog
        f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
        files = [f]
    else:
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
        files = (f for f in repo[None] if m(f))

    for f in files:
        backrevref = {} # final value for: changerev -> filerev
        lowestchild = {} # lowest known filerev child of a filerev
        delayed = [] # filerev with filtered linkrev, for post-processing
        lowesthead = None # cache for manifest content of all head revisions
        fl = repo.file(f)
        for fr in list(fl):
            lkr = rev = fl.linkrev(fr)
            if rev not in cl:
                # changerev pointed in linkrev is filtered
                # record it for post processing.
                delayed.append((fr, rev))
                continue
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

        # Post-processing of all filerevs we skipped because they were
        # filtered. If such filerevs have known and unfiltered children, this
        # means they have an unfiltered appearance out there. We'll use linkrev
        # adjustment to find one of these appearances. The lowest known child
        # will be used as a starting point because it is the best upper-bound we
        # have.
        #
        # This approach will fail when an unfiltered but linkrev-shadowed
        # appearance exists in a head changeset without unfiltered filerev
        # children anywhere.
        while delayed:
            # must be a descending iteration. To slowly fill lowest child
            # information that is of potential use by the next item.
            fr, rev = delayed.pop()
            lkr = rev

            child = lowestchild.get(fr)

            if child is None:
                # search for existence of this file revision in a head revision.
                # There are three possibilities:
                # - the revision exists in a head and we can find an
                #   introduction from there,
                # - the revision does not exist in a head because it has been
                #   changed since its introduction: we would have found a child
                #   and be in the other 'else' clause,
                # - all versions of the revision are hidden.
                if lowesthead is None:
                    lowesthead = {}
                    for h in repo.heads():
                        fnode = repo[h].manifest().get(f)
                        if fnode is not None:
                            lowesthead[fl.rev(fnode)] = h
                headrev = lowesthead.get(fr)
                if headrev is None:
                    # content is nowhere unfiltered
                    continue
                rev = repo[headrev][f].introrev()
            else:
                # the lowest known child is a good upper bound
                childcrev = backrevref[child]
                # XXX this does not guarantee returning the lowest
                # introduction of this revision, but this gives a
                # result which is a good start and will fit in most
                # cases. We probably need to fix the multiple
                # introductions case properly (report each
                # introduction, even for identical file revisions)
                # once and for all at some point anyway.
                for p in repo[childcrev][f].parents():
                    if p.filerev() == fr:
                        rev = p.rev()
                        break
            if rev == lkr: # no shadowed entry found
                # XXX This should never happen unless some manifest points
                # to biggish file revisions (like a revision that uses a
                # parent that never appears in the manifest ancestors)
                continue

            # Fill the data for the next iteration.
            for p in fl.parentrevs(fr):
                if 0 <= p and p not in lowestchild:
                    lowestchild[p] = fr
            backrevref[fr] = rev
            s.add(rev)

    return subset & s
881 886
882 887 def first(repo, subset, x):
883 888 """``first(set, [n])``
884 889 An alias for limit().
885 890 """
886 891 return limit(repo, subset, x)
887 892
888 893 def _follow(repo, subset, x, name, followfirst=False):
889 894 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
890 895 c = repo['.']
891 896 if l:
892 897 x = getstring(l[0], _("%s expected a filename") % name)
893 898 if x in c:
894 899 cx = c[x]
895 900 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
896 901 # include the revision responsible for the most recent version
897 902 s.add(cx.introrev())
898 903 else:
899 904 return baseset()
900 905 else:
901 906 s = _revancestors(repo, baseset([c.rev()]), followfirst)
902 907
903 908 return subset & s
904 909
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # all logic is in _follow(), shared with _followfirst()
    return _follow(repo, subset, x, 'follow')
912 917
def _followfirst(repo, subset, x):
    # ``_followfirst([file])``
    # Like ``follow([file])`` but follows only the first parent of
    # every revision or file revision.
    # Internal helper (deliberately has no docstring, so it does not
    # show up in the user-facing predicate help).
    return _follow(repo, subset, x, '_followfirst', followfirst=True)
918 923
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # the incoming subset is already the set being filtered, so 'all'
    # is simply the identity
    return subset
926 931
927 932 def grep(repo, subset, x):
928 933 """``grep(regex)``
929 934 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
930 935 to ensure special escape characters are handled correctly. Unlike
931 936 ``keyword(string)``, the match is case-sensitive.
932 937 """
933 938 try:
934 939 # i18n: "grep" is a keyword
935 940 gr = re.compile(getstring(x, _("grep requires a string")))
936 941 except re.error, e:
937 942 raise error.ParseError(_('invalid match pattern: %s') % e)
938 943
939 944 def matches(x):
940 945 c = repo[x]
941 946 for e in c.files() + [c.user(), c.description()]:
942 947 if gr.search(e):
943 948 return True
944 949 return False
945 950
946 951 return subset.filter(matches)
947 952
def _matchfiles(repo, subset, x):
    # _matchfiles takes a revset list of prefixed arguments:
    #
    #   [p:foo, i:bar, x:baz]
    #
    # builds a match object from them and filters subset. Allowed
    # prefixes are 'p:' for regular patterns, 'i:' for include
    # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
    # a revision identifier, or the empty string to reference the
    # working directory, from which the match object is
    # initialized. Use 'd:' to set the default matching mode, default
    # to 'glob'. At most one 'r:' and 'd:' argument can be passed.

    # i18n: "_matchfiles" is a keyword
    l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
    pats, inc, exc = [], [], []
    rev, default = None, None
    for arg in l:
        # i18n: "_matchfiles" is a keyword
        s = getstring(arg, _("_matchfiles requires string arguments"))
        # each argument is a two-character prefix plus its payload
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'revision'))
            rev = value
        elif prefix == 'd:':
            if default is not None:
                # i18n: "_matchfiles" is a keyword
                raise error.ParseError(_('_matchfiles expected at most one '
                                         'default mode'))
            default = value
        else:
            # i18n: "_matchfiles" is a keyword
            raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
    if not default:
        default = 'glob'

    m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
                       exclude=exc, ctx=repo[rev], default=default)

    def matches(x):
        # a changeset matches if it touches at least one matching file
        for f in repo[x].files():
            if m(f):
                return True
        return False

    return subset.filter(matches)
1003 1008
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.

    For a faster but less accurate result, consider using ``filelog()``
    instead.

    This predicate uses ``glob:`` as the default kind of pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    # delegate to _matchfiles with a single 'p:' (pattern) argument
    return _matchfiles(repo, subset, ('string', 'p:' + pat))
1016 1021
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    # collect the revision number of every head of every branch
    headrevs = set()
    for branchname, nodes in repo.branchmap().iteritems():
        for n in nodes:
            headrevs.add(repo[n].rev())
    return baseset(headrevs).filter(subset.__contains__)
1027 1032
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    # a head of the set is a member that is not a parent of any other
    # member, so subtract all in-set parents
    members = getset(repo, subset, x)
    return members - parents(repo, subset, x)
1035 1040
def hidden(repo, subset, x):
    """``hidden()``
    Hidden changesets.
    """
    # i18n: "hidden" is a keyword
    getargs(x, 0, 0, _("hidden takes no arguments"))
    # the revisions filtered out of the 'visible' repo view are exactly
    # the hidden ones
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    return subset & hiddenrevs
1044 1049
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        # case-insensitive substring search over filenames, user name
        # and description
        c = repo[r]
        for t in c.files() + [c.user(), c.description()]:
            if kw in encoding.lower(t):
                return True
        return False

    return subset.filter(matches)
1059 1064
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, spanset(repo), l[0])
    result = []
    it = iter(os)
    # Pull at most lim revisions from the argument set, keeping only those
    # also present in the incoming subset.  Note: revisions not in the
    # subset still consume part of the limit (historical behavior).
    # The loop variable is named 'i', not 'x', to avoid shadowing the
    # parse-tree argument; the try block is narrowed to the only call
    # that can raise StopIteration.
    for i in xrange(lim):
        try:
            y = it.next()
        except StopIteration:
            break
        if y in ss:
            result.append(y)
    return baseset(result)
1086 1091
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, spanset(repo), l[0])
    # walk the argument set from the end, mirroring limit()
    os.reverse()
    result = []
    it = iter(os)
    # Loop variable renamed from 'x' (which shadowed the parse-tree
    # argument); try block narrowed to the only raising call.
    for i in xrange(lim):
        try:
            y = it.next()
        except StopIteration:
            break
        if y in ss:
            result.append(y)
    return baseset(result)
1114 1119
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    os = getset(repo, spanset(repo), x)
    if not os:
        return baseset()
    highest = os.max()
    # the maximum only counts if it survives the incoming subset
    if highest not in subset:
        return baseset()
    return baseset([highest])
1125 1130
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog

    def ismerge(r):
        # a merge has a valid (non -1) second parent
        return cl.parentrevs(r)[1] != -1

    return subset.filter(ismerge)
1134 1139
def branchpoint(repo, subset, x):
    """``branchpoint()``
    Changesets with more than one child.
    """
    # i18n: "branchpoint" is a keyword
    getargs(x, 0, 0, _("branchpoint takes no arguments"))
    cl = repo.changelog
    if not subset:
        return baseset()
    # Count, for every revision at or above the smallest subset member,
    # how many times it occurs as a parent -- i.e. its number of children.
    baserev = min(subset)
    childcount = [0] * (len(repo) - baserev)
    for r in cl.revs(start=baserev + 1):
        for p in cl.parentrevs(r):
            if p >= baserev:
                childcount[p - baserev] += 1
    return subset.filter(lambda r: childcount[r - baserev] > 1)
1151 1156
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    os = getset(repo, spanset(repo), x)
    if not os:
        return baseset()
    lowest = os.min()
    # the minimum only counts if it survives the incoming subset
    if lowest not in subset:
        return baseset()
    return baseset([lowest])
1162 1167
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # status field 0 -- presumably the 'modified' list; compare removes(),
    # which passes 2
    return checkstatus(repo, subset, pat, 0)
1174 1179
def node_(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    rn = None
    if len(n) == 40:
        # a full 40-digit hex nodeid: resolve it directly
        rn = repo[n].rev()
    else:
        # a prefix: look for a unique match in the changelog
        pm = repo.changelog._partialmatch(n)
        if pm is not None:
            rn = repo.changelog.rev(pm)

    if rn is None:
        return baseset()
    return baseset([rn]) & subset
1195 1200
def obsolete(repo, subset, x):
    """``obsolete()``
    Mutable changeset with a newer version."""
    # i18n: "obsolete" is a keyword
    getargs(x, 0, 0, _("obsolete takes no arguments"))
    # revisions the obsolescence module reports as 'obsolete'
    obsoletes = obsmod.getrevs(repo, 'obsolete')
    return subset & obsoletes
1203 1208
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, spanset(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        # With a single argument, exclude the ancestry of every other
        # head: heads that are neither descendants nor members of the
        # included set.  (Idiom fix: 'rev not in' instead of
        # 'not rev in'.)
        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, spanset(repo), args[1])

    # ::include - ::exclude, computed by the changelog
    results = set(cl.findmissingrevs(common=exclude, heads=include))
    return subset & results
1227 1232
def origin(repo, subset, x):
    """``origin([set])``
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, spanset(repo), x)
    else:
        dests = getall(repo, spanset(repo), x)

    def _firstsrc(rev):
        # walk the recorded source chain back to the first source, or
        # None when rev has no recorded source at all
        src = _getrevsource(repo, rev)
        if src is None:
            return None
        while True:
            prev = _getrevsource(repo, src)
            if prev is None:
                return src
            src = prev

    sources = set()
    for r in dests:
        sources.add(_firstsrc(r))
    sources.discard(None)
    return subset & sources
1256 1261
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default paths
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence discovery chatter while talking to the peer
    repo.ui.pushbuffer()
    outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in outgoing.missing])
    return subset & o
1279 1284
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set()
    for r in getset(repo, spanset(repo), x):
        ps.add(cl.parentrevs(r)[0])
    # root revisions report nullrev as a parent; drop it
    ps.discard(node.nullrev)
    return subset & ps
1296 1301
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
        except IndexError:
            # the working directory has a single parent
            return baseset()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    cl = repo.changelog
    ps = set()
    for r in getset(repo, spanset(repo), x):
        ps.add(cl.parentrevs(r)[1])
    # non-merges report nullrev as a second parent; drop it
    ps.discard(node.nullrev)
    return subset & ps
1317 1322
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = set(p.rev() for p in repo[x].parents())
    else:
        cl = repo.changelog
        ps = set()
        for r in getset(repo, spanset(repo), x):
            ps.update(cl.parentrevs(r))
    # drop the nullrev placeholder parent
    ps.discard(node.nullrev)
    return subset & ps
1331 1336
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        # n is the parsed token after '^'; n[1] holds its text --
        # presumably ('symbol', '1') and friends; verify against the parser
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            # ^0 is the revision itself
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps
1356 1361
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # swallowing unknown-revision errors is the whole point
        return baseset()
1370 1375
def public(repo, subset, x):
    """``public()``
    Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _("public takes no arguments"))
    phase = repo._phasecache.phase

    def ispublic(r):
        return phase(repo, r) == phases.public

    # cache=False as in the original -- presumably because phases can
    # change while the revset is alive
    return subset.filter(ispublic, cache=False)
1380 1385
def remote(repo, subset, x):
    """``remote([id [,path]])``
    Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    import hg # avoid start-up nasties
    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))

    q = '.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _("remote requires a string id"))
    if q == '.':
        # '.' means the name of the current local branch
        q = repo['.'].branch()

    dest = ''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _("remote requires a repository path"))
    dest = repo.ui.expandpath(dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # ask the peer to resolve the identifier to a node
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()
1415 1420
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # status field 2 -- presumably the 'removed' list; compare modifies(),
    # which passes 0
    return checkstatus(repo, subset, pat, 2)
1427 1432
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        revnum = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # only keep the revision if it actually exists in the repository
    if revnum in fullreposet(repo):
        return subset & baseset([revnum])
    return baseset()
1443 1448
def matching(repo, subset, x):
    """``matching(revision [, field])``
    Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = ['metadata']
    if len(l) > 1:
        fieldlist = getstring(l[1],
                              # i18n: "matching" is a keyword
                              _("matching requires a string "
                                "as its second argument")).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == 'metadata':
            fields += ['user', 'description', 'date']
        elif field == 'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += ['files', 'diff']
        else:
            if field == 'author':
                field = 'user'
            fields.append(field)
    fields = set(fields)
    if 'summary' in fields and 'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard('summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
        'files', 'description', 'substate', 'diff']
    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)
    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        'user': lambda r: repo[r].user(),
        'branch': lambda r: repo[r].branch(),
        'date': lambda r: repo[r].date(),
        'description': lambda r: repo[r].description(),
        'files': lambda r: repo[r].files(),
        'parents': lambda r: repo[r].parents(),
        'phase': lambda r: repo[r].phase(),
        'substate': lambda r: repo[r].substate,
        'summary': lambda r: repo[r].description().splitlines()[0],
        'diff': lambda r: list(repo[r].diff(git=True),)
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _("unexpected field name passed to matching: %s") % info)
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        # a candidate matches when, for at least one revision in revs,
        # every selected field compares equal
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches)
1555 1560
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    l = getset(repo, subset, x)
    # in-place reversal: iteration order is part of a revset's result
    l.reverse()
    return l
1563 1568
def roots(repo, subset, x):
    """``roots(set)``
    Changesets in set with no parent changeset in set.
    """
    s = getset(repo, spanset(repo), x)
    # restrict to revisions also present in the incoming subset
    subset = baseset([r for r in s if r in subset])
    # anything that is a child of another member of s is not a root
    cs = _children(repo, subset, s)
    return subset - cs
1572 1577
def secret(repo, subset, x):
    """``secret()``
    Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _("secret takes no arguments"))
    phase = repo._phasecache.phase

    def issecret(r):
        return phase(repo, r) == phases.secret

    # cache=False as in the original -- presumably because phases can
    # change while the revset is alive
    return subset.filter(issecret, cache=False)
1582 1587
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map a string to one whose lexicographic order is reversed, so
        # that an ascending sort of the inverted keys gives a descending
        # string order
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    # fast paths for the common plain revision-number sorts
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in ('user', 'author'):
                # exact comparison: the former substring test
                # ("k in 'user author'") let bogus keys such as 'se' or
                # 'r' silently sort by user instead of raising below
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # final tie-breaker: the revision number itself
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])
1645 1650
def _stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = _stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = re.compile(pattern)
        except re.error, e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no prefix (or an unknown one) falls through to a literal match
    return 'literal', pattern, pattern.__eq__
1684 1689
def _substringmatcher(pattern):
    # Like _stringmatcher, but 'literal' means substring containment
    # rather than string equality.
    kind, pattern, matcher = _stringmatcher(pattern)
    if kind == 'literal':
        matcher = lambda s: pattern in s
    return kind, pattern, matcher
1690 1695
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise util.Abort(_("tag '%s' does not exist") % pattern)
            s = set([repo[tn].rev()])
        else:
            # regex (or substring) match over the full tag list
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        # no argument: every tagged revision, excluding the implicit 'tip'
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s
1718 1723
def tagged(repo, subset, x):
    # alias: identical to tag() (deliberately no docstring so only one
    # entry appears in the user-facing help)
    return tag(repo, subset, x)
1721 1726
def unstable(repo, subset, x):
    """``unstable()``
    Non-obsolete changesets with obsolete ancestors.
    """
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, _("unstable takes no arguments"))
    # revisions the obsolescence module reports as 'unstable'
    unstables = obsmod.getrevs(repo, 'unstable')
    return subset & unstables
1730 1735
1731 1736
def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.

    If `string` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a user that actually contains `re:`, use
    the prefix `literal:`.
    """
    # synonym: shares its implementation with author()
    return author(repo, subset, x)
1741 1746
# for internal use
def _list(repo, subset, x):
    # expand a '\0'-separated list of revision identifiers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    revs = [repo[r].rev() for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
1750 1755
# for internal use
def _intlist(repo, subset, x):
    # expand a '\0'-separated list of integer revision numbers
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    revs = [int(r) for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
1759 1764
# for internal use
def _hexlist(repo, subset, x):
    # expand a '\0'-separated list of binary-hex node ids
    s = getstring(x, "internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    revs = [cl.rev(node.bin(r)) for r in s.split('\0')]
    return baseset([r for r in revs if r in subset])
1769 1774
# Map of revset predicate names to their implementations.  Names with a
# leading underscore are internal-only helpers (see the "# for internal
# use" functions above) and are not meant to be typed by users.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "_firstancestors": _firstancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "branchpoint": branchpoint,
    "bumped": bumped,
    "bundle": bundle,
    "children": children,
    "closed": closed,
    "contains": contains,
    "converted": converted,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "_firstdescendants": _firstdescendants,
    "destination": destination,
    "divergent": divergent,
    "draft": draft,
    "extinct": extinct,
    "extra": extra,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "_followfirst": _followfirst,
    "grep": grep,
    "head": head,
    "heads": heads,
    "hidden": hidden,
    "id": node_,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "_matchfiles": _matchfiles,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "obsolete": obsolete,
    "only": only,
    "origin": origin,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "public": public,
    "remote": remote,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "secret": secret,
    "matching": matching,
    "tag": tag,
    "tagged": tagged,
    "user": user,
    "unstable": unstable,
    "_list": _list,
    "_intlist": _intlist,
    "_hexlist": _hexlist,
}
1840 1845
# symbols which can't be used for a DoS attack for any given input
# (e.g. those which accept regexes as plain strings shouldn't be included)
# functions that just return a lot of changesets (like all) don't count here
# NOTE: "contains" and "grep" are deliberately absent — see the comment above
safesymbols = set([
    "adds",
    "all",
    "ancestor",
    "ancestors",
    "_firstancestors",
    "author",
    "bisect",
    "bisected",
    "bookmark",
    "branch",
    "branchpoint",
    "bumped",
    "bundle",
    "children",
    "closed",
    "converted",
    "date",
    "desc",
    "descendants",
    "_firstdescendants",
    "destination",
    "divergent",
    "draft",
    "extinct",
    "extra",
    "file",
    "filelog",
    "first",
    "follow",
    "_followfirst",
    "head",
    "heads",
    "hidden",
    "id",
    "keyword",
    "last",
    "limit",
    "_matchfiles",
    "max",
    "merge",
    "min",
    "modifies",
    "obsolete",
    "only",
    "origin",
    "outgoing",
    "p1",
    "p2",
    "parents",
    "present",
    "public",
    "remote",
    "removes",
    "rev",
    "reverse",
    "roots",
    "sort",
    "secret",
    "matching",
    "tag",
    "tagged",
    "user",
    "unstable",
    "_list",
    "_intlist",
    "_hexlist",
])
1912 1917
# parse-tree node type -> evaluation function dispatched by getset()
methods = {
    "range": rangeset,
    "dagrange": dagrange,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
    "only": only,
    "onlypost": only,
}
1929 1934
def optimize(x, small):
    """Optimize parsed tree x and return a (weight, tree) pair.

    The weight is a heuristic evaluation cost used to reorder 'and'
    operands so the presumably cheaper side is evaluated first.  'small'
    hints that the result is expected to be a small set, which lowers the
    weight of single-revision nodes.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    # rewrite sugar operators into their canonical function/range forms
    if op == 'minus':
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'only':
        return optimize(('func', ('symbol', 'only'),
                         ('list', x[1], x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)

        # (::x and not ::y)/(not ::y and ::x) have a fast path
        def isonly(revs, bases):
            return (
                revs[0] == 'func'
                and getstring(revs[1], _('not a symbol')) == 'ancestors'
                and bases[0] == 'not'
                and bases[1][0] == 'func'
                and getstring(bases[1][1], _('not a symbol')) == 'ancestors')

        w = min(wa, wb)
        if isonly(ta, tb):
            return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
        if isonly(tb, ta):
            return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))

        # evaluate the cheaper operand first
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'dagrange range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        # rough cost model per predicate
        wa, ta = optimize(x[2], small)
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first _intlist":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
2025 2030
2026 2031 _aliasarg = ('func', ('symbol', '_aliasarg'))
2027 2032 def _getaliasarg(tree):
2028 2033 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2029 2034 return X, None otherwise.
2030 2035 """
2031 2036 if (len(tree) == 3 and tree[:2] == _aliasarg
2032 2037 and tree[2][0] == 'string'):
2033 2038 return tree[2][1]
2034 2039 return None
2035 2040
def _checkaliasarg(tree, known=None):
    """Check tree contains no _aliasarg construct or only ones which
    value is in known. Used to avoid alias placeholders injection.
    """
    if not isinstance(tree, tuple):
        return
    value = _getaliasarg(tree)
    if value is not None and (not known or value not in known):
        raise error.ParseError(_("not a function: %s") % '_aliasarg')
    # recurse into every element, including the node-type string (a no-op)
    for subtree in tree:
        _checkaliasarg(subtree, known)
2046 2051
class revsetalias(object):
    # matches a declaration with arguments, e.g. "b($1)" -> ("b", "$1")
    funcre = re.compile('^([^(]+)\(([^)]+)\)$')
    # list of argument placeholder names, or None for argument-less aliases
    args = None

    # error message at parsing, or None
    error = None
    # whether own `error` information is already shown or not.
    # this avoids showing same warning multiple times at each `findaliases`.
    warned = False

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        m = self.funcre.search(name)
        if m:
            self.name = m.group(1)
            self.tree = ('func', ('symbol', m.group(1)))
            self.args = [x.strip() for x in m.group(2).split(',')]
            for arg in self.args:
                # _aliasarg() is an unknown symbol only used separate
                # alias argument placeholders from regular strings.
                value = value.replace(arg, '_aliasarg(%r)' % (arg,))
        else:
            self.name = name
            self.tree = ('symbol', name)

        try:
            # parse the (placeholder-substituted) definition now; a broken
            # alias is recorded in self.error rather than raised, so other
            # aliases keep working
            self.replacement, pos = parse(value)
            if pos != len(value):
                raise error.ParseError(_('invalid token'), pos)
            # Check for placeholder injection
            _checkaliasarg(self.replacement, self.args)
        except error.ParseError, inst:
            if len(inst.args) > 1:
                self.error = _('at %s: %s') % (inst.args[1], inst.args[0])
            else:
                self.error = inst.args[0]
2087 2092
2088 2093 def _getalias(aliases, tree):
2089 2094 """If tree looks like an unexpanded alias, return it. Return None
2090 2095 otherwise.
2091 2096 """
2092 2097 if isinstance(tree, tuple) and tree:
2093 2098 if tree[0] == 'symbol' and len(tree) == 2:
2094 2099 name = tree[1]
2095 2100 alias = aliases.get(name)
2096 2101 if alias and alias.args is None and alias.tree == tree:
2097 2102 return alias
2098 2103 if tree[0] == 'func' and len(tree) > 1:
2099 2104 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2100 2105 name = tree[1][1]
2101 2106 alias = aliases.get(name)
2102 2107 if alias and alias.args is not None and alias.tree == tree[:2]:
2103 2108 return alias
2104 2109 return None
2105 2110
def _expandargs(tree, args):
    """Replace _aliasarg instances with the substitution value of the
    same name in args, recursively.
    """
    if not isinstance(tree, tuple) or not tree:
        return tree
    name = _getaliasarg(tree)
    if name is None:
        return tuple(_expandargs(subtree, args) for subtree in tree)
    return args[name]
2116 2121
def _expandaliases(aliases, tree, expanding, cache):
    """Expand aliases in tree, recursively.

    'aliases' is a dictionary mapping user defined aliases to
    revsetalias objects.

    'expanding' is the stack of aliases currently being expanded, used
    to detect cyclic definitions; 'cache' memoizes expansions by alias
    name.
    """
    if not isinstance(tree, tuple):
        # Do not expand raw strings
        return tree
    alias = _getalias(aliases, tree)
    if alias is not None:
        if alias.error:
            raise util.Abort(_('failed to parse revset alias "%s": %s') %
                             (alias.name, alias.error))
        if alias in expanding:
            raise error.ParseError(_('infinite expansion of revset alias "%s" '
                                     'detected') % alias.name)
        expanding.append(alias)
        if alias.name not in cache:
            cache[alias.name] = _expandaliases(aliases, alias.replacement,
                                               expanding, cache)
        result = cache[alias.name]
        expanding.pop()
        if alias.args is not None:
            # expand the actual arguments first, then splice them into the
            # placeholder positions of the cached replacement tree
            l = getlist(tree[2])
            if len(l) != len(alias.args):
                raise error.ParseError(
                    _('invalid number of arguments: %s') % len(l))
            l = [_expandaliases(aliases, a, [], cache) for a in l]
            result = _expandargs(result, dict(zip(alias.args, l)))
    else:
        result = tuple(_expandaliases(aliases, t, expanding, cache)
                       for t in tree)
    return result
2151 2156
def findaliases(ui, tree, showwarning=None):
    """Expand user-configured [revsetalias] definitions in a parsed tree.

    When showwarning is given, emit one warning per alias whose
    definition failed to parse, at most once per alias object.
    """
    # reject pre-existing placeholder nodes to prevent injection
    _checkaliasarg(tree)
    byname = {}
    for key, definition in ui.configitems('revsetalias'):
        entry = revsetalias(key, definition)
        byname[entry.name] = entry
    expanded = _expandaliases(byname, tree, [], {})
    if showwarning:
        # warn about problematic (but not referred) aliases
        for name, entry in sorted(byname.iteritems()):
            if entry.error and not entry.warned:
                msg = _('failed to parse revset alias "%s": %s'
                        ) % (name, entry.error)
                showwarning(_('warning: %s\n') % (msg))
                entry.warned = True
    return expanded
2168 2173
def foldconcat(tree):
    """Fold elements to be concatenated by `##`
    """
    # leaves pass through untouched
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return tree
    if tree[0] != '_concat':
        # recurse into every child of other node types
        return tuple(foldconcat(child) for child in tree)
    # iterative depth-first walk collecting the leaf texts left-to-right
    stack = [tree]
    pieces = []
    while stack:
        current = stack.pop()
        if current[0] == '_concat':
            stack.extend(reversed(current[1:]))
        elif current[0] in ('string', 'symbol'):
            pieces.append(current[1])
        else:
            msg = _("\"##\" can't concatenate \"%s\" element") % (current[0])
            raise error.ParseError(msg)
    return ('string', ''.join(pieces))
2189 2194
def parse(spec, lookup=None):
    """Parse a revset spec string; return a (tree, pos) pair.

    'lookup' is forwarded to the underlying parser; callers pass
    repo.__contains__ (see match()) to help disambiguate symbols.
    """
    p = parser.parser(tokenize, elements)
    return p.parse(spec, lookup=lookup)
2193 2198
def match(ui, spec, repo=None):
    """Compile a revset spec into a callable mfunc(repo, subset).

    Raises ParseError for an empty spec or trailing garbage.  When a ui
    is given, user-defined revset aliases are expanded and warnings about
    broken aliases go to ui.warn.
    """
    if not spec:
        raise error.ParseError(_("empty query"))
    lookup = None
    if repo:
        lookup = repo.__contains__
    tree, pos = parse(spec, lookup)
    if (pos != len(spec)):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        tree = findaliases(ui, tree, showwarning=ui.warn)
    tree = foldconcat(tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        # plain collections are wrapped in a baseset; smartsets (anything
        # with an 'isascending' attribute) pass through unchanged
        if util.safehasattr(subset, 'isascending'):
            result = getset(repo, subset, tree)
        else:
            result = getset(repo, baseset(subset), tree)
        return result
    return mfunc
2214 2219
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    "_list('') or 1"
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root(_list('a\\x00b\\x00c\\x00d'))"
    '''

    def quote(s):
        # repr() of a str yields an escaped, single-quoted literal
        return repr(str(s))

    def argtype(c, arg):
        # render one scalar argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(node.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        # render a list argument: known scalar types collapse into one
        # _list/_intlist/_hexlist call; other types (e.g. 'r') fall
        # through to the pairwise 'or' built by recursive halving below
        l = len(s)
        if l == 0:
            return "_list('')"
        elif l == 1:
            return argtype(t, s[0])
        elif t == 'd':
            return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
        elif t == 's':
            return "_list('%s')" % "\0".join(s)
        elif t == 'n':
            return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
        elif t == 'b':
            return "_list('%s')" % "\0".join(a.branch() for a in s)

        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(list(args[arg]), d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
2308 2313
def prettyformat(tree):
    """Render a parse tree as an indented, multi-line string for -Tdebug
    style output."""
    def walk(node, level, out):
        # leaves (and non-tuples) render as their repr on a single line
        if not isinstance(node, tuple) or node[0] in ('string', 'symbol'):
            out.append((level, str(node)))
            return
        out.append((level, '(%s' % node[0]))
        for child in node[1:]:
            walk(child, level + 1, out)
        # close the paren on the last line emitted for this subtree
        out[-1] = (out[-1][0], out[-1][1] + ')')

    collected = []
    walk(tree, 0, collected)
    return '\n'.join((' '*l + s) for l, s in collected)
2323 2328
def depth(tree):
    """Return the nesting depth of a parse tree; non-tuples count as 0."""
    if not isinstance(tree, tuple):
        return 0
    return 1 + max(depth(child) for child in tree)
2329 2334
def funcsused(tree):
    """Return the set of function names invoked anywhere in a parse tree."""
    if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
        return set()
    used = set()
    for subtree in tree[1:]:
        used.update(funcsused(subtree))
    if tree[0] == 'func':
        # ('func', ('symbol', NAME), args...) -> NAME
        used.add(tree[1][1])
    return used
2340 2345
class abstractsmartset(object):
    """Base class for all smartset classes.

    Defines the iteration/ordering protocol every revset result must
    implement, and provides default implementations of the set operators
    in terms of that protocol.
    """

    def __nonzero__(self):
        """True if the smartset is not empty"""
        raise NotImplementedError()

    def __contains__(self, rev):
        """provide fast membership testing"""
        raise NotImplementedError()

    def __iter__(self):
        """iterate the set in the order it is supposed to be iterated"""
        raise NotImplementedError()

    # Attributes containing a function to perform a fast iteration in a given
    # direction. A smartset can have none, one, or both defined.
    #
    # Default value is None instead of a function returning None to avoid
    # initializing an iterator just for testing if a fast method exists.
    fastasc = None
    fastdesc = None

    def isascending(self):
        """True if the set will iterate in ascending order"""
        raise NotImplementedError()

    def isdescending(self):
        """True if the set will iterate in descending order"""
        raise NotImplementedError()

    def min(self):
        """return the minimum element in the set"""
        if self.fastasc is not None:
            # first element of an ascending iteration is the minimum
            for r in self.fastasc():
                return r
            raise ValueError('arg is an empty sequence')
        return min(self)

    def max(self):
        """return the maximum element in the set"""
        if self.fastdesc is not None:
            # first element of a descending iteration is the maximum
            for r in self.fastdesc():
                return r
            raise ValueError('arg is an empty sequence')
        return max(self)

    def first(self):
        """return the first element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def last(self):
        """return the last element in the set (user iteration perspective)

        Return None if the set is empty"""
        raise NotImplementedError()

    def __len__(self):
        """return the length of the smartsets

        This can be expensive on smartset that could be lazy otherwise."""
        raise NotImplementedError()

    def reverse(self):
        """reverse the expected iteration order"""
        raise NotImplementedError()

    def sort(self, reverse=True):
        """get the set to iterate in an ascending or descending order"""
        raise NotImplementedError()

    def __and__(self, other):
        """Returns a new object with the intersection of the two collections.

        This is part of the mandatory API for smartset."""
        return self.filter(other.__contains__, cache=False)

    def __add__(self, other):
        """Returns a new object with the union of the two collections.

        This is part of the mandatory API for smartset."""
        return addset(self, other)

    def __sub__(self, other):
        """Returns a new object with the substraction of the two collections.

        This is part of the mandatory API for smartset."""
        c = other.__contains__
        return self.filter(lambda r: not c(r), cache=False)

    def filter(self, condition, cache=True):
        """Returns this smartset filtered by condition as a new smartset.

        `condition` is a callable which takes a revision number and returns a
        boolean.

        This is part of the mandatory API for smartset."""
        # builtin cannot be cached. but do not needs to
        if cache and util.safehasattr(condition, 'func_code'):
            condition = util.cachefunc(condition)
        return filteredset(self, condition)
2443 2448
class baseset(abstractsmartset):
    """Basic data structure that represents a revset and contains the basic
    operation that it should be able to perform.

    Every method in this class should be implemented by any smartset class.
    """
    def __init__(self, data=()):
        if not isinstance(data, list):
            data = list(data)
        self._list = data
        # tri-state ordering flag: None = insertion order,
        # True = ascending, False = descending
        self._ascending = None

    @util.propertycache
    def _set(self):
        # lazily-built set view of the data for O(1) membership tests
        return set(self._list)

    @util.propertycache
    def _asclist(self):
        # lazily-built ascending copy of the data for ordered iteration
        asclist = self._list[:]
        asclist.sort()
        return asclist

    def __iter__(self):
        if self._ascending is None:
            return iter(self._list)
        elif self._ascending:
            return iter(self._asclist)
        else:
            return reversed(self._asclist)

    def fastasc(self):
        return iter(self._asclist)

    def fastdesc(self):
        return reversed(self._asclist)

    @util.propertycache
    def __contains__(self):
        # bind the underlying set's __contains__ directly for speed
        return self._set.__contains__

    def __nonzero__(self):
        return bool(self._list)

    def sort(self, reverse=False):
        # sorting is lazy: only the ordering flag changes here
        self._ascending = not bool(reverse)

    def reverse(self):
        if self._ascending is None:
            self._list.reverse()
        else:
            self._ascending = not self._ascending

    def __len__(self):
        return len(self._list)

    def isascending(self):
        """Returns True if the collection is ascending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and self._ascending

    def isdescending(self):
        """Returns True if the collection is descending order, False if not.

        This is part of the mandatory API for smartset."""
        if len(self) <= 1:
            return True
        return self._ascending is not None and not self._ascending

    def first(self):
        if self:
            if self._ascending is None:
                return self._list[0]
            elif self._ascending:
                return self._asclist[0]
            else:
                return self._asclist[-1]
        return None

    def last(self):
        if self:
            if self._ascending is None:
                return self._list[-1]
            elif self._ascending:
                return self._asclist[-1]
            else:
                return self._asclist[0]
        return None
2534 2539
class filteredset(abstractsmartset):
    """Duck type for baseset class which iterates lazily over the revisions in
    the subset and contains a function which tests for membership in the
    revset
    """
    def __init__(self, subset, condition=lambda x: True):
        """
        condition: a function that decide whether a revision in the subset
        belongs to the revset or not.
        """
        self._subset = subset
        self._condition = condition
        self._cache = {}

    def __contains__(self, x):
        c = self._cache
        if x not in c:
            # cache the (possibly expensive) condition result per revision
            v = c[x] = x in self._subset and self._condition(x)
            return v
        return c[x]

    def __iter__(self):
        return self._iterfilter(self._subset)

    def _iterfilter(self, it):
        # yield only the elements of 'it' accepted by the condition
        cond = self._condition
        for x in it:
            if cond(x):
                yield x

    @property
    def fastasc(self):
        it = self._subset.fastasc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    @property
    def fastdesc(self):
        it = self._subset.fastdesc
        if it is None:
            return None
        return lambda: self._iterfilter(it())

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        # Basic implementation to be changed in future patches.
        l = baseset([r for r in self])
        return len(l)

    def sort(self, reverse=False):
        self._subset.sort(reverse=reverse)

    def reverse(self):
        self._subset.reverse()

    def isascending(self):
        return self._subset.isascending()

    def isdescending(self):
        return self._subset.isdescending()

    def first(self):
        for x in self:
            return x
        return None

    def last(self):
        it = None
        # Note: isascending/isdescending are methods and must be *called*;
        # the previous code truth-tested the bound methods (always True)
        # and used fastdesc in both branches, returning the wrong element
        # for descending sets.
        if self._subset.isascending():
            # ascending order: the last element is the largest, so walk
            # from the top
            it = self.fastdesc
        elif self._subset.isdescending():
            # descending order: the last element is the smallest, so walk
            # from the bottom
            it = self.fastasc
        if it is None:
            # slowly consume everything. This needs improvement
            it = lambda: reversed(list(self))
        for x in it():
            return x
        return None
2618 2623
2619 2624 class addset(abstractsmartset):
2620 2625 """Represent the addition of two sets
2621 2626
2622 2627 Wrapper structure for lazily adding two structures without losing much
2623 2628 performance on the __contains__ method
2624 2629
2625 2630 If the ascending attribute is set, that means the two structures are
2626 2631 ordered in either an ascending or descending way. Therefore, we can add
2627 2632 them maintaining the order by iterating over both at the same time
2628 2633 """
2629 2634 def __init__(self, revs1, revs2, ascending=None):
2630 2635 self._r1 = revs1
2631 2636 self._r2 = revs2
2632 2637 self._iter = None
2633 2638 self._ascending = ascending
2634 2639 self._genlist = None
2635 2640 self._asclist = None
2636 2641
2637 2642 def __len__(self):
2638 2643 return len(self._list)
2639 2644
2640 2645 def __nonzero__(self):
2641 2646 return bool(self._r1) or bool(self._r2)
2642 2647
2643 2648 @util.propertycache
2644 2649 def _list(self):
2645 2650 if not self._genlist:
2646 2651 self._genlist = baseset(self._iterator())
2647 2652 return self._genlist
2648 2653
2649 2654 def _iterator(self):
2650 2655 """Iterate over both collections without repeating elements
2651 2656
2652 2657 If the ascending attribute is not set, iterate over the first one and
2653 2658 then over the second one checking for membership on the first one so we
2654 2659 dont yield any duplicates.
2655 2660
2656 2661 If the ascending attribute is set, iterate over both collections at the
2657 2662 same time, yielding only one value at a time in the given order.
2658 2663 """
2659 2664 if self._ascending is None:
2660 2665 def gen():
2661 2666 for r in self._r1:
2662 2667 yield r
2663 2668 inr1 = self._r1.__contains__
2664 2669 for r in self._r2:
2665 2670 if not inr1(r):
2666 2671 yield r
2667 2672 gen = gen()
2668 2673 else:
2669 2674 iter1 = iter(self._r1)
2670 2675 iter2 = iter(self._r2)
2671 2676 gen = self._iterordered(self._ascending, iter1, iter2)
2672 2677 return gen
2673 2678
2674 2679 def __iter__(self):
2675 2680 if self._ascending is None:
2676 2681 if self._genlist:
2677 2682 return iter(self._genlist)
2678 2683 return iter(self._iterator())
2679 2684 self._trysetasclist()
2680 2685 if self._ascending:
2681 2686 it = self.fastasc
2682 2687 else:
2683 2688 it = self.fastdesc
2684 2689 if it is None:
2685 2690 # consume the gen and try again
2686 2691 self._list
2687 2692 return iter(self)
2688 2693 return it()
2689 2694
2690 2695 def _trysetasclist(self):
2691 2696 """populate the _asclist attribute if possible and necessary"""
2692 2697 if self._genlist is not None and self._asclist is None:
2693 2698 self._asclist = sorted(self._genlist)
2694 2699
2695 2700 @property
2696 2701 def fastasc(self):
2697 2702 self._trysetasclist()
2698 2703 if self._asclist is not None:
2699 2704 return self._asclist.__iter__
2700 2705 iter1 = self._r1.fastasc
2701 2706 iter2 = self._r2.fastasc
2702 2707 if None in (iter1, iter2):
2703 2708 return None
2704 2709 return lambda: self._iterordered(True, iter1(), iter2())
2705 2710
2706 2711 @property
2707 2712 def fastdesc(self):
2708 2713 self._trysetasclist()
2709 2714 if self._asclist is not None:
2710 2715 return self._asclist.__reversed__
2711 2716 iter1 = self._r1.fastdesc
2712 2717 iter2 = self._r2.fastdesc
2713 2718 if None in (iter1, iter2):
2714 2719 return None
2715 2720 return lambda: self._iterordered(False, iter1(), iter2())
2716 2721
2717 2722 def _iterordered(self, ascending, iter1, iter2):
2718 2723 """produce an ordered iteration from two iterators with the same order
2719 2724
2720 2725 The ascending is used to indicated the iteration direction.
2721 2726 """
2722 2727 choice = max
2723 2728 if ascending:
2724 2729 choice = min
2725 2730
2726 2731 val1 = None
2727 2732 val2 = None
2728 2733
2729 2734 choice = max
2730 2735 if ascending:
2731 2736 choice = min
2732 2737 try:
2733 2738 # Consume both iterators in an ordered way until one is
2734 2739 # empty
2735 2740 while True:
2736 2741 if val1 is None:
2737 2742 val1 = iter1.next()
2738 2743 if val2 is None:
2739 2744 val2 = iter2.next()
2740 2745 next = choice(val1, val2)
2741 2746 yield next
2742 2747 if val1 == next:
2743 2748 val1 = None
2744 2749 if val2 == next:
2745 2750 val2 = None
2746 2751 except StopIteration:
2747 2752 # Flush any remaining values and consume the other one
2748 2753 it = iter2
2749 2754 if val1 is not None:
2750 2755 yield val1
2751 2756 it = iter1
2752 2757 elif val2 is not None:
2753 2758 # might have been equality and both are empty
2754 2759 yield val2
2755 2760 for val in it:
2756 2761 yield val
2757 2762
2758 2763 def __contains__(self, x):
2759 2764 return x in self._r1 or x in self._r2
2760 2765
2761 2766 def sort(self, reverse=False):
2762 2767 """Sort the added set
2763 2768
2764 2769 For this we use the cached list with all the generated values and if we
2765 2770 know they are ascending or descending we can sort them in a smart way.
2766 2771 """
2767 2772 self._ascending = not reverse
2768 2773
2769 2774 def isascending(self):
2770 2775 return self._ascending is not None and self._ascending
2771 2776
2772 2777 def isdescending(self):
2773 2778 return self._ascending is not None and not self._ascending
2774 2779
2775 2780 def reverse(self):
2776 2781 if self._ascending is None:
2777 2782 self._list.reverse()
2778 2783 else:
2779 2784 self._ascending = not self._ascending
2780 2785
2781 2786 def first(self):
2782 2787 for x in self:
2783 2788 return x
2784 2789 return None
2785 2790
    def last(self):
        # last element == first element of the opposite order; flip the
        # direction, take the first value, then restore the original order
        self.reverse()
        val = self.first()
        self.reverse()
        return val
2791 2796
class generatorset(abstractsmartset):
    """Wrap a generator for lazy iteration

    Wrapper structure for generators that provides lazy membership and can
    be iterated more than once.
    When asked for membership it generates values until either it finds the
    requested one or has gone through all the elements in the generator
    """
    def __init__(self, gen, iterasc=None):
        """
        gen: a generator producing the values for the generatorset.

        iterasc: None when the generation order is unknown, True when gen
        yields in ascending order, False when it yields in descending order
        (enables the optimised membership tests below).
        """
        self._gen = gen
        # sorted copy of all generated values, built once gen is exhausted
        self._asclist = None
        # membership cache: value -> True (generated) / False (known absent)
        self._cache = {}
        # values generated so far, in generation order
        self._genlist = []
        self._finished = False
        self._ascending = True
        if iterasc is not None:
            # NOTE(review): these instance-level overrides are seen by
            # direct __contains__ calls; verify the 'in' operator paths
            # that look the method up on the type are not relied upon here
            if iterasc:
                self.fastasc = self._iterator
                self.__contains__ = self._asccontains
            else:
                self.fastdesc = self._iterator
                self.__contains__ = self._desccontains

    def __nonzero__(self):
        # non-empty as soon as a single value can be generated
        for r in self:
            return True
        return False

    def __contains__(self, x):
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True

        self._cache[x] = False
        return False

    def _asccontains(self, x):
        """version of contains optimised for ascending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l > x:
                # ascending order: x cannot appear any more
                break

        self._cache[x] = False
        return False

    def _desccontains(self, x):
        """version of contains optimised for descending generator"""
        if x in self._cache:
            return self._cache[x]

        # Use new values only, as existing values would be cached.
        for l in self._consumegen():
            if l == x:
                return True
            if l < x:
                # descending order: x cannot appear any more
                break

        self._cache[x] = False
        return False

    def __iter__(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is not None:
            return it()
        # we need to consume the iterator
        for x in self._consumegen():
            pass
        # recall the same code: _consumegen installed fastasc/fastdesc
        return iter(self)

    def _iterator(self):
        if self._finished:
            return iter(self._genlist)

        # We have to use this complex iteration strategy to allow multiple
        # iterations at the same time. We need to be able to catch revision
        # removed from _consumegen and added to genlist in another instance.
        #
        # Getting rid of it would provide an about 15% speed up on this
        # iteration.
        genlist = self._genlist
        nextrev = self._consumegen().next
        _len = len # cache global lookup
        def gen():
            i = 0
            while True:
                if i < _len(genlist):
                    yield genlist[i]
                else:
                    yield nextrev()
                i += 1
        return gen()

    def _consumegen(self):
        cache = self._cache
        genlist = self._genlist.append
        for item in self._gen:
            cache[item] = True
            genlist(item)
            yield item
        if not self._finished:
            self._finished = True
            asc = self._genlist[:]
            asc.sort()
            self._asclist = asc
            # from now on, both orders can iterate the sorted list directly
            self.fastasc = asc.__iter__
            self.fastdesc = asc.__reversed__

    def __len__(self):
        # the size is only known after full generation
        for x in self._consumegen():
            pass
        return len(self._genlist)

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def isascending(self):
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        if it is None:
            # we need to consume all and try again
            for x in self._consumegen():
                pass
            return self.first()
        if self:
            return it().next()
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        if it is None:
            # we need to consume all and try again
            # (fix: this used to retry first(), returning the wrong end of
            # the set after the generator had been consumed)
            for x in self._consumegen():
                pass
            return self.last()
        if self:
            return it().next()
        return None
2960 2965
def spanset(repo, start=None, end=None):
    """factory function to dispatch between fullreposet and actual spanset

    Feel free to update all spanset call sites and kill this function at some
    point.
    """
    # an explicit bound means a real (sub-)range; otherwise the whole repo
    if start is not None or end is not None:
        return _spanset(repo, start, end)
    return fullreposet(repo)
2970 2975
2971 2976
class _spanset(abstractsmartset):
    """Duck type for baseset class which represents a range of revisions and
    can work lazily and without having all the range in memory

    Note that spanset(x, y) behave almost like xrange(x, y) except for two
    notable points:
    - when x < y it will be automatically descending,
    - revision filtered with this repoview will be skipped.

    """
    def __init__(self, repo, start=0, end=None):
        """
        start: first revision included the set
            (default to 0)
        end: first revision excluded (last+1)
            (default to len(repo))

        Spanset will be descending if `end` < `start`.
        """
        if end is None:
            end = len(repo)
        self._ascending = start <= end
        if not self._ascending:
            # normalize to a forward [start, end) interval; the iteration
            # direction is tracked separately in _ascending
            start, end = end + 1, start + 1
        self._start = start
        self._end = end
        # revisions hidden by the current repoview; skipped on iteration
        self._hiddenrevs = repo.changelog.filteredrevs

    def sort(self, reverse=False):
        self._ascending = not reverse

    def reverse(self):
        self._ascending = not self._ascending

    def _iterfilter(self, iterrange):
        # drop revisions hidden by the repoview
        s = self._hiddenrevs
        for r in iterrange:
            if r not in s:
                yield r

    def __iter__(self):
        if self._ascending:
            return self.fastasc()
        else:
            return self.fastdesc()

    def fastasc(self):
        iterrange = xrange(self._start, self._end)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def fastdesc(self):
        iterrange = xrange(self._end - 1, self._start - 1, -1)
        if self._hiddenrevs:
            return self._iterfilter(iterrange)
        return iter(iterrange)

    def __contains__(self, rev):
        hidden = self._hiddenrevs
        return ((self._start <= rev < self._end)
                and not (hidden and rev in hidden))

    def __nonzero__(self):
        for r in self:
            return True
        return False

    def __len__(self):
        if not self._hiddenrevs:
            return abs(self._end - self._start)
        else:
            count = 0
            start = self._start
            end = self._end
            for rev in self._hiddenrevs:
                if (end < rev <= start) or (start <= rev < end):
                    count += 1
            return abs(self._end - self._start) - count

    def isascending(self):
        # fix: report the actual iteration direction. _start/_end are
        # normalized to _start <= _end in __init__, so testing them (as the
        # previous code did) always claimed ascending, disagreeing with
        # __iter__/first/last once sort()/reverse() flipped the direction.
        return self._ascending

    def isdescending(self):
        return not self._ascending

    def first(self):
        if self._ascending:
            it = self.fastasc
        else:
            it = self.fastdesc
        for x in it():
            return x
        return None

    def last(self):
        if self._ascending:
            it = self.fastdesc
        else:
            it = self.fastasc
        for x in it():
            return x
        return None
3075 3080
class fullreposet(_spanset):
    """a set containing all revisions in the repo

    This class exists to host special optimization.
    """

    def __init__(self, repo):
        super(fullreposet, self).__init__(repo)

    def __and__(self, other):
        """As self contains the whole repo, all of the other set should also be
        in self. Therefore `self & other = other`.

        This boldly assumes the other contains valid revs only.
        """
        # other not a smartset, make it so
        if not util.safehasattr(other, 'isascending'):
            # filter out hidden revision
            # (this boldly assumes all smartset are pure)
            #
            # `other` was used with "&", let's assume this is a set like
            # object.
            other = baseset(other - self._hiddenrevs)

        # keep the result ordered like self
        if self.isascending():
            other.sort()
        else:
            # fix: 'reverse' was passed as a bare (undefined) name here,
            # raising NameError on the descending path; use the keyword
            other.sort(reverse=True)
        return other
3105 3110
# tell hggettext to extract docstrings from these functions:
# NOTE(review): 'symbols' is defined earlier in this file (not visible in
# this chunk); presumably it maps revset predicate names to functions whose
# docstrings are user-visible and therefore translatable — confirm
i18nfunctions = symbols.values()
@@ -1,523 +1,598 b''
1 1 $ hg init a
2 2 $ cd a
3 3 $ echo 'root' >root
4 4 $ hg add root
5 5 $ hg commit -d '0 0' -m "Adding root node"
6 6
7 7 $ echo 'a' >a
8 8 $ hg add a
9 9 $ hg branch a
10 10 marked working directory as branch a
11 11 (branches are permanent and global, did you want a bookmark?)
12 12 $ hg commit -d '1 0' -m "Adding a branch"
13 13
14 14 $ hg branch q
15 15 marked working directory as branch q
16 16 (branches are permanent and global, did you want a bookmark?)
17 17 $ echo 'aa' >a
18 18 $ hg branch -C
19 19 reset working directory to branch a
20 20 $ hg commit -d '2 0' -m "Adding to a branch"
21 21
22 22 $ hg update -C 0
23 23 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
24 24 $ echo 'b' >b
25 25 $ hg add b
26 26 $ hg branch b
27 27 marked working directory as branch b
28 28 (branches are permanent and global, did you want a bookmark?)
29 29 $ hg commit -d '2 0' -m "Adding b branch"
30 30
31 31 $ echo 'bh1' >bh1
32 32 $ hg add bh1
33 33 $ hg commit -d '3 0' -m "Adding b branch head 1"
34 34
35 35 $ hg update -C 2
36 36 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
37 37 $ echo 'bh2' >bh2
38 38 $ hg add bh2
39 39 $ hg commit -d '4 0' -m "Adding b branch head 2"
40 40
41 41 $ echo 'c' >c
42 42 $ hg add c
43 43 $ hg branch c
44 44 marked working directory as branch c
45 45 (branches are permanent and global, did you want a bookmark?)
46 46 $ hg commit -d '5 0' -m "Adding c branch"
47 47
48 48 reserved names
49 49
50 50 $ hg branch tip
51 51 abort: the name 'tip' is reserved
52 52 [255]
53 53 $ hg branch null
54 54 abort: the name 'null' is reserved
55 55 [255]
56 56 $ hg branch .
57 57 abort: the name '.' is reserved
58 58 [255]
59 59
60 60 invalid characters
61 61
62 62 $ hg branch 'foo:bar'
63 63 abort: ':' cannot be used in a name
64 64 [255]
65 65
66 66 $ hg branch 'foo
67 67 > bar'
68 68 abort: '\n' cannot be used in a name
69 69 [255]
70 70
71 71 trailing or leading spaces should be stripped before testing duplicates
72 72
73 73 $ hg branch 'b '
74 74 abort: a branch of the same name already exists
75 75 (use 'hg update' to switch to it)
76 76 [255]
77 77
78 78 $ hg branch ' b'
79 79 abort: a branch of the same name already exists
80 80 (use 'hg update' to switch to it)
81 81 [255]
82 82
83 83 verify update will accept invalid legacy branch names
84 84
85 85 $ hg init test-invalid-branch-name
86 86 $ cd test-invalid-branch-name
87 87 $ hg pull -u "$TESTDIR"/bundles/test-invalid-branch-name.hg
88 88 pulling from *test-invalid-branch-name.hg (glob)
89 89 requesting all changes
90 90 adding changesets
91 91 adding manifests
92 92 adding file changes
93 93 added 3 changesets with 3 changes to 2 files
94 94 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
95 95
96 96 $ hg update '"colon:test"'
97 97 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98 $ cd ..
99 99
100 100 $ echo 'd' >d
101 101 $ hg add d
102 102 $ hg branch 'a branch name much longer than the default justification used by branches'
103 103 marked working directory as branch a branch name much longer than the default justification used by branches
104 104 (branches are permanent and global, did you want a bookmark?)
105 105 $ hg commit -d '6 0' -m "Adding d branch"
106 106
107 107 $ hg branches
108 108 a branch name much longer than the default justification used by branches 7:10ff5895aa57
109 109 b 4:aee39cd168d0
110 110 c 6:589736a22561 (inactive)
111 111 a 5:d8cbc61dbaa6 (inactive)
112 112 default 0:19709c5a4e75 (inactive)
113 113
114 114 -------
115 115
116 116 $ hg branches -a
117 117 a branch name much longer than the default justification used by branches 7:10ff5895aa57
118 118 b 4:aee39cd168d0
119 119
120 120 --- Branch a
121 121
122 122 $ hg log -b a
123 123 changeset: 5:d8cbc61dbaa6
124 124 branch: a
125 125 parent: 2:881fe2b92ad0
126 126 user: test
127 127 date: Thu Jan 01 00:00:04 1970 +0000
128 128 summary: Adding b branch head 2
129 129
130 130 changeset: 2:881fe2b92ad0
131 131 branch: a
132 132 user: test
133 133 date: Thu Jan 01 00:00:02 1970 +0000
134 134 summary: Adding to a branch
135 135
136 136 changeset: 1:dd6b440dd85a
137 137 branch: a
138 138 user: test
139 139 date: Thu Jan 01 00:00:01 1970 +0000
140 140 summary: Adding a branch
141 141
142 142
143 143 ---- Branch b
144 144
145 145 $ hg log -b b
146 146 changeset: 4:aee39cd168d0
147 147 branch: b
148 148 user: test
149 149 date: Thu Jan 01 00:00:03 1970 +0000
150 150 summary: Adding b branch head 1
151 151
152 152 changeset: 3:ac22033332d1
153 153 branch: b
154 154 parent: 0:19709c5a4e75
155 155 user: test
156 156 date: Thu Jan 01 00:00:02 1970 +0000
157 157 summary: Adding b branch
158 158
159 159
160 160 ---- going to test branch closing
161 161
162 162 $ hg branches
163 163 a branch name much longer than the default justification used by branches 7:10ff5895aa57
164 164 b 4:aee39cd168d0
165 165 c 6:589736a22561 (inactive)
166 166 a 5:d8cbc61dbaa6 (inactive)
167 167 default 0:19709c5a4e75 (inactive)
168 168 $ hg up -C b
169 169 2 files updated, 0 files merged, 4 files removed, 0 files unresolved
170 170 $ echo 'xxx1' >> b
171 171 $ hg commit -d '7 0' -m 'adding cset to branch b'
172 172 $ hg up -C aee39cd168d0
173 173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
174 174 $ echo 'xxx2' >> b
175 175 $ hg commit -d '8 0' -m 'adding head to branch b'
176 176 created new head
177 177 $ echo 'xxx3' >> b
178 178 $ hg commit -d '9 0' -m 'adding another cset to branch b'
179 179 $ hg branches
180 180 b 10:bfbe841b666e
181 181 a branch name much longer than the default justification used by branches 7:10ff5895aa57
182 182 c 6:589736a22561 (inactive)
183 183 a 5:d8cbc61dbaa6 (inactive)
184 184 default 0:19709c5a4e75 (inactive)
185 185 $ hg heads --closed
186 186 changeset: 10:bfbe841b666e
187 187 branch: b
188 188 tag: tip
189 189 user: test
190 190 date: Thu Jan 01 00:00:09 1970 +0000
191 191 summary: adding another cset to branch b
192 192
193 193 changeset: 8:eebb944467c9
194 194 branch: b
195 195 parent: 4:aee39cd168d0
196 196 user: test
197 197 date: Thu Jan 01 00:00:07 1970 +0000
198 198 summary: adding cset to branch b
199 199
200 200 changeset: 7:10ff5895aa57
201 201 branch: a branch name much longer than the default justification used by branches
202 202 user: test
203 203 date: Thu Jan 01 00:00:06 1970 +0000
204 204 summary: Adding d branch
205 205
206 206 changeset: 6:589736a22561
207 207 branch: c
208 208 user: test
209 209 date: Thu Jan 01 00:00:05 1970 +0000
210 210 summary: Adding c branch
211 211
212 212 changeset: 5:d8cbc61dbaa6
213 213 branch: a
214 214 parent: 2:881fe2b92ad0
215 215 user: test
216 216 date: Thu Jan 01 00:00:04 1970 +0000
217 217 summary: Adding b branch head 2
218 218
219 219 changeset: 0:19709c5a4e75
220 220 user: test
221 221 date: Thu Jan 01 00:00:00 1970 +0000
222 222 summary: Adding root node
223 223
224 224 $ hg heads
225 225 changeset: 10:bfbe841b666e
226 226 branch: b
227 227 tag: tip
228 228 user: test
229 229 date: Thu Jan 01 00:00:09 1970 +0000
230 230 summary: adding another cset to branch b
231 231
232 232 changeset: 8:eebb944467c9
233 233 branch: b
234 234 parent: 4:aee39cd168d0
235 235 user: test
236 236 date: Thu Jan 01 00:00:07 1970 +0000
237 237 summary: adding cset to branch b
238 238
239 239 changeset: 7:10ff5895aa57
240 240 branch: a branch name much longer than the default justification used by branches
241 241 user: test
242 242 date: Thu Jan 01 00:00:06 1970 +0000
243 243 summary: Adding d branch
244 244
245 245 changeset: 6:589736a22561
246 246 branch: c
247 247 user: test
248 248 date: Thu Jan 01 00:00:05 1970 +0000
249 249 summary: Adding c branch
250 250
251 251 changeset: 5:d8cbc61dbaa6
252 252 branch: a
253 253 parent: 2:881fe2b92ad0
254 254 user: test
255 255 date: Thu Jan 01 00:00:04 1970 +0000
256 256 summary: Adding b branch head 2
257 257
258 258 changeset: 0:19709c5a4e75
259 259 user: test
260 260 date: Thu Jan 01 00:00:00 1970 +0000
261 261 summary: Adding root node
262 262
263 263 $ hg commit -d '9 0' --close-branch -m 'prune bad branch'
264 264 $ hg branches -a
265 265 b 8:eebb944467c9
266 266 a branch name much longer than the default justification used by branches 7:10ff5895aa57
267 267 $ hg up -C b
268 268 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 269 $ hg commit -d '9 0' --close-branch -m 'close this part branch too'
270 270 $ hg commit -d '9 0' --close-branch -m 're-closing this branch'
271 271 abort: can only close branch heads
272 272 [255]
273 273
274 274 $ hg log -r tip --debug
275 275 changeset: 12:e3d49c0575d8fc2cb1cd6859c747c14f5f6d499f
276 276 branch: b
277 277 tag: tip
278 278 phase: draft
279 279 parent: 8:eebb944467c9fb9651ed232aeaf31b3c0a7fc6c1
280 280 parent: -1:0000000000000000000000000000000000000000
281 281 manifest: 8:6f9ed32d2b310e391a4f107d5f0f071df785bfee
282 282 user: test
283 283 date: Thu Jan 01 00:00:09 1970 +0000
284 284 extra: branch=b
285 285 extra: close=1
286 286 description:
287 287 close this part branch too
288 288
289 289
290 290 --- b branch should be inactive
291 291
292 292 $ hg branches
293 293 a branch name much longer than the default justification used by branches 7:10ff5895aa57
294 294 c 6:589736a22561 (inactive)
295 295 a 5:d8cbc61dbaa6 (inactive)
296 296 default 0:19709c5a4e75 (inactive)
297 297 $ hg branches -c
298 298 a branch name much longer than the default justification used by branches 7:10ff5895aa57
299 299 b 12:e3d49c0575d8 (closed)
300 300 c 6:589736a22561 (inactive)
301 301 a 5:d8cbc61dbaa6 (inactive)
302 302 default 0:19709c5a4e75 (inactive)
303 303 $ hg branches -a
304 304 a branch name much longer than the default justification used by branches 7:10ff5895aa57
305 305 $ hg branches -q
306 306 a branch name much longer than the default justification used by branches
307 307 c
308 308 a
309 309 default
310 310 $ hg heads b
311 311 no open branch heads found on branches b
312 312 [1]
313 313 $ hg heads --closed b
314 314 changeset: 12:e3d49c0575d8
315 315 branch: b
316 316 tag: tip
317 317 parent: 8:eebb944467c9
318 318 user: test
319 319 date: Thu Jan 01 00:00:09 1970 +0000
320 320 summary: close this part branch too
321 321
322 322 changeset: 11:d3f163457ebf
323 323 branch: b
324 324 user: test
325 325 date: Thu Jan 01 00:00:09 1970 +0000
326 326 summary: prune bad branch
327 327
328 328 $ echo 'xxx4' >> b
329 329 $ hg commit -d '9 0' -m 'reopen branch with a change'
330 330 reopening closed branch head 12
331 331
332 332 --- branch b is back in action
333 333
334 334 $ hg branches -a
335 335 b 13:e23b5505d1ad
336 336 a branch name much longer than the default justification used by branches 7:10ff5895aa57
337 337
338 338 ---- test heads listings
339 339
340 340 $ hg heads
341 341 changeset: 13:e23b5505d1ad
342 342 branch: b
343 343 tag: tip
344 344 user: test
345 345 date: Thu Jan 01 00:00:09 1970 +0000
346 346 summary: reopen branch with a change
347 347
348 348 changeset: 7:10ff5895aa57
349 349 branch: a branch name much longer than the default justification used by branches
350 350 user: test
351 351 date: Thu Jan 01 00:00:06 1970 +0000
352 352 summary: Adding d branch
353 353
354 354 changeset: 6:589736a22561
355 355 branch: c
356 356 user: test
357 357 date: Thu Jan 01 00:00:05 1970 +0000
358 358 summary: Adding c branch
359 359
360 360 changeset: 5:d8cbc61dbaa6
361 361 branch: a
362 362 parent: 2:881fe2b92ad0
363 363 user: test
364 364 date: Thu Jan 01 00:00:04 1970 +0000
365 365 summary: Adding b branch head 2
366 366
367 367 changeset: 0:19709c5a4e75
368 368 user: test
369 369 date: Thu Jan 01 00:00:00 1970 +0000
370 370 summary: Adding root node
371 371
372 372
373 373 branch default
374 374
375 375 $ hg heads default
376 376 changeset: 0:19709c5a4e75
377 377 user: test
378 378 date: Thu Jan 01 00:00:00 1970 +0000
379 379 summary: Adding root node
380 380
381 381
382 382 branch a
383 383
384 384 $ hg heads a
385 385 changeset: 5:d8cbc61dbaa6
386 386 branch: a
387 387 parent: 2:881fe2b92ad0
388 388 user: test
389 389 date: Thu Jan 01 00:00:04 1970 +0000
390 390 summary: Adding b branch head 2
391 391
392 392 $ hg heads --active a
393 393 no open branch heads found on branches a
394 394 [1]
395 395
396 396 branch b
397 397
398 398 $ hg heads b
399 399 changeset: 13:e23b5505d1ad
400 400 branch: b
401 401 tag: tip
402 402 user: test
403 403 date: Thu Jan 01 00:00:09 1970 +0000
404 404 summary: reopen branch with a change
405 405
406 406 $ hg heads --closed b
407 407 changeset: 13:e23b5505d1ad
408 408 branch: b
409 409 tag: tip
410 410 user: test
411 411 date: Thu Jan 01 00:00:09 1970 +0000
412 412 summary: reopen branch with a change
413 413
414 414 changeset: 11:d3f163457ebf
415 415 branch: b
416 416 user: test
417 417 date: Thu Jan 01 00:00:09 1970 +0000
418 418 summary: prune bad branch
419 419
420 420 default branch colors:
421 421
422 422 $ cat <<EOF >> $HGRCPATH
423 423 > [extensions]
424 424 > color =
425 425 > [color]
426 426 > mode = ansi
427 427 > EOF
428 428
429 429 $ hg up -C c
430 430 3 files updated, 0 files merged, 2 files removed, 0 files unresolved
431 431 $ hg commit -d '9 0' --close-branch -m 'reclosing this branch'
432 432 $ hg up -C b
433 433 2 files updated, 0 files merged, 3 files removed, 0 files unresolved
434 434 $ hg branches --color=always
435 435 \x1b[0;32mb\x1b[0m\x1b[0;33m 13:e23b5505d1ad\x1b[0m (esc)
436 436 \x1b[0;0ma branch name much longer than the default justification used by branches\x1b[0m\x1b[0;33m 7:10ff5895aa57\x1b[0m (esc)
437 437 \x1b[0;0ma\x1b[0m\x1b[0;33m 5:d8cbc61dbaa6\x1b[0m (inactive) (esc)
438 438 \x1b[0;0mdefault\x1b[0m\x1b[0;33m 0:19709c5a4e75\x1b[0m (inactive) (esc)
439 439
440 440 default closed branch color:
441 441
442 442 $ hg branches --color=always --closed
443 443 \x1b[0;32mb\x1b[0m\x1b[0;33m 13:e23b5505d1ad\x1b[0m (esc)
444 444 \x1b[0;0ma branch name much longer than the default justification used by branches\x1b[0m\x1b[0;33m 7:10ff5895aa57\x1b[0m (esc)
445 445 \x1b[0;30;1mc\x1b[0m\x1b[0;33m 14:f894c25619d3\x1b[0m (closed) (esc)
446 446 \x1b[0;0ma\x1b[0m\x1b[0;33m 5:d8cbc61dbaa6\x1b[0m (inactive) (esc)
447 447 \x1b[0;0mdefault\x1b[0m\x1b[0;33m 0:19709c5a4e75\x1b[0m (inactive) (esc)
448 448
449 449 $ cat <<EOF >> $HGRCPATH
450 450 > [extensions]
451 451 > color =
452 452 > [color]
453 453 > branches.active = green
454 454 > branches.closed = blue
455 455 > branches.current = red
456 456 > branches.inactive = magenta
457 457 > log.changeset = cyan
458 458 > EOF
459 459
460 460 custom branch colors:
461 461
462 462 $ hg branches --color=always
463 463 \x1b[0;31mb\x1b[0m\x1b[0;36m 13:e23b5505d1ad\x1b[0m (esc)
464 464 \x1b[0;32ma branch name much longer than the default justification used by branches\x1b[0m\x1b[0;36m 7:10ff5895aa57\x1b[0m (esc)
465 465 \x1b[0;35ma\x1b[0m\x1b[0;36m 5:d8cbc61dbaa6\x1b[0m (inactive) (esc)
466 466 \x1b[0;35mdefault\x1b[0m\x1b[0;36m 0:19709c5a4e75\x1b[0m (inactive) (esc)
467 467
468 468 custom closed branch color:
469 469
470 470 $ hg branches --color=always --closed
471 471 \x1b[0;31mb\x1b[0m\x1b[0;36m 13:e23b5505d1ad\x1b[0m (esc)
472 472 \x1b[0;32ma branch name much longer than the default justification used by branches\x1b[0m\x1b[0;36m 7:10ff5895aa57\x1b[0m (esc)
473 473 \x1b[0;34mc\x1b[0m\x1b[0;36m 14:f894c25619d3\x1b[0m (closed) (esc)
474 474 \x1b[0;35ma\x1b[0m\x1b[0;36m 5:d8cbc61dbaa6\x1b[0m (inactive) (esc)
475 475 \x1b[0;35mdefault\x1b[0m\x1b[0;36m 0:19709c5a4e75\x1b[0m (inactive) (esc)
476 476
477 477 template output:
478 478
479 479 $ hg branches -Tjson --closed
480 480 [
481 481 {
482 482 "active": true,
483 483 "branch": "b",
484 484 "closed": false,
485 485 "current": true,
486 486 "node": "e23b5505d1ad24aab6f84fd8c7cb8cd8e5e93be0",
487 487 "rev": 13
488 488 },
489 489 {
490 490 "active": true,
491 491 "branch": "a branch name much longer than the default justification used by branches",
492 492 "closed": false,
493 493 "current": false,
494 494 "node": "10ff5895aa5793bd378da574af8cec8ea408d831",
495 495 "rev": 7
496 496 },
497 497 {
498 498 "active": false,
499 499 "branch": "c",
500 500 "closed": true,
501 501 "current": false,
502 502 "node": "f894c25619d3f1484639d81be950e0a07bc6f1f6",
503 503 "rev": 14
504 504 },
505 505 {
506 506 "active": false,
507 507 "branch": "a",
508 508 "closed": false,
509 509 "current": false,
510 510 "node": "d8cbc61dbaa6dc817175d1e301eecb863f280832",
511 511 "rev": 5
512 512 },
513 513 {
514 514 "active": false,
515 515 "branch": "default",
516 516 "closed": false,
517 517 "current": false,
518 518 "node": "19709c5a4e75bf938f8e349aff97438539bb729e",
519 519 "rev": 0
520 520 }
521 521 ]
522 522
523 revision branch name caching implementation
524
525 cache creation
526 $ rm .hg/cache/rbc-revs-v1
527 $ hg debugrevspec 'branch("re:a ")'
528 7
529 $ [ -f .hg/cache/rbc-revs-v1 ] || echo no file
530 no file
531 recovery from invalid cache file
532 $ echo > .hg/cache/rbc-revs-v1
533 $ hg debugrevspec 'branch("re:a ")'
534 7
535 cache update NOT fully written from revset
536 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
537 68b329da9893e34099c7d8ad5cb9c940 .hg/cache/rbc-revs-v1
538 recovery from other corruption - extra trailing data
539 $ echo >> .hg/cache/rbc-revs-v1
540 $ hg debugrevspec 'branch("re:a ")'
541 7
542 cache update NOT fully written from revset
543 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
544 e1c06d85ae7b8b032bef47e42e4c08f9 .hg/cache/rbc-revs-v1
545 lazy update after commit
546 $ hg tag tag
547 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
548 d0c0166808ee0a1f0e8894915ad363b6 .hg/cache/rbc-revs-v1
549 $ hg debugrevspec 'branch("re:a ")'
550 7
551 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
552 d0c0166808ee0a1f0e8894915ad363b6 .hg/cache/rbc-revs-v1
553 update after rollback - cache keeps stripped revs until written for other reasons
554 $ hg up -qr '.^'
555 $ hg rollback -qf
556 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
557 d8c2acdc229bf942fde1dfdbe8f9d933 .hg/cache/rbc-revs-v1
558 $ hg debugrevspec 'branch("re:a ")'
559 7
560 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
561 d8c2acdc229bf942fde1dfdbe8f9d933 .hg/cache/rbc-revs-v1
562 handle history mutations that don't change the tip node - this is a problem
563 with the cache invalidation scheme used by branchmap
564 $ hg log -r tip+b -T'{rev}:{node|short} {branch}\n'
565 14:f894c25619d3 c
566 13:e23b5505d1ad b
567 $ hg bundle -q --all bu.hg
568 $ hg --config extensions.strip= strip --no-b -qr -1:
569 $ hg up -q tip
570 $ hg branch
571 b
572 $ hg branch -q hacked
573 $ hg ci --amend -qm 'hacked'
574 $ hg pull -q bu.hg -r f894c25619d3
575 $ hg log -r tip+b -T'{rev}:{node|short} {branch}\n'
576 14:f894c25619d3 c
577 12:e3d49c0575d8 b
578 $ hg debugrevspec 'branch("hacked")'
579 13
580 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
581 22424d7e106c894336d9d705b0241bc5 .hg/cache/rbc-revs-v1
582 cleanup, restore old state
583 $ hg --config extensions.strip= strip --no-b -qr -2:
584 $ hg pull -q bu.hg
585 $ rm bu.hg
586 $ hg up -qr tip
587 $ hg log -r tip -T'{rev}:{node|short}\n'
588 14:f894c25619d3
589 the cache file does not go back to the old state - it still contains the
590 now unused 'hacked' branch name
591 $ hg debugrevspec 'branch("re:a ")'
592 7
593 $ "$TESTDIR/md5sum.py" .hg/cache/rbc-revs-v1
594 d8c2acdc229bf942fde1dfdbe8f9d933 .hg/cache/rbc-revs-v1
595 $ cat .hg/cache/rbc-names-v1
596 default\x00a\x00b\x00c\x00a branch name much longer than the default justification used by branches\x00hacked (no-eol) (esc)
597
523 598 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now