revset: support "follow(renamed.py, e22f4f3f06c3)" (issue5334)...
Gábor Stefanik
r29814:cbf9984a default
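The new two-argument form starts the file-history walk from an explicit revision instead of the working directory parent. A minimal usage sketch based on the example in the commit message (the pattern and hash are the ones from issue5334; `repo` is assumed to be an already-open localrepository object):

    # Command-line form of the new syntax:
    #   hg log -r "follow(renamed.py, e22f4f3f06c3)"
    # Same query via the internal API: follows renames/copies of renamed.py
    # starting from the revision e22f4f3f06c3 rather than from '.'
    revs = repo.revs("follow('renamed.py', e22f4f3f06c3)")
    # The one-argument form is unchanged and still follows from the
    # working directory's first parent:
    oldrevs = repo.revs("follow('renamed.py')")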
@@ -1,3679 +1,3687
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11 import re
12 12
13 13 from .i18n import _
14 14 from . import (
15 15 destutil,
16 16 encoding,
17 17 error,
18 18 hbisect,
19 19 match as matchmod,
20 20 node,
21 21 obsolete as obsmod,
22 22 parser,
23 23 pathutil,
24 24 phases,
25 25 registrar,
26 26 repoview,
27 27 util,
28 28 )
29 29
30 30 def _revancestors(repo, revs, followfirst):
31 31 """Like revlog.ancestors(), but supports followfirst."""
32 32 if followfirst:
33 33 cut = 1
34 34 else:
35 35 cut = None
36 36 cl = repo.changelog
37 37
38 38 def iterate():
39 39 revs.sort(reverse=True)
40 40 irevs = iter(revs)
41 41 h = []
42 42
43 43 inputrev = next(irevs, None)
44 44 if inputrev is not None:
45 45 heapq.heappush(h, -inputrev)
46 46
47 47 seen = set()
48 48 while h:
49 49 current = -heapq.heappop(h)
50 50 if current == inputrev:
51 51 inputrev = next(irevs, None)
52 52 if inputrev is not None:
53 53 heapq.heappush(h, -inputrev)
54 54 if current not in seen:
55 55 seen.add(current)
56 56 yield current
57 57 for parent in cl.parentrevs(current)[:cut]:
58 58 if parent != node.nullrev:
59 59 heapq.heappush(h, -parent)
60 60
61 61 return generatorset(iterate(), iterasc=False)
62 62
63 63 def _revdescendants(repo, revs, followfirst):
64 64 """Like revlog.descendants() but supports followfirst."""
65 65 if followfirst:
66 66 cut = 1
67 67 else:
68 68 cut = None
69 69
70 70 def iterate():
71 71 cl = repo.changelog
72 72 # XXX this should be 'parentset.min()' assuming 'parentset' is a
73 73 # smartset (and if it is not, it should.)
74 74 first = min(revs)
75 75 nullrev = node.nullrev
76 76 if first == nullrev:
77 77 # Are there nodes with a null first parent and a non-null
78 78 # second one? Maybe. Do we care? Probably not.
79 79 for i in cl:
80 80 yield i
81 81 else:
82 82 seen = set(revs)
83 83 for i in cl.revs(first + 1):
84 84 for x in cl.parentrevs(i)[:cut]:
85 85 if x != nullrev and x in seen:
86 86 seen.add(i)
87 87 yield i
88 88 break
89 89
90 90 return generatorset(iterate(), iterasc=True)
91 91
92 92 def _reachablerootspure(repo, minroot, roots, heads, includepath):
93 93 """return (heads(::<roots> and ::<heads>))
94 94
95 95 If includepath is True, return (<roots>::<heads>)."""
96 96 if not roots:
97 97 return []
98 98 parentrevs = repo.changelog.parentrevs
99 99 roots = set(roots)
100 100 visit = list(heads)
101 101 reachable = set()
102 102 seen = {}
103 103 # prefetch all the things! (because python is slow)
104 104 reached = reachable.add
105 105 dovisit = visit.append
106 106 nextvisit = visit.pop
107 107 # open-code the post-order traversal due to the tiny size of
108 108 # sys.getrecursionlimit()
109 109 while visit:
110 110 rev = nextvisit()
111 111 if rev in roots:
112 112 reached(rev)
113 113 if not includepath:
114 114 continue
115 115 parents = parentrevs(rev)
116 116 seen[rev] = parents
117 117 for parent in parents:
118 118 if parent >= minroot and parent not in seen:
119 119 dovisit(parent)
120 120 if not reachable:
121 121 return baseset()
122 122 if not includepath:
123 123 return reachable
124 124 for rev in sorted(seen):
125 125 for parent in seen[rev]:
126 126 if parent in reachable:
127 127 reached(rev)
128 128 return reachable
129 129
130 130 def reachableroots(repo, roots, heads, includepath=False):
131 131 """return (heads(::<roots> and ::<heads>))
132 132
133 133 If includepath is True, return (<roots>::<heads>)."""
134 134 if not roots:
135 135 return baseset()
136 136 minroot = roots.min()
137 137 roots = list(roots)
138 138 heads = list(heads)
139 139 try:
140 140 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
141 141 except AttributeError:
142 142 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
143 143 revs = baseset(revs)
144 144 revs.sort()
145 145 return revs
146 146
147 147 elements = {
148 148 # token-type: binding-strength, primary, prefix, infix, suffix
149 149 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
150 150 "##": (20, None, None, ("_concat", 20), None),
151 151 "~": (18, None, None, ("ancestor", 18), None),
152 152 "^": (18, None, None, ("parent", 18), "parentpost"),
153 153 "-": (5, None, ("negate", 19), ("minus", 5), None),
154 154 "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
155 155 "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
156 156 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
157 157 "not": (10, None, ("not", 10), None, None),
158 158 "!": (10, None, ("not", 10), None, None),
159 159 "and": (5, None, None, ("and", 5), None),
160 160 "&": (5, None, None, ("and", 5), None),
161 161 "%": (5, None, None, ("only", 5), "onlypost"),
162 162 "or": (4, None, None, ("or", 4), None),
163 163 "|": (4, None, None, ("or", 4), None),
164 164 "+": (4, None, None, ("or", 4), None),
165 165 "=": (3, None, None, ("keyvalue", 3), None),
166 166 ",": (2, None, None, ("list", 2), None),
167 167 ")": (0, None, None, None, None),
168 168 "symbol": (0, "symbol", None, None, None),
169 169 "string": (0, "string", None, None, None),
170 170 "end": (0, None, None, None, None),
171 171 }
172 172
173 173 keywords = set(['and', 'or', 'not'])
174 174
175 175 # default set of valid characters for the initial letter of symbols
176 176 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
177 177 if c.isalnum() or c in '._@' or ord(c) > 127)
178 178
179 179 # default set of valid characters for non-initial letters of symbols
180 180 _symletters = set(c for c in [chr(i) for i in xrange(256)]
181 181 if c.isalnum() or c in '-._/@' or ord(c) > 127)
182 182
183 183 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
184 184 '''
185 185 Parse a revset statement into a stream of tokens
186 186
187 187 ``syminitletters`` is the set of valid characters for the initial
188 188 letter of symbols.
189 189
190 190 By default, character ``c`` is recognized as valid for initial
191 191 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
192 192
193 193 ``symletters`` is the set of valid characters for non-initial
194 194 letters of symbols.
195 195
196 196 By default, character ``c`` is recognized as valid for non-initial
197 197 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
198 198
199 199 Check that @ is a valid unquoted token character (issue3686):
200 200 >>> list(tokenize("@::"))
201 201 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
202 202
203 203 '''
204 204 if syminitletters is None:
205 205 syminitletters = _syminitletters
206 206 if symletters is None:
207 207 symletters = _symletters
208 208
209 209 if program and lookup:
210 210 # attempt to parse old-style ranges first to deal with
211 211 # things like old-tag which contain query metacharacters
212 212 parts = program.split(':', 1)
213 213 if all(lookup(sym) for sym in parts if sym):
214 214 if parts[0]:
215 215 yield ('symbol', parts[0], 0)
216 216 if len(parts) > 1:
217 217 s = len(parts[0])
218 218 yield (':', None, s)
219 219 if parts[1]:
220 220 yield ('symbol', parts[1], s + 1)
221 221 yield ('end', None, len(program))
222 222 return
223 223
224 224 pos, l = 0, len(program)
225 225 while pos < l:
226 226 c = program[pos]
227 227 if c.isspace(): # skip inter-token whitespace
228 228 pass
229 229 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
230 230 yield ('::', None, pos)
231 231 pos += 1 # skip ahead
232 232 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
233 233 yield ('..', None, pos)
234 234 pos += 1 # skip ahead
235 235 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
236 236 yield ('##', None, pos)
237 237 pos += 1 # skip ahead
238 238 elif c in "():=,-|&+!~^%": # handle simple operators
239 239 yield (c, None, pos)
240 240 elif (c in '"\'' or c == 'r' and
241 241 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
242 242 if c == 'r':
243 243 pos += 1
244 244 c = program[pos]
245 245 decode = lambda x: x
246 246 else:
247 247 decode = parser.unescapestr
248 248 pos += 1
249 249 s = pos
250 250 while pos < l: # find closing quote
251 251 d = program[pos]
252 252 if d == '\\': # skip over escaped characters
253 253 pos += 2
254 254 continue
255 255 if d == c:
256 256 yield ('string', decode(program[s:pos]), s)
257 257 break
258 258 pos += 1
259 259 else:
260 260 raise error.ParseError(_("unterminated string"), s)
261 261 # gather up a symbol/keyword
262 262 elif c in syminitletters:
263 263 s = pos
264 264 pos += 1
265 265 while pos < l: # find end of symbol
266 266 d = program[pos]
267 267 if d not in symletters:
268 268 break
269 269 if d == '.' and program[pos - 1] == '.': # special case for ..
270 270 pos -= 1
271 271 break
272 272 pos += 1
273 273 sym = program[s:pos]
274 274 if sym in keywords: # operator keywords
275 275 yield (sym, None, s)
276 276 elif '-' in sym:
277 277 # some jerk gave us foo-bar-baz, try to check if it's a symbol
278 278 if lookup and lookup(sym):
279 279 # looks like a real symbol
280 280 yield ('symbol', sym, s)
281 281 else:
282 282 # looks like an expression
283 283 parts = sym.split('-')
284 284 for p in parts[:-1]:
285 285 if p: # possible consecutive -
286 286 yield ('symbol', p, s)
287 287 s += len(p)
288 288 yield ('-', None, pos)
289 289 s += 1
290 290 if parts[-1]: # possible trailing -
291 291 yield ('symbol', parts[-1], s)
292 292 else:
293 293 yield ('symbol', sym, s)
294 294 pos -= 1
295 295 else:
296 296 raise error.ParseError(_("syntax error in revset '%s'") %
297 297 program, pos)
298 298 pos += 1
299 299 yield ('end', None, pos)
300 300
301 301 # helpers
302 302
303 303 def getsymbol(x):
304 304 if x and x[0] == 'symbol':
305 305 return x[1]
306 306 raise error.ParseError(_('not a symbol'))
307 307
308 308 def getstring(x, err):
309 309 if x and (x[0] == 'string' or x[0] == 'symbol'):
310 310 return x[1]
311 311 raise error.ParseError(err)
312 312
313 313 def getlist(x):
314 314 if not x:
315 315 return []
316 316 if x[0] == 'list':
317 317 return list(x[1:])
318 318 return [x]
319 319
320 320 def getargs(x, min, max, err):
321 321 l = getlist(x)
322 322 if len(l) < min or (max >= 0 and len(l) > max):
323 323 raise error.ParseError(err)
324 324 return l
325 325
326 326 def getargsdict(x, funcname, keys):
327 327 return parser.buildargsdict(getlist(x), funcname, keys.split(),
328 328 keyvaluenode='keyvalue', keynode='symbol')
329 329
330 330 def getset(repo, subset, x):
331 331 if not x:
332 332 raise error.ParseError(_("missing argument"))
333 333 s = methods[x[0]](repo, subset, *x[1:])
334 334 if util.safehasattr(s, 'isascending'):
335 335 return s
336 336 # else case should not happen, because all non-func are internal,
337 337 # ignoring for now.
338 338 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
339 339 repo.ui.deprecwarn('revset "%s" uses list instead of smartset'
340 340 % x[1][1],
341 341 '3.9')
342 342 return baseset(s)
343 343
344 344 def _getrevsource(repo, r):
345 345 extra = repo[r].extra()
346 346 for label in ('source', 'transplant_source', 'rebase_source'):
347 347 if label in extra:
348 348 try:
349 349 return repo[extra[label]].rev()
350 350 except error.RepoLookupError:
351 351 pass
352 352 return None
353 353
354 354 # operator methods
355 355
356 356 def stringset(repo, subset, x):
357 357 x = repo[x].rev()
358 358 if (x in subset
359 359 or x == node.nullrev and isinstance(subset, fullreposet)):
360 360 return baseset([x])
361 361 return baseset()
362 362
363 363 def rangeset(repo, subset, x, y):
364 364 m = getset(repo, fullreposet(repo), x)
365 365 n = getset(repo, fullreposet(repo), y)
366 366
367 367 if not m or not n:
368 368 return baseset()
369 369 m, n = m.first(), n.last()
370 370
371 371 if m == n:
372 372 r = baseset([m])
373 373 elif n == node.wdirrev:
374 374 r = spanset(repo, m, len(repo)) + baseset([n])
375 375 elif m == node.wdirrev:
376 376 r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1)
377 377 elif m < n:
378 378 r = spanset(repo, m, n + 1)
379 379 else:
380 380 r = spanset(repo, m, n - 1)
381 381 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
382 382 # necessary to ensure we preserve the order in subset.
383 383 #
384 384 # This has performance implication, carrying the sorting over when possible
385 385 # would be more efficient.
386 386 return r & subset
387 387
388 388 def dagrange(repo, subset, x, y):
389 389 r = fullreposet(repo)
390 390 xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
391 391 includepath=True)
392 392 return subset & xs
393 393
394 394 def andset(repo, subset, x, y):
395 395 return getset(repo, getset(repo, subset, x), y)
396 396
397 397 def differenceset(repo, subset, x, y):
398 398 return getset(repo, subset, x) - getset(repo, subset, y)
399 399
400 400 def orset(repo, subset, *xs):
401 401 assert xs
402 402 if len(xs) == 1:
403 403 return getset(repo, subset, xs[0])
404 404 p = len(xs) // 2
405 405 a = orset(repo, subset, *xs[:p])
406 406 b = orset(repo, subset, *xs[p:])
407 407 return a + b
408 408
409 409 def notset(repo, subset, x):
410 410 return subset - getset(repo, subset, x)
411 411
412 412 def listset(repo, subset, *xs):
413 413 raise error.ParseError(_("can't use a list in this context"),
414 414 hint=_('see hg help "revsets.x or y"'))
415 415
416 416 def keyvaluepair(repo, subset, k, v):
417 417 raise error.ParseError(_("can't use a key-value pair in this context"))
418 418
419 419 def func(repo, subset, a, b):
420 420 f = getsymbol(a)
421 421 if f in symbols:
422 422 return symbols[f](repo, subset, b)
423 423
424 424 keep = lambda fn: getattr(fn, '__doc__', None) is not None
425 425
426 426 syms = [s for (s, fn) in symbols.items() if keep(fn)]
427 427 raise error.UnknownIdentifier(f, syms)
428 428
429 429 # functions
430 430
431 431 # symbols are callables like:
432 432 # fn(repo, subset, x)
433 433 # with:
434 434 # repo - current repository instance
435 435 # subset - of revisions to be examined
436 436 # x - argument in tree form
437 437 symbols = {}
438 438
439 439 # symbols which can't be used for a DoS attack for any given input
440 440 # (e.g. those which accept regexes as plain strings shouldn't be included)
441 441 # functions that just return a lot of changesets (like all) don't count here
442 442 safesymbols = set()
443 443
444 444 predicate = registrar.revsetpredicate()
445 445
446 446 @predicate('_destupdate')
447 447 def _destupdate(repo, subset, x):
448 448 # experimental revset for update destination
449 449 args = getargsdict(x, 'limit', 'clean check')
450 450 return subset & baseset([destutil.destupdate(repo, **args)[0]])
451 451
452 452 @predicate('_destmerge')
453 453 def _destmerge(repo, subset, x):
454 454 # experimental revset for merge destination
455 455 sourceset = None
456 456 if x is not None:
457 457 sourceset = getset(repo, fullreposet(repo), x)
458 458 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
459 459
460 460 @predicate('adds(pattern)', safe=True)
461 461 def adds(repo, subset, x):
462 462 """Changesets that add a file matching pattern.
463 463
464 464 The pattern without explicit kind like ``glob:`` is expected to be
465 465 relative to the current directory and match against a file or a
466 466 directory.
467 467 """
468 468 # i18n: "adds" is a keyword
469 469 pat = getstring(x, _("adds requires a pattern"))
470 470 return checkstatus(repo, subset, pat, 1)
471 471
472 472 @predicate('ancestor(*changeset)', safe=True)
473 473 def ancestor(repo, subset, x):
474 474 """A greatest common ancestor of the changesets.
475 475
476 476 Accepts 0 or more changesets.
477 477 Will return empty list when passed no args.
478 478 Greatest common ancestor of a single changeset is that changeset.
479 479 """
480 480 # i18n: "ancestor" is a keyword
481 481 l = getlist(x)
482 482 rl = fullreposet(repo)
483 483 anc = None
484 484
485 485 # (getset(repo, rl, i) for i in l) generates a list of lists
486 486 for revs in (getset(repo, rl, i) for i in l):
487 487 for r in revs:
488 488 if anc is None:
489 489 anc = repo[r]
490 490 else:
491 491 anc = anc.ancestor(repo[r])
492 492
493 493 if anc is not None and anc.rev() in subset:
494 494 return baseset([anc.rev()])
495 495 return baseset()
496 496
497 497 def _ancestors(repo, subset, x, followfirst=False):
498 498 heads = getset(repo, fullreposet(repo), x)
499 499 if not heads:
500 500 return baseset()
501 501 s = _revancestors(repo, heads, followfirst)
502 502 return subset & s
503 503
504 504 @predicate('ancestors(set)', safe=True)
505 505 def ancestors(repo, subset, x):
506 506 """Changesets that are ancestors of a changeset in set.
507 507 """
508 508 return _ancestors(repo, subset, x)
509 509
510 510 @predicate('_firstancestors', safe=True)
511 511 def _firstancestors(repo, subset, x):
512 512 # ``_firstancestors(set)``
513 513 # Like ``ancestors(set)`` but follows only the first parents.
514 514 return _ancestors(repo, subset, x, followfirst=True)
515 515
516 516 def ancestorspec(repo, subset, x, n):
517 517 """``set~n``
518 518 Changesets that are the Nth ancestor (first parents only) of a changeset
519 519 in set.
520 520 """
521 521 try:
522 522 n = int(n[1])
523 523 except (TypeError, ValueError):
524 524 raise error.ParseError(_("~ expects a number"))
525 525 ps = set()
526 526 cl = repo.changelog
527 527 for r in getset(repo, fullreposet(repo), x):
528 528 for i in range(n):
529 529 r = cl.parentrevs(r)[0]
530 530 ps.add(r)
531 531 return subset & ps
532 532
533 533 @predicate('author(string)', safe=True)
534 534 def author(repo, subset, x):
535 535 """Alias for ``user(string)``.
536 536 """
537 537 # i18n: "author" is a keyword
538 538 n = encoding.lower(getstring(x, _("author requires a string")))
539 539 kind, pattern, matcher = _substringmatcher(n)
540 540 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())),
541 541 condrepr=('<user %r>', n))
542 542
543 543 @predicate('bisect(string)', safe=True)
544 544 def bisect(repo, subset, x):
545 545 """Changesets marked in the specified bisect status:
546 546
547 547 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
548 548 - ``goods``, ``bads`` : csets topologically good/bad
549 549 - ``range`` : csets taking part in the bisection
550 550 - ``pruned`` : csets that are goods, bads or skipped
551 551 - ``untested`` : csets whose fate is yet unknown
552 552 - ``ignored`` : csets ignored due to DAG topology
553 553 - ``current`` : the cset currently being bisected
554 554 """
555 555 # i18n: "bisect" is a keyword
556 556 status = getstring(x, _("bisect requires a string")).lower()
557 557 state = set(hbisect.get(repo, status))
558 558 return subset & state
559 559
560 560 # Backward-compatibility
561 561 # - no help entry so that we do not advertise it any more
562 562 @predicate('bisected', safe=True)
563 563 def bisected(repo, subset, x):
564 564 return bisect(repo, subset, x)
565 565
566 566 @predicate('bookmark([name])', safe=True)
567 567 def bookmark(repo, subset, x):
568 568 """The named bookmark or all bookmarks.
569 569
570 570 If `name` starts with `re:`, the remainder of the name is treated as
571 571 a regular expression. To match a bookmark that actually starts with `re:`,
572 572 use the prefix `literal:`.
573 573 """
574 574 # i18n: "bookmark" is a keyword
575 575 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
576 576 if args:
577 577 bm = getstring(args[0],
578 578 # i18n: "bookmark" is a keyword
579 579 _('the argument to bookmark must be a string'))
580 580 kind, pattern, matcher = util.stringmatcher(bm)
581 581 bms = set()
582 582 if kind == 'literal':
583 583 bmrev = repo._bookmarks.get(pattern, None)
584 584 if not bmrev:
585 585 raise error.RepoLookupError(_("bookmark '%s' does not exist")
586 586 % pattern)
587 587 bms.add(repo[bmrev].rev())
588 588 else:
589 589 matchrevs = set()
590 590 for name, bmrev in repo._bookmarks.iteritems():
591 591 if matcher(name):
592 592 matchrevs.add(bmrev)
593 593 if not matchrevs:
594 594 raise error.RepoLookupError(_("no bookmarks exist"
595 595 " that match '%s'") % pattern)
596 596 for bmrev in matchrevs:
597 597 bms.add(repo[bmrev].rev())
598 598 else:
599 599 bms = set([repo[r].rev()
600 600 for r in repo._bookmarks.values()])
601 601 bms -= set([node.nullrev])
602 602 return subset & bms
603 603
604 604 @predicate('branch(string or set)', safe=True)
605 605 def branch(repo, subset, x):
606 606 """
607 607 All changesets belonging to the given branch or the branches of the given
608 608 changesets.
609 609
610 610 If `string` starts with `re:`, the remainder of the name is treated as
611 611 a regular expression. To match a branch that actually starts with `re:`,
612 612 use the prefix `literal:`.
613 613 """
614 614 getbi = repo.revbranchcache().branchinfo
615 615
616 616 try:
617 617 b = getstring(x, '')
618 618 except error.ParseError:
619 619 # not a string, but another revspec, e.g. tip()
620 620 pass
621 621 else:
622 622 kind, pattern, matcher = util.stringmatcher(b)
623 623 if kind == 'literal':
624 624 # note: falls through to the revspec case if no branch with
625 625 # this name exists and pattern kind is not specified explicitly
626 626 if pattern in repo.branchmap():
627 627 return subset.filter(lambda r: matcher(getbi(r)[0]),
628 628 condrepr=('<branch %r>', b))
629 629 if b.startswith('literal:'):
630 630 raise error.RepoLookupError(_("branch '%s' does not exist")
631 631 % pattern)
632 632 else:
633 633 return subset.filter(lambda r: matcher(getbi(r)[0]),
634 634 condrepr=('<branch %r>', b))
635 635
636 636 s = getset(repo, fullreposet(repo), x)
637 637 b = set()
638 638 for r in s:
639 639 b.add(getbi(r)[0])
640 640 c = s.__contains__
641 641 return subset.filter(lambda r: c(r) or getbi(r)[0] in b,
642 642 condrepr=lambda: '<branch %r>' % sorted(b))
643 643
644 644 @predicate('bumped()', safe=True)
645 645 def bumped(repo, subset, x):
646 646 """Mutable changesets marked as successors of public changesets.
647 647
648 648 Only non-public and non-obsolete changesets can be `bumped`.
649 649 """
650 650 # i18n: "bumped" is a keyword
651 651 getargs(x, 0, 0, _("bumped takes no arguments"))
652 652 bumped = obsmod.getrevs(repo, 'bumped')
653 653 return subset & bumped
654 654
655 655 @predicate('bundle()', safe=True)
656 656 def bundle(repo, subset, x):
657 657 """Changesets in the bundle.
658 658
659 659 Bundle must be specified by the -R option."""
660 660
661 661 try:
662 662 bundlerevs = repo.changelog.bundlerevs
663 663 except AttributeError:
664 664 raise error.Abort(_("no bundle provided - specify with -R"))
665 665 return subset & bundlerevs
666 666
667 667 def checkstatus(repo, subset, pat, field):
668 668 hasset = matchmod.patkind(pat) == 'set'
669 669
670 670 mcache = [None]
671 671 def matches(x):
672 672 c = repo[x]
673 673 if not mcache[0] or hasset:
674 674 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
675 675 m = mcache[0]
676 676 fname = None
677 677 if not m.anypats() and len(m.files()) == 1:
678 678 fname = m.files()[0]
679 679 if fname is not None:
680 680 if fname not in c.files():
681 681 return False
682 682 else:
683 683 for f in c.files():
684 684 if m(f):
685 685 break
686 686 else:
687 687 return False
688 688 files = repo.status(c.p1().node(), c.node())[field]
689 689 if fname is not None:
690 690 if fname in files:
691 691 return True
692 692 else:
693 693 for f in files:
694 694 if m(f):
695 695 return True
696 696
697 697 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
698 698
699 699 def _children(repo, subset, parentset):
700 700 if not parentset:
701 701 return baseset()
702 702 cs = set()
703 703 pr = repo.changelog.parentrevs
704 704 minrev = parentset.min()
705 705 for r in subset:
706 706 if r <= minrev:
707 707 continue
708 708 for p in pr(r):
709 709 if p in parentset:
710 710 cs.add(r)
711 711 return baseset(cs)
712 712
713 713 @predicate('children(set)', safe=True)
714 714 def children(repo, subset, x):
715 715 """Child changesets of changesets in set.
716 716 """
717 717 s = getset(repo, fullreposet(repo), x)
718 718 cs = _children(repo, subset, s)
719 719 return subset & cs
720 720
721 721 @predicate('closed()', safe=True)
722 722 def closed(repo, subset, x):
723 723 """Changeset is closed.
724 724 """
725 725 # i18n: "closed" is a keyword
726 726 getargs(x, 0, 0, _("closed takes no arguments"))
727 727 return subset.filter(lambda r: repo[r].closesbranch(),
728 728 condrepr='<branch closed>')
729 729
730 730 @predicate('contains(pattern)')
731 731 def contains(repo, subset, x):
732 732 """The revision's manifest contains a file matching pattern (but might not
733 733 modify it). See :hg:`help patterns` for information about file patterns.
734 734
735 735 The pattern without explicit kind like ``glob:`` is expected to be
736 736 relative to the current directory and match against a file exactly
737 737 for efficiency.
738 738 """
739 739 # i18n: "contains" is a keyword
740 740 pat = getstring(x, _("contains requires a pattern"))
741 741
742 742 def matches(x):
743 743 if not matchmod.patkind(pat):
744 744 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
745 745 if pats in repo[x]:
746 746 return True
747 747 else:
748 748 c = repo[x]
749 749 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
750 750 for f in c.manifest():
751 751 if m(f):
752 752 return True
753 753 return False
754 754
755 755 return subset.filter(matches, condrepr=('<contains %r>', pat))
756 756
757 757 @predicate('converted([id])', safe=True)
758 758 def converted(repo, subset, x):
759 759 """Changesets converted from the given identifier in the old repository if
760 760 present, or all converted changesets if no identifier is specified.
761 761 """
762 762
763 763 # There is exactly no chance of resolving the revision, so do a simple
764 764 # string compare and hope for the best
765 765
766 766 rev = None
767 767 # i18n: "converted" is a keyword
768 768 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
769 769 if l:
770 770 # i18n: "converted" is a keyword
771 771 rev = getstring(l[0], _('converted requires a revision'))
772 772
773 773 def _matchvalue(r):
774 774 source = repo[r].extra().get('convert_revision', None)
775 775 return source is not None and (rev is None or source.startswith(rev))
776 776
777 777 return subset.filter(lambda r: _matchvalue(r),
778 778 condrepr=('<converted %r>', rev))
779 779
780 780 @predicate('date(interval)', safe=True)
781 781 def date(repo, subset, x):
782 782 """Changesets within the interval, see :hg:`help dates`.
783 783 """
784 784 # i18n: "date" is a keyword
785 785 ds = getstring(x, _("date requires a string"))
786 786 dm = util.matchdate(ds)
787 787 return subset.filter(lambda x: dm(repo[x].date()[0]),
788 788 condrepr=('<date %r>', ds))
789 789
790 790 @predicate('desc(string)', safe=True)
791 791 def desc(repo, subset, x):
792 792 """Search commit message for string. The match is case-insensitive.
793 793 """
794 794 # i18n: "desc" is a keyword
795 795 ds = encoding.lower(getstring(x, _("desc requires a string")))
796 796
797 797 def matches(x):
798 798 c = repo[x]
799 799 return ds in encoding.lower(c.description())
800 800
801 801 return subset.filter(matches, condrepr=('<desc %r>', ds))
802 802
803 803 def _descendants(repo, subset, x, followfirst=False):
804 804 roots = getset(repo, fullreposet(repo), x)
805 805 if not roots:
806 806 return baseset()
807 807 s = _revdescendants(repo, roots, followfirst)
808 808
809 809 # Both sets need to be ascending in order to lazily return the union
810 810 # in the correct order.
811 811 base = subset & roots
812 812 desc = subset & s
813 813 result = base + desc
814 814 if subset.isascending():
815 815 result.sort()
816 816 elif subset.isdescending():
817 817 result.sort(reverse=True)
818 818 else:
819 819 result = subset & result
820 820 return result
821 821
822 822 @predicate('descendants(set)', safe=True)
823 823 def descendants(repo, subset, x):
824 824 """Changesets which are descendants of changesets in set.
825 825 """
826 826 return _descendants(repo, subset, x)
827 827
828 828 @predicate('_firstdescendants', safe=True)
829 829 def _firstdescendants(repo, subset, x):
830 830 # ``_firstdescendants(set)``
831 831 # Like ``descendants(set)`` but follows only the first parents.
832 832 return _descendants(repo, subset, x, followfirst=True)
833 833
834 834 @predicate('destination([set])', safe=True)
835 835 def destination(repo, subset, x):
836 836 """Changesets that were created by a graft, transplant or rebase operation,
837 837 with the given revisions specified as the source. Omitting the optional set
838 838 is the same as passing all().
839 839 """
840 840 if x is not None:
841 841 sources = getset(repo, fullreposet(repo), x)
842 842 else:
843 843 sources = fullreposet(repo)
844 844
845 845 dests = set()
846 846
847 847 # subset contains all of the possible destinations that can be returned, so
848 848 # iterate over them and see if their source(s) were provided in the arg set.
849 849 # Even if the immediate src of r is not in the arg set, src's source (or
850 850 # further back) may be. Scanning back further than the immediate src allows
851 851 # transitive transplants and rebases to yield the same results as transitive
852 852 # grafts.
853 853 for r in subset:
854 854 src = _getrevsource(repo, r)
855 855 lineage = None
856 856
857 857 while src is not None:
858 858 if lineage is None:
859 859 lineage = list()
860 860
861 861 lineage.append(r)
862 862
863 863 # The visited lineage is a match if the current source is in the arg
864 864 # set. Since every candidate dest is visited by way of iterating
865 865 # subset, any dests further back in the lineage will be tested by a
866 866 # different iteration over subset. Likewise, if the src was already
867 867 # selected, the current lineage can be selected without going back
868 868 # further.
869 869 if src in sources or src in dests:
870 870 dests.update(lineage)
871 871 break
872 872
873 873 r = src
874 874 src = _getrevsource(repo, r)
875 875
876 876 return subset.filter(dests.__contains__,
877 877 condrepr=lambda: '<destination %r>' % sorted(dests))
878 878
879 879 @predicate('divergent()', safe=True)
880 880 def divergent(repo, subset, x):
881 881 """
882 882 Final successors of changesets with an alternative set of final successors.
883 883 """
884 884 # i18n: "divergent" is a keyword
885 885 getargs(x, 0, 0, _("divergent takes no arguments"))
886 886 divergent = obsmod.getrevs(repo, 'divergent')
887 887 return subset & divergent
888 888
889 889 @predicate('extinct()', safe=True)
890 890 def extinct(repo, subset, x):
891 891 """Obsolete changesets with obsolete descendants only.
892 892 """
893 893 # i18n: "extinct" is a keyword
894 894 getargs(x, 0, 0, _("extinct takes no arguments"))
895 895 extincts = obsmod.getrevs(repo, 'extinct')
896 896 return subset & extincts
897 897
898 898 @predicate('extra(label, [value])', safe=True)
899 899 def extra(repo, subset, x):
900 900 """Changesets with the given label in the extra metadata, with the given
901 901 optional value.
902 902
903 903 If `value` starts with `re:`, the remainder of the value is treated as
904 904 a regular expression. To match a value that actually starts with `re:`,
905 905 use the prefix `literal:`.
906 906 """
907 907 args = getargsdict(x, 'extra', 'label value')
908 908 if 'label' not in args:
909 909 # i18n: "extra" is a keyword
910 910 raise error.ParseError(_('extra takes at least 1 argument'))
911 911 # i18n: "extra" is a keyword
912 912 label = getstring(args['label'], _('first argument to extra must be '
913 913 'a string'))
914 914 value = None
915 915
916 916 if 'value' in args:
917 917 # i18n: "extra" is a keyword
918 918 value = getstring(args['value'], _('second argument to extra must be '
919 919 'a string'))
920 920 kind, value, matcher = util.stringmatcher(value)
921 921
922 922 def _matchvalue(r):
923 923 extra = repo[r].extra()
924 924 return label in extra and (value is None or matcher(extra[label]))
925 925
926 926 return subset.filter(lambda r: _matchvalue(r),
927 927 condrepr=('<extra[%r] %r>', label, value))
928 928
929 929 @predicate('filelog(pattern)', safe=True)
930 930 def filelog(repo, subset, x):
931 931 """Changesets connected to the specified filelog.
932 932
933 933 For performance reasons, visits only revisions mentioned in the file-level
934 934 filelog, rather than filtering through all changesets (much faster, but
935 935 doesn't include deletes or duplicate changes). For a slower, more accurate
936 936 result, use ``file()``.
937 937
938 938 The pattern without explicit kind like ``glob:`` is expected to be
939 939 relative to the current directory and match against a file exactly
940 940 for efficiency.
941 941
942 942 If some linkrev points to revisions filtered by the current repoview, we'll
943 943 work around it to return a non-filtered value.
944 944 """
945 945
946 946 # i18n: "filelog" is a keyword
947 947 pat = getstring(x, _("filelog requires a pattern"))
948 948 s = set()
949 949 cl = repo.changelog
950 950
951 951 if not matchmod.patkind(pat):
952 952 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
953 953 files = [f]
954 954 else:
955 955 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
956 956 files = (f for f in repo[None] if m(f))
957 957
958 958 for f in files:
959 959 fl = repo.file(f)
960 960 known = {}
961 961 scanpos = 0
962 962 for fr in list(fl):
963 963 fn = fl.node(fr)
964 964 if fn in known:
965 965 s.add(known[fn])
966 966 continue
967 967
968 968 lr = fl.linkrev(fr)
969 969 if lr in cl:
970 970 s.add(lr)
971 971 elif scanpos is not None:
972 972 # lowest matching changeset is filtered, scan further
973 973 # ahead in changelog
974 974 start = max(lr, scanpos) + 1
975 975 scanpos = None
976 976 for r in cl.revs(start):
977 977 # minimize parsing of non-matching entries
978 978 if f in cl.revision(r) and f in cl.readfiles(r):
979 979 try:
980 980 # try to use manifest delta fastpath
981 981 n = repo[r].filenode(f)
982 982 if n not in known:
983 983 if n == fn:
984 984 s.add(r)
985 985 scanpos = r
986 986 break
987 987 else:
988 988 known[n] = r
989 989 except error.ManifestLookupError:
990 990 # deletion in changelog
991 991 continue
992 992
993 993 return subset & s
994 994
995 995 @predicate('first(set, [n])', safe=True)
996 996 def first(repo, subset, x):
997 997 """An alias for limit().
998 998 """
999 999 return limit(repo, subset, x)
1000 1000
1001 1001 def _follow(repo, subset, x, name, followfirst=False):
1002 l = getargs(x, 0, 1, _("%s takes no arguments or a pattern") % name)
1002 l = getargs(x, 0, 2, _("%s takes no arguments or a pattern "
1003 "and an optional revset") % name)
1003 1004 c = repo['.']
1004 1005 if l:
1005 1006 x = getstring(l[0], _("%s expected a pattern") % name)
1007 rev = None
1008 if len(l) >= 2:
1009 rev = getset(repo, fullreposet(repo), l[1]).last()
1010 if rev is None:
1011 raise error.RepoLookupError(
1012 _("%s: starting revision set cannot be empty") % name)
1013 c = repo[rev]
1006 1014 matcher = matchmod.match(repo.root, repo.getcwd(), [x],
1007 ctx=repo[None], default='path')
1015 ctx=repo[rev], default='path')
1008 1016
1009 1017 files = c.manifest().walk(matcher)
1010 1018
1011 1019 s = set()
1012 1020 for fname in files:
1013 1021 fctx = c[fname]
1014 1022 s = s.union(set(c.rev() for c in fctx.ancestors(followfirst)))
1015 1023 # include the revision responsible for the most recent version
1016 1024 s.add(fctx.introrev())
1017 1025 else:
1018 1026 s = _revancestors(repo, baseset([c.rev()]), followfirst)
1019 1027
1020 1028 return subset & s
1021 1029
1022 @predicate('follow([pattern])', safe=True)
1030 @predicate('follow([pattern[, startrev]])', safe=True)
1023 1031 def follow(repo, subset, x):
1024 1032 """
1025 1033 An alias for ``::.`` (ancestors of the working directory's first parent).
1026 1034 If pattern is specified, the histories of files matching given
1027 pattern is followed, including copies.
1035 pattern in the revision given by startrev are followed, including copies.
1028 1036 """
1029 1037 return _follow(repo, subset, x, 'follow')
1030 1038
1031 1039 @predicate('_followfirst', safe=True)
1032 1040 def _followfirst(repo, subset, x):
1033 # ``followfirst([pattern])``
1034 # Like ``follow([pattern])`` but follows only the first parent of
1035 # every revisions or files revisions.
1041 # ``followfirst([pattern[, startrev]])``
1042 # Like ``follow([pattern[, startrev]])`` but follows only the first parent
1043 # of every revisions or files revisions.
1036 1044 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1037 1045
1038 1046 @predicate('all()', safe=True)
1039 1047 def getall(repo, subset, x):
1040 1048 """All changesets, the same as ``0:tip``.
1041 1049 """
1042 1050 # i18n: "all" is a keyword
1043 1051 getargs(x, 0, 0, _("all takes no arguments"))
1044 1052 return subset & spanset(repo) # drop "null" if any
1045 1053
1046 1054 @predicate('grep(regex)')
1047 1055 def grep(repo, subset, x):
1048 1056 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1049 1057 to ensure special escape characters are handled correctly. Unlike
1050 1058 ``keyword(string)``, the match is case-sensitive.
1051 1059 """
1052 1060 try:
1053 1061 # i18n: "grep" is a keyword
1054 1062 gr = re.compile(getstring(x, _("grep requires a string")))
1055 1063 except re.error as e:
1056 1064 raise error.ParseError(_('invalid match pattern: %s') % e)
1057 1065
1058 1066 def matches(x):
1059 1067 c = repo[x]
1060 1068 for e in c.files() + [c.user(), c.description()]:
1061 1069 if gr.search(e):
1062 1070 return True
1063 1071 return False
1064 1072
1065 1073 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1066 1074
1067 1075 @predicate('_matchfiles', safe=True)
1068 1076 def _matchfiles(repo, subset, x):
1069 1077 # _matchfiles takes a revset list of prefixed arguments:
1070 1078 #
1071 1079 # [p:foo, i:bar, x:baz]
1072 1080 #
1073 1081 # builds a match object from them and filters subset. Allowed
1074 1082 # prefixes are 'p:' for regular patterns, 'i:' for include
1075 1083 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1076 1084 # a revision identifier, or the empty string to reference the
1077 1085 # working directory, from which the match object is
1078 1086 # initialized. Use 'd:' to set the default matching mode, default
1079 1087 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1080 1088
1081 1089 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1082 1090 pats, inc, exc = [], [], []
1083 1091 rev, default = None, None
1084 1092 for arg in l:
1085 1093 s = getstring(arg, "_matchfiles requires string arguments")
1086 1094 prefix, value = s[:2], s[2:]
1087 1095 if prefix == 'p:':
1088 1096 pats.append(value)
1089 1097 elif prefix == 'i:':
1090 1098 inc.append(value)
1091 1099 elif prefix == 'x:':
1092 1100 exc.append(value)
1093 1101 elif prefix == 'r:':
1094 1102 if rev is not None:
1095 1103 raise error.ParseError('_matchfiles expected at most one '
1096 1104 'revision')
1097 1105 if value != '': # empty means working directory; leave rev as None
1098 1106 rev = value
1099 1107 elif prefix == 'd:':
1100 1108 if default is not None:
1101 1109 raise error.ParseError('_matchfiles expected at most one '
1102 1110 'default mode')
1103 1111 default = value
1104 1112 else:
1105 1113 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1106 1114 if not default:
1107 1115 default = 'glob'
1108 1116
1109 1117 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1110 1118 exclude=exc, ctx=repo[rev], default=default)
1111 1119
1112 1120 # This directly read the changelog data as creating changectx for all
1113 1121 # revisions is quite expensive.
1114 1122 getfiles = repo.changelog.readfiles
1115 1123 wdirrev = node.wdirrev
1116 1124 def matches(x):
1117 1125 if x == wdirrev:
1118 1126 files = repo[x].files()
1119 1127 else:
1120 1128 files = getfiles(x)
1121 1129 for f in files:
1122 1130 if m(f):
1123 1131 return True
1124 1132 return False
1125 1133
1126 1134 return subset.filter(matches,
1127 1135 condrepr=('<matchfiles patterns=%r, include=%r '
1128 1136 'exclude=%r, default=%r, rev=%r>',
1129 1137 pats, inc, exc, default, rev))
1130 1138
1131 1139 @predicate('file(pattern)', safe=True)
1132 1140 def hasfile(repo, subset, x):
1133 1141 """Changesets affecting files matched by pattern.
1134 1142
1135 1143 For a faster but less accurate result, consider using ``filelog()``
1136 1144 instead.
1137 1145
1138 1146 This predicate uses ``glob:`` as the default kind of pattern.
1139 1147 """
1140 1148 # i18n: "file" is a keyword
1141 1149 pat = getstring(x, _("file requires a pattern"))
1142 1150 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1143 1151
1144 1152 @predicate('head()', safe=True)
1145 1153 def head(repo, subset, x):
1146 1154 """Changeset is a named branch head.
1147 1155 """
1148 1156 # i18n: "head" is a keyword
1149 1157 getargs(x, 0, 0, _("head takes no arguments"))
1150 1158 hs = set()
1151 1159 cl = repo.changelog
1152 1160 for ls in repo.branchmap().itervalues():
1153 1161 hs.update(cl.rev(h) for h in ls)
1154 1162 return subset & baseset(hs)
1155 1163
1156 1164 @predicate('heads(set)', safe=True)
1157 1165 def heads(repo, subset, x):
1158 1166 """Members of set with no children in set.
1159 1167 """
1160 1168 s = getset(repo, subset, x)
1161 1169 ps = parents(repo, subset, x)
1162 1170 return s - ps
1163 1171
1164 1172 @predicate('hidden()', safe=True)
1165 1173 def hidden(repo, subset, x):
1166 1174 """Hidden changesets.
1167 1175 """
1168 1176 # i18n: "hidden" is a keyword
1169 1177 getargs(x, 0, 0, _("hidden takes no arguments"))
1170 1178 hiddenrevs = repoview.filterrevs(repo, 'visible')
1171 1179 return subset & hiddenrevs
1172 1180
1173 1181 @predicate('keyword(string)', safe=True)
1174 1182 def keyword(repo, subset, x):
1175 1183 """Search commit message, user name, and names of changed files for
1176 1184 string. The match is case-insensitive.
1177 1185 """
1178 1186 # i18n: "keyword" is a keyword
1179 1187 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1180 1188
1181 1189 def matches(r):
1182 1190 c = repo[r]
1183 1191 return any(kw in encoding.lower(t)
1184 1192 for t in c.files() + [c.user(), c.description()])
1185 1193
1186 1194 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1187 1195
1188 1196 @predicate('limit(set[, n[, offset]])', safe=True)
1189 1197 def limit(repo, subset, x):
1190 1198 """First n members of set, defaulting to 1, starting from offset.
1191 1199 """
1192 1200 args = getargsdict(x, 'limit', 'set n offset')
1193 1201 if 'set' not in args:
1194 1202 # i18n: "limit" is a keyword
1195 1203 raise error.ParseError(_("limit requires one to three arguments"))
1196 1204 try:
1197 1205 lim, ofs = 1, 0
1198 1206 if 'n' in args:
1199 1207 # i18n: "limit" is a keyword
1200 1208 lim = int(getstring(args['n'], _("limit requires a number")))
1201 1209 if 'offset' in args:
1202 1210 # i18n: "limit" is a keyword
1203 1211 ofs = int(getstring(args['offset'], _("limit requires a number")))
1204 1212 if ofs < 0:
1205 1213 raise error.ParseError(_("negative offset"))
1206 1214 except (TypeError, ValueError):
1207 1215 # i18n: "limit" is a keyword
1208 1216 raise error.ParseError(_("limit expects a number"))
1209 1217 os = getset(repo, fullreposet(repo), args['set'])
1210 1218 result = []
1211 1219 it = iter(os)
1212 1220 for x in xrange(ofs):
1213 1221 y = next(it, None)
1214 1222 if y is None:
1215 1223 break
1216 1224 for x in xrange(lim):
1217 1225 y = next(it, None)
1218 1226 if y is None:
1219 1227 break
1220 1228 elif y in subset:
1221 1229 result.append(y)
1222 1230 return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>',
1223 1231 lim, ofs, subset, os))
1224 1232
1225 1233 @predicate('last(set, [n])', safe=True)
1226 1234 def last(repo, subset, x):
1227 1235 """Last n members of set, defaulting to 1.
1228 1236 """
1229 1237 # i18n: "last" is a keyword
1230 1238 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1231 1239 try:
1232 1240 lim = 1
1233 1241 if len(l) == 2:
1234 1242 # i18n: "last" is a keyword
1235 1243 lim = int(getstring(l[1], _("last requires a number")))
1236 1244 except (TypeError, ValueError):
1237 1245 # i18n: "last" is a keyword
1238 1246 raise error.ParseError(_("last expects a number"))
1239 1247 os = getset(repo, fullreposet(repo), l[0])
1240 1248 os.reverse()
1241 1249 result = []
1242 1250 it = iter(os)
1243 1251 for x in xrange(lim):
1244 1252 y = next(it, None)
1245 1253 if y is None:
1246 1254 break
1247 1255 elif y in subset:
1248 1256 result.append(y)
1249 1257 return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os))
1250 1258
1251 1259 @predicate('max(set)', safe=True)
1252 1260 def maxrev(repo, subset, x):
1253 1261 """Changeset with highest revision number in set.
1254 1262 """
1255 1263 os = getset(repo, fullreposet(repo), x)
1256 1264 try:
1257 1265 m = os.max()
1258 1266 if m in subset:
1259 1267 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1260 1268 except ValueError:
1261 1269 # os.max() throws a ValueError when the collection is empty.
1262 1270 # Same as python's max().
1263 1271 pass
1264 1272 return baseset(datarepr=('<max %r, %r>', subset, os))
1265 1273
1266 1274 @predicate('merge()', safe=True)
1267 1275 def merge(repo, subset, x):
1268 1276 """Changeset is a merge changeset.
1269 1277 """
1270 1278 # i18n: "merge" is a keyword
1271 1279 getargs(x, 0, 0, _("merge takes no arguments"))
1272 1280 cl = repo.changelog
1273 1281 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1274 1282 condrepr='<merge>')
1275 1283
1276 1284 @predicate('branchpoint()', safe=True)
1277 1285 def branchpoint(repo, subset, x):
1278 1286 """Changesets with more than one child.
1279 1287 """
1280 1288 # i18n: "branchpoint" is a keyword
1281 1289 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1282 1290 cl = repo.changelog
1283 1291 if not subset:
1284 1292 return baseset()
1285 1293 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1286 1294 # (and if it is not, it should.)
1287 1295 baserev = min(subset)
1288 1296 parentscount = [0]*(len(repo) - baserev)
1289 1297 for r in cl.revs(start=baserev + 1):
1290 1298 for p in cl.parentrevs(r):
1291 1299 if p >= baserev:
1292 1300 parentscount[p - baserev] += 1
1293 1301 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1294 1302 condrepr='<branchpoint>')
1295 1303
1296 1304 @predicate('min(set)', safe=True)
1297 1305 def minrev(repo, subset, x):
1298 1306 """Changeset with lowest revision number in set.
1299 1307 """
1300 1308 os = getset(repo, fullreposet(repo), x)
1301 1309 try:
1302 1310 m = os.min()
1303 1311 if m in subset:
1304 1312 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1305 1313 except ValueError:
1306 1314 # os.min() throws a ValueError when the collection is empty.
1307 1315 # Same as python's min().
1308 1316 pass
1309 1317 return baseset(datarepr=('<min %r, %r>', subset, os))
1310 1318
1311 1319 @predicate('modifies(pattern)', safe=True)
1312 1320 def modifies(repo, subset, x):
1313 1321 """Changesets modifying files matched by pattern.
1314 1322
1315 1323 The pattern without explicit kind like ``glob:`` is expected to be
1316 1324 relative to the current directory and match against a file or a
1317 1325 directory.
1318 1326 """
1319 1327 # i18n: "modifies" is a keyword
1320 1328 pat = getstring(x, _("modifies requires a pattern"))
1321 1329 return checkstatus(repo, subset, pat, 0)
1322 1330
1323 1331 @predicate('named(namespace)')
1324 1332 def named(repo, subset, x):
1325 1333 """The changesets in a given namespace.
1326 1334
1327 1335 If `namespace` starts with `re:`, the remainder of the string is treated as
1328 1336 a regular expression. To match a namespace that actually starts with `re:`,
1329 1337 use the prefix `literal:`.
1330 1338 """
1331 1339 # i18n: "named" is a keyword
1332 1340 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1333 1341
1334 1342 ns = getstring(args[0],
1335 1343 # i18n: "named" is a keyword
1336 1344 _('the argument to named must be a string'))
1337 1345 kind, pattern, matcher = util.stringmatcher(ns)
1338 1346 namespaces = set()
1339 1347 if kind == 'literal':
1340 1348 if pattern not in repo.names:
1341 1349 raise error.RepoLookupError(_("namespace '%s' does not exist")
1342 1350 % ns)
1343 1351 namespaces.add(repo.names[pattern])
1344 1352 else:
1345 1353 for name, ns in repo.names.iteritems():
1346 1354 if matcher(name):
1347 1355 namespaces.add(ns)
1348 1356 if not namespaces:
1349 1357 raise error.RepoLookupError(_("no namespace exists"
1350 1358 " that match '%s'") % pattern)
1351 1359
1352 1360 names = set()
1353 1361 for ns in namespaces:
1354 1362 for name in ns.listnames(repo):
1355 1363 if name not in ns.deprecated:
1356 1364 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1357 1365
1358 1366 names -= set([node.nullrev])
1359 1367 return subset & names
1360 1368
1361 1369 @predicate('id(string)', safe=True)
1362 1370 def node_(repo, subset, x):
1363 1371 """Revision non-ambiguously specified by the given hex string prefix.
1364 1372 """
1365 1373 # i18n: "id" is a keyword
1366 1374 l = getargs(x, 1, 1, _("id requires one argument"))
1367 1375 # i18n: "id" is a keyword
1368 1376 n = getstring(l[0], _("id requires a string"))
1369 1377 if len(n) == 40:
1370 1378 try:
1371 1379 rn = repo.changelog.rev(node.bin(n))
1372 1380 except (LookupError, TypeError):
1373 1381 rn = None
1374 1382 else:
1375 1383 rn = None
1376 1384 pm = repo.changelog._partialmatch(n)
1377 1385 if pm is not None:
1378 1386 rn = repo.changelog.rev(pm)
1379 1387
1380 1388 if rn is None:
1381 1389 return baseset()
1382 1390 result = baseset([rn])
1383 1391 return result & subset
1384 1392
1385 1393 @predicate('obsolete()', safe=True)
1386 1394 def obsolete(repo, subset, x):
1387 1395 """Mutable changeset with a newer version."""
1388 1396 # i18n: "obsolete" is a keyword
1389 1397 getargs(x, 0, 0, _("obsolete takes no arguments"))
1390 1398 obsoletes = obsmod.getrevs(repo, 'obsolete')
1391 1399 return subset & obsoletes
1392 1400
1393 1401 @predicate('only(set, [set])', safe=True)
1394 1402 def only(repo, subset, x):
1395 1403 """Changesets that are ancestors of the first set that are not ancestors
1396 1404 of any other head in the repo. If a second set is specified, the result
1397 1405 is ancestors of the first set that are not ancestors of the second set
1398 1406 (i.e. ::<set1> - ::<set2>).
1399 1407 """
1400 1408 cl = repo.changelog
1401 1409 # i18n: "only" is a keyword
1402 1410 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1403 1411 include = getset(repo, fullreposet(repo), args[0])
1404 1412 if len(args) == 1:
1405 1413 if not include:
1406 1414 return baseset()
1407 1415
1408 1416 descendants = set(_revdescendants(repo, include, False))
1409 1417 exclude = [rev for rev in cl.headrevs()
1410 1418 if not rev in descendants and not rev in include]
1411 1419 else:
1412 1420 exclude = getset(repo, fullreposet(repo), args[1])
1413 1421
1414 1422 results = set(cl.findmissingrevs(common=exclude, heads=include))
1415 1423 # XXX we should turn this into a baseset instead of a set, smartset may do
1416 1424 # some optimisations from the fact this is a baseset.
1417 1425 return subset & results
1418 1426
1419 1427 @predicate('origin([set])', safe=True)
1420 1428 def origin(repo, subset, x):
1421 1429 """
1422 1430 Changesets that were specified as a source for the grafts, transplants or
1423 1431 rebases that created the given revisions. Omitting the optional set is the
1424 1432 same as passing all(). If a changeset created by these operations is itself
1425 1433 specified as a source for one of these operations, only the source changeset
1426 1434 for the first operation is selected.
1427 1435 """
1428 1436 if x is not None:
1429 1437 dests = getset(repo, fullreposet(repo), x)
1430 1438 else:
1431 1439 dests = fullreposet(repo)
1432 1440
1433 1441 def _firstsrc(rev):
1434 1442 src = _getrevsource(repo, rev)
1435 1443 if src is None:
1436 1444 return None
1437 1445
1438 1446 while True:
1439 1447 prev = _getrevsource(repo, src)
1440 1448
1441 1449 if prev is None:
1442 1450 return src
1443 1451 src = prev
1444 1452
1445 1453 o = set([_firstsrc(r) for r in dests])
1446 1454 o -= set([None])
1447 1455 # XXX we should turn this into a baseset instead of a set, smartset may do
1448 1456 # some optimisations from the fact this is a baseset.
1449 1457 return subset & o
1450 1458
1451 1459 @predicate('outgoing([path])', safe=True)
1452 1460 def outgoing(repo, subset, x):
1453 1461 """Changesets not found in the specified destination repository, or the
1454 1462 default push location.
1455 1463 """
1456 1464 # Avoid cycles.
1457 1465 from . import (
1458 1466 discovery,
1459 1467 hg,
1460 1468 )
1461 1469 # i18n: "outgoing" is a keyword
1462 1470 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1463 1471 # i18n: "outgoing" is a keyword
1464 1472 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1465 1473 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1466 1474 dest, branches = hg.parseurl(dest)
1467 1475 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1468 1476 if revs:
1469 1477 revs = [repo.lookup(rev) for rev in revs]
1470 1478 other = hg.peer(repo, {}, dest)
1471 1479 repo.ui.pushbuffer()
1472 1480 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1473 1481 repo.ui.popbuffer()
1474 1482 cl = repo.changelog
1475 1483 o = set([cl.rev(r) for r in outgoing.missing])
1476 1484 return subset & o
1477 1485
1478 1486 @predicate('p1([set])', safe=True)
1479 1487 def p1(repo, subset, x):
1480 1488 """First parent of changesets in set, or the working directory.
1481 1489 """
1482 1490 if x is None:
1483 1491 p = repo[x].p1().rev()
1484 1492 if p >= 0:
1485 1493 return subset & baseset([p])
1486 1494 return baseset()
1487 1495
1488 1496 ps = set()
1489 1497 cl = repo.changelog
1490 1498 for r in getset(repo, fullreposet(repo), x):
1491 1499 ps.add(cl.parentrevs(r)[0])
1492 1500 ps -= set([node.nullrev])
1493 1501 # XXX we should turn this into a baseset instead of a set, smartset may do
1494 1502 # some optimisations from the fact this is a baseset.
1495 1503 return subset & ps
1496 1504
1497 1505 @predicate('p2([set])', safe=True)
1498 1506 def p2(repo, subset, x):
1499 1507 """Second parent of changesets in set, or the working directory.
1500 1508 """
1501 1509 if x is None:
1502 1510 ps = repo[x].parents()
1503 1511 try:
1504 1512 p = ps[1].rev()
1505 1513 if p >= 0:
1506 1514 return subset & baseset([p])
1507 1515 return baseset()
1508 1516 except IndexError:
1509 1517 return baseset()
1510 1518
1511 1519 ps = set()
1512 1520 cl = repo.changelog
1513 1521 for r in getset(repo, fullreposet(repo), x):
1514 1522 ps.add(cl.parentrevs(r)[1])
1515 1523 ps -= set([node.nullrev])
1516 1524 # XXX we should turn this into a baseset instead of a set, smartset may do
1517 1525 # some optimisations from the fact this is a baseset.
1518 1526 return subset & ps
1519 1527
1520 1528 @predicate('parents([set])', safe=True)
1521 1529 def parents(repo, subset, x):
1522 1530 """
1523 1531 The set of all parents for all changesets in set, or the working directory.
1524 1532 """
1525 1533 if x is None:
1526 1534 ps = set(p.rev() for p in repo[x].parents())
1527 1535 else:
1528 1536 ps = set()
1529 1537 cl = repo.changelog
1530 1538 up = ps.update
1531 1539 parentrevs = cl.parentrevs
1532 1540 for r in getset(repo, fullreposet(repo), x):
1533 1541 if r == node.wdirrev:
1534 1542 up(p.rev() for p in repo[r].parents())
1535 1543 else:
1536 1544 up(parentrevs(r))
1537 1545 ps -= set([node.nullrev])
1538 1546 return subset & ps
1539 1547
1540 1548 def _phase(repo, subset, target):
1541 1549 """helper to select all rev in phase <target>"""
1542 1550 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1543 1551 if repo._phasecache._phasesets:
1544 1552 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1545 1553 s = baseset(s)
1546 1554 s.sort() # sets are unordered, so we enforce ascending order
1547 1555 return subset & s
1548 1556 else:
1549 1557 phase = repo._phasecache.phase
1550 1558 condition = lambda r: phase(repo, r) == target
1551 1559 return subset.filter(condition, condrepr=('<phase %r>', target),
1552 1560 cache=False)
1553 1561
1554 1562 @predicate('draft()', safe=True)
1555 1563 def draft(repo, subset, x):
1556 1564 """Changeset in draft phase."""
1557 1565 # i18n: "draft" is a keyword
1558 1566 getargs(x, 0, 0, _("draft takes no arguments"))
1559 1567 target = phases.draft
1560 1568 return _phase(repo, subset, target)
1561 1569
1562 1570 @predicate('secret()', safe=True)
1563 1571 def secret(repo, subset, x):
1564 1572 """Changeset in secret phase."""
1565 1573 # i18n: "secret" is a keyword
1566 1574 getargs(x, 0, 0, _("secret takes no arguments"))
1567 1575 target = phases.secret
1568 1576 return _phase(repo, subset, target)
1569 1577
1570 1578 def parentspec(repo, subset, x, n):
1571 1579 """``set^0``
1572 1580 The set.
1573 1581 ``set^1`` (or ``set^``), ``set^2``
1574 1582 First or second parent, respectively, of all changesets in set.
1575 1583 """
1576 1584 try:
1577 1585 n = int(n[1])
1578 1586 if n not in (0, 1, 2):
1579 1587 raise ValueError
1580 1588 except (TypeError, ValueError):
1581 1589 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1582 1590 ps = set()
1583 1591 cl = repo.changelog
1584 1592 for r in getset(repo, fullreposet(repo), x):
1585 1593 if n == 0:
1586 1594 ps.add(r)
1587 1595 elif n == 1:
1588 1596 ps.add(cl.parentrevs(r)[0])
1589 1597 elif n == 2:
1590 1598 parents = cl.parentrevs(r)
1591 1599 if len(parents) > 1:
1592 1600 ps.add(parents[1])
1593 1601 return subset & ps
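# Illustration (a minimal sketch of the '^' suffix handled by parentspec above):
#
#     hg log -r 'tip^'     # same as tip^1: the first parent of tip
#     hg log -r 'tip^2'    # second parent of tip; empty if tip is not a merge
#     hg log -r 'tip^0'    # tip itself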
1594 1602
1595 1603 @predicate('present(set)', safe=True)
1596 1604 def present(repo, subset, x):
1597 1605 """An empty set, if any revision in set isn't found; otherwise,
1598 1606 all revisions in set.
1599 1607
1600 1608 If any of the specified revisions is not present in the local repository,
1601 1609 the query is normally aborted. But this predicate allows the query
1602 1610 to continue even in such cases.
1603 1611 """
1604 1612 try:
1605 1613 return getset(repo, subset, x)
1606 1614 except error.RepoLookupError:
1607 1615 return baseset()
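# Illustration (a minimal sketch; 'badtag' is a hypothetical label that may not
# exist in the repository):
#
#     hg log -r 'badtag or tip'           # aborts: unknown revision 'badtag'
#     hg log -r 'present(badtag) or tip'  # silently degrades to just 'tip'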
1608 1616
1609 1617 # for internal use
1610 1618 @predicate('_notpublic', safe=True)
1611 1619 def _notpublic(repo, subset, x):
1612 1620 getargs(x, 0, 0, "_notpublic takes no arguments")
1613 1621 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1614 1622 if repo._phasecache._phasesets:
1615 1623 s = set()
1616 1624 for u in repo._phasecache._phasesets[1:]:
1617 1625 s.update(u)
1618 1626 s = baseset(s - repo.changelog.filteredrevs)
1619 1627 s.sort()
1620 1628 return subset & s
1621 1629 else:
1622 1630 phase = repo._phasecache.phase
1623 1631 target = phases.public
1624 1632 condition = lambda r: phase(repo, r) != target
1625 1633 return subset.filter(condition, condrepr=('<phase %r>', target),
1626 1634 cache=False)
1627 1635
1628 1636 @predicate('public()', safe=True)
1629 1637 def public(repo, subset, x):
1630 1638 """Changeset in public phase."""
1631 1639 # i18n: "public" is a keyword
1632 1640 getargs(x, 0, 0, _("public takes no arguments"))
1633 1641 phase = repo._phasecache.phase
1634 1642 target = phases.public
1635 1643 condition = lambda r: phase(repo, r) == target
1636 1644 return subset.filter(condition, condrepr=('<phase %r>', target),
1637 1645 cache=False)
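# Illustration (a minimal sketch of the phase predicates defined above):
#
#     hg log -r 'public()'      # changesets already published (e.g. pushed to a publishing server)
#     hg log -r 'draft()'       # local changesets not yet published
#     hg log -r 'secret()'      # changesets excluded from push and clone
#     hg log -r 'not public()'  # rewritten to _notpublic() by the optimizer below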
1638 1646
1639 1647 @predicate('remote([id [,path]])', safe=True)
1640 1648 def remote(repo, subset, x):
1641 1649 """Local revision that corresponds to the given identifier in a
1642 1650 remote repository, if present. Here, the '.' identifier is a
1643 1651 synonym for the current local branch.
1644 1652 """
1645 1653
1646 1654 from . import hg # avoid start-up nasties
1647 1655 # i18n: "remote" is a keyword
1648 1656 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1649 1657
1650 1658 q = '.'
1651 1659 if len(l) > 0:
1652 1660 # i18n: "remote" is a keyword
1653 1661 q = getstring(l[0], _("remote requires a string id"))
1654 1662 if q == '.':
1655 1663 q = repo['.'].branch()
1656 1664
1657 1665 dest = ''
1658 1666 if len(l) > 1:
1659 1667 # i18n: "remote" is a keyword
1660 1668 dest = getstring(l[1], _("remote requires a repository path"))
1661 1669 dest = repo.ui.expandpath(dest or 'default')
1662 1670 dest, branches = hg.parseurl(dest)
1663 1671 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1664 1672 if revs:
1665 1673 revs = [repo.lookup(rev) for rev in revs]
1666 1674 other = hg.peer(repo, {}, dest)
1667 1675 n = other.lookup(q)
1668 1676 if n in repo:
1669 1677 r = repo[n].rev()
1670 1678 if r in subset:
1671 1679 return baseset([r])
1672 1680 return baseset()
1673 1681
1674 1682 @predicate('removes(pattern)', safe=True)
1675 1683 def removes(repo, subset, x):
1676 1684 """Changesets which remove files matching pattern.
1677 1685
1678 1686 The pattern without explicit kind like ``glob:`` is expected to be
1679 1687 relative to the current directory and match against a file or a
1680 1688 directory.
1681 1689 """
1682 1690 # i18n: "removes" is a keyword
1683 1691 pat = getstring(x, _("removes requires a pattern"))
1684 1692 return checkstatus(repo, subset, pat, 2)
1685 1693
1686 1694 @predicate('rev(number)', safe=True)
1687 1695 def rev(repo, subset, x):
1688 1696 """Revision with the given numeric identifier.
1689 1697 """
1690 1698 # i18n: "rev" is a keyword
1691 1699 l = getargs(x, 1, 1, _("rev requires one argument"))
1692 1700 try:
1693 1701 # i18n: "rev" is a keyword
1694 1702 l = int(getstring(l[0], _("rev requires a number")))
1695 1703 except (TypeError, ValueError):
1696 1704 # i18n: "rev" is a keyword
1697 1705 raise error.ParseError(_("rev expects a number"))
1698 1706 if l not in repo.changelog and l != node.nullrev:
1699 1707 return baseset()
1700 1708 return subset & baseset([l])
1701 1709
1702 1710 @predicate('matching(revision [, field])', safe=True)
1703 1711 def matching(repo, subset, x):
1704 1712 """Changesets in which a given set of fields match the set of fields in the
1705 1713 selected revision or set.
1706 1714
1707 1715 To match more than one field, pass the list of fields to match separated
1708 1716 by spaces (e.g. ``author description``).
1709 1717
1710 1718 Valid fields are most regular revision fields and some special fields.
1711 1719
1712 1720 Regular revision fields are ``description``, ``author``, ``branch``,
1713 1721 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1714 1722 and ``diff``.
1715 1723 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1716 1724 contents of the revision. Two revisions matching their ``diff`` will
1717 1725 also match their ``files``.
1718 1726
1719 1727 Special fields are ``summary`` and ``metadata``:
1720 1728 ``summary`` matches the first line of the description.
1721 1729 ``metadata`` is equivalent to matching ``description user date``
1722 1730 (i.e. it matches the main metadata fields).
1723 1731
1724 1732 ``metadata`` is the default field which is used when no fields are
1725 1733 specified. You can match more than one field at a time.
1726 1734 """
1727 1735 # i18n: "matching" is a keyword
1728 1736 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1729 1737
1730 1738 revs = getset(repo, fullreposet(repo), l[0])
1731 1739
1732 1740 fieldlist = ['metadata']
1733 1741 if len(l) > 1:
1734 1742 fieldlist = getstring(l[1],
1735 1743 # i18n: "matching" is a keyword
1736 1744 _("matching requires a string "
1737 1745 "as its second argument")).split()
1738 1746
1739 1747 # Make sure that there are no repeated fields,
1740 1748 # expand the 'special' 'metadata' field type
1741 1749 # and check the 'files' whenever we check the 'diff'
1742 1750 fields = []
1743 1751 for field in fieldlist:
1744 1752 if field == 'metadata':
1745 1753 fields += ['user', 'description', 'date']
1746 1754 elif field == 'diff':
1747 1755 # a revision matching the diff must also match the files
1748 1756 # since matching the diff is very costly, make sure to
1749 1757 # also match the files first
1750 1758 fields += ['files', 'diff']
1751 1759 else:
1752 1760 if field == 'author':
1753 1761 field = 'user'
1754 1762 fields.append(field)
1755 1763 fields = set(fields)
1756 1764 if 'summary' in fields and 'description' in fields:
1757 1765 # If a revision matches its description it also matches its summary
1758 1766 fields.discard('summary')
1759 1767
1760 1768 # We may want to match more than one field
1761 1769 # Not all fields take the same amount of time to be matched
1762 1770 # Sort the selected fields in order of increasing matching cost
1763 1771 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1764 1772 'files', 'description', 'substate', 'diff']
1765 1773 def fieldkeyfunc(f):
1766 1774 try:
1767 1775 return fieldorder.index(f)
1768 1776 except ValueError:
1769 1777 # assume an unknown field is very costly
1770 1778 return len(fieldorder)
1771 1779 fields = list(fields)
1772 1780 fields.sort(key=fieldkeyfunc)
1773 1781
1774 1782 # Each field will be matched with its own "getfield" function
1775 1783 # which will be added to the getfieldfuncs array of functions
1776 1784 getfieldfuncs = []
1777 1785 _funcs = {
1778 1786 'user': lambda r: repo[r].user(),
1779 1787 'branch': lambda r: repo[r].branch(),
1780 1788 'date': lambda r: repo[r].date(),
1781 1789 'description': lambda r: repo[r].description(),
1782 1790 'files': lambda r: repo[r].files(),
1783 1791 'parents': lambda r: repo[r].parents(),
1784 1792 'phase': lambda r: repo[r].phase(),
1785 1793 'substate': lambda r: repo[r].substate,
1786 1794 'summary': lambda r: repo[r].description().splitlines()[0],
1787 1795 'diff': lambda r: list(repo[r].diff(git=True),)
1788 1796 }
1789 1797 for info in fields:
1790 1798 getfield = _funcs.get(info, None)
1791 1799 if getfield is None:
1792 1800 raise error.ParseError(
1793 1801 # i18n: "matching" is a keyword
1794 1802 _("unexpected field name passed to matching: %s") % info)
1795 1803 getfieldfuncs.append(getfield)
1796 1804 # convert the getfield array of functions into a "getinfo" function
1797 1805 # which returns an array of field values (or a single value if there
1798 1806 # is only one field to match)
1799 1807 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1800 1808
1801 1809 def matches(x):
1802 1810 for rev in revs:
1803 1811 target = getinfo(rev)
1804 1812 match = True
1805 1813 for n, f in enumerate(getfieldfuncs):
1806 1814 if target[n] != f(x):
1807 1815 match = False
1808 1816 if match:
1809 1817 return True
1810 1818 return False
1811 1819
1812 1820 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
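# Illustration (a minimal sketch of the field matching described above; the
# revision numbers are hypothetical):
#
#     hg log -r 'matching(tip)'                # same user, description and date as tip
#     hg log -r 'matching(42, "author date")'  # same author and date as revision 42
#     hg log -r 'matching(42, "diff")'         # same diff (and therefore the same files)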
1813 1821
1814 1822 @predicate('reverse(set)', safe=True)
1815 1823 def reverse(repo, subset, x):
1816 1824 """Reverse order of set.
1817 1825 """
1818 1826 l = getset(repo, subset, x)
1819 1827 l.reverse()
1820 1828 return l
1821 1829
1822 1830 @predicate('roots(set)', safe=True)
1823 1831 def roots(repo, subset, x):
1824 1832 """Changesets in set with no parent changeset in set.
1825 1833 """
1826 1834 s = getset(repo, fullreposet(repo), x)
1827 1835 parents = repo.changelog.parentrevs
1828 1836 def filter(r):
1829 1837 for p in parents(r):
1830 1838 if 0 <= p and p in s:
1831 1839 return False
1832 1840 return True
1833 1841 return subset & s.filter(filter, condrepr='<roots>')
1834 1842
1835 1843 _sortkeyfuncs = {
1836 1844 'rev': lambda c: c.rev(),
1837 1845 'branch': lambda c: c.branch(),
1838 1846 'desc': lambda c: c.description(),
1839 1847 'user': lambda c: c.user(),
1840 1848 'author': lambda c: c.user(),
1841 1849 'date': lambda c: c.date()[0],
1842 1850 }
1843 1851
1844 1852 def _getsortargs(x):
1845 1853 """Parse sort options into (set, [(key, reverse)], opts)"""
1846 1854 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1847 1855 if 'set' not in args:
1848 1856 # i18n: "sort" is a keyword
1849 1857 raise error.ParseError(_('sort requires one or two arguments'))
1850 1858 keys = "rev"
1851 1859 if 'keys' in args:
1852 1860 # i18n: "sort" is a keyword
1853 1861 keys = getstring(args['keys'], _("sort spec must be a string"))
1854 1862
1855 1863 keyflags = []
1856 1864 for k in keys.split():
1857 1865 fk = k
1858 1866 reverse = (k[0] == '-')
1859 1867 if reverse:
1860 1868 k = k[1:]
1861 1869 if k not in _sortkeyfuncs and k != 'topo':
1862 1870 raise error.ParseError(_("unknown sort key %r") % fk)
1863 1871 keyflags.append((k, reverse))
1864 1872
1865 1873 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1866 1874 # i18n: "topo" is a keyword
1867 1875 raise error.ParseError(_('topo sort order cannot be combined '
1868 1876 'with other sort keys'))
1869 1877
1870 1878 opts = {}
1871 1879 if 'topo.firstbranch' in args:
1872 1880 if any(k == 'topo' for k, reverse in keyflags):
1873 1881 opts['topo.firstbranch'] = args['topo.firstbranch']
1874 1882 else:
1875 1883 # i18n: "topo" and "topo.firstbranch" are keywords
1876 1884 raise error.ParseError(_('topo.firstbranch can only be used '
1877 1885 'when using the topo sort key'))
1878 1886
1879 1887 return args['set'], keyflags, opts
1880 1888
1881 1889 @predicate('sort(set[, [-]key... [, ...]])', safe=True)
1882 1890 def sort(repo, subset, x):
1883 1891 """Sort set by keys. The default sort order is ascending, specify a key
1884 1892 as ``-key`` to sort in descending order.
1885 1893
1886 1894 The keys can be:
1887 1895
1888 1896 - ``rev`` for the revision number,
1889 1897 - ``branch`` for the branch name,
1890 1898 - ``desc`` for the commit message (description),
1891 1899 - ``user`` for user name (``author`` can be used as an alias),
1892 1900 - ``date`` for the commit date,
1893 1901 - ``topo`` for a reverse topographical sort
1894 1902
1895 1903 The ``topo`` sort order cannot be combined with other sort keys. This sort
1896 1904 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1897 1905 specifies what topographical branches to prioritize in the sort.
1898 1906
1899 1907 """
1900 1908 s, keyflags, opts = _getsortargs(x)
1901 1909 revs = getset(repo, subset, s)
1902 1910
1903 1911 if not keyflags:
1904 1912 return revs
1905 1913 if len(keyflags) == 1 and keyflags[0][0] == "rev":
1906 1914 revs.sort(reverse=keyflags[0][1])
1907 1915 return revs
1908 1916 elif keyflags[0][0] == "topo":
1909 1917 firstbranch = ()
1910 1918 if 'topo.firstbranch' in opts:
1911 1919 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1912 1920 revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
1913 1921 istopo=True)
1914 1922 if keyflags[0][1]:
1915 1923 revs.reverse()
1916 1924 return revs
1917 1925
1918 1926 # sort() is guaranteed to be stable
1919 1927 ctxs = [repo[r] for r in revs]
1920 1928 for k, reverse in reversed(keyflags):
1921 1929 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1922 1930 return baseset([c.rev() for c in ctxs])
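# Illustration (a minimal sketch of the sort keys parsed above; 'stable' is a
# hypothetical branch name):
#
#     hg log -r 'sort(all(), "-date")'                # newest first
#     hg log -r 'sort(branch(default), "user -rev")'  # by user, then newest revision first
#     hg log -r 'sort(all(), topo, topo.firstbranch=branch(stable))'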
1923 1931
1924 1932 def _toposort(revs, parentsfunc, firstbranch=()):
1925 1933 """Yield revisions from heads to roots one (topo) branch at a time.
1926 1934
1927 1935 This function is intended for use by a graph generator that wishes to
1928 1936 minimize the number of parallel branches and their interleaving.
1929 1937
1930 1938 Example iteration order (numbers show the "true" order in a changelog):
1931 1939
1932 1940 o 4
1933 1941 |
1934 1942 o 1
1935 1943 |
1936 1944 | o 3
1937 1945 | |
1938 1946 | o 2
1939 1947 |/
1940 1948 o 0
1941 1949
1942 1950 Note that the ancestors of merges are understood by the current
1943 1951 algorithm to be on the same branch. This means no reordering will
1944 1952 occur behind a merge.
1945 1953 """
1946 1954
1947 1955 ### Quick summary of the algorithm
1948 1956 #
1949 1957 # This function is based around a "retention" principle. We keep revisions
1950 1958 # in memory until we are ready to emit a whole branch that immediately
1951 1959 # "merges" into an existing one. This reduces the number of parallel
1952 1960 # branches with interleaved revisions.
1953 1961 #
1954 1962 # During iteration revs are split into two groups:
1955 1963 # A) revisions already emitted
1956 1964 # B) revisions in "retention", stored as distinct subgroups.
1957 1965 #
1958 1966 # for each REV, we do the following logic:
1959 1967 #
1960 1968 # 1) if REV is a parent of (A), we will emit it. If there is a
1961 1969 # retention group ((B) above) that is blocked on REV being
1962 1970 # available, we emit all the revisions out of that retention
1963 1971 # group first.
1964 1972 #
1965 1973 # 2) else, we search for a subgroup in (B) waiting for REV to become
1966 1974 #    available; if such a subgroup exists, we add REV to it and the
1967 1975 #    subgroup is now waiting for REV.parents() to become available.
1968 1976 #
1969 1977 # 3) finally, if no such subgroup exists in (B), we create a new one.
1970 1978 #
1971 1979 #
1972 1980 # To bootstrap the algorithm, we emit the tipmost revision (which
1973 1981 # puts it in group (A) from above).
1974 1982
1975 1983 revs.sort(reverse=True)
1976 1984
1977 1985 # Set of parents of revision that have been emitted. They can be considered
1978 1986 # unblocked as the graph generator is already aware of them so there is no
1979 1987 # need to delay the revisions that reference them.
1980 1988 #
1981 1989 # If someone wants to prioritize a branch over the others, pre-filling this
1982 1990 # set will force all other branches to wait until this branch is ready to be
1983 1991 # emitted.
1984 1992 unblocked = set(firstbranch)
1985 1993
1986 1994 # list of groups waiting to be displayed, each group is defined by:
1987 1995 #
1988 1996 # (revs: lists of revs waiting to be displayed,
1989 1997 #  blocked: set of revs that cannot be displayed before those in 'revs')
1990 1998 #
1991 1999 # The second value ('blocked') corresponds to the parents of any revision
1992 2000 # in the group ('revs') that are not themselves contained in the group.
1993 2001 # The main idea of this algorithm is to delay the emission of any revision
1994 2002 # as long as possible: the revs in a group are displayed only when we are
1995 2003 # about to display their parents.
1996 2004 #
1997 2005 # This first implementation is smart until it encounters a merge: it will
1998 2006 # emit revs as soon as any parent is about to be emitted and can grow an
1999 2007 # arbitrary number of revs in 'blocked'. In practice this means we properly
2000 2008 # retain new branches but give up on any special ordering for ancestors
2001 2009 # of merges. The implementation can be improved to handle this better.
2002 2010 #
2003 2011 # The first subgroup is special. It corresponds to all the revisions that
2004 2012 # were already emitted. Its 'revs' list is expected to be empty and its
2005 2013 # 'blocked' set contains the parent revisions of already emitted revisions.
2006 2014 #
2007 2015 # You could pre-seed the <parents> set of groups[0] with specific
2008 2016 # changesets to select which branch is emitted first.
2009 2017 groups = [([], unblocked)]
2010 2018 pendingheap = []
2011 2019 pendingset = set()
2012 2020
2013 2021 heapq.heapify(pendingheap)
2014 2022 heappop = heapq.heappop
2015 2023 heappush = heapq.heappush
2016 2024 for currentrev in revs:
2017 2025 # Heap works with smallest element, we want highest so we invert
2018 2026 if currentrev not in pendingset:
2019 2027 heappush(pendingheap, -currentrev)
2020 2028 pendingset.add(currentrev)
2021 2029 # iterate on pending revs until the current rev has been
2022 2030 # processed.
2023 2031 rev = None
2024 2032 while rev != currentrev:
2025 2033 rev = -heappop(pendingheap)
2026 2034 pendingset.remove(rev)
2027 2035
2028 2036 # Look for a subgroup that is blocked, waiting for the current revision.
2029 2037 matching = [i for i, g in enumerate(groups) if rev in g[1]]
2030 2038
2031 2039 if matching:
2032 2040 # The main idea is to gather together all sets that are blocked
2033 2041 # on the same revision.
2034 2042 #
2035 2043 # Groups are merged when a common blocking ancestor is
2036 2044 # observed. For example, given two groups:
2037 2045 #
2038 2046 # revs [5, 4] waiting for 1
2039 2047 # revs [3, 2] waiting for 1
2040 2048 #
2041 2049 # These two groups will be merged when we process
2042 2050 # 1. In theory, we could have merged the groups when
2043 2051 # we added 2 to the group it is now in (we could have
2044 2052 # noticed the groups were both blocked on 1 then), but
2045 2053 # the way it works now makes the algorithm simpler.
2046 2054 #
2047 2055 # We also always keep the oldest subgroup first. We could
2048 2056 # probably improve the behavior by putting the longest set
2049 2057 # first. That way, graph algorithms could minimise the length
2050 2058 # of parallel lines in their drawing. This is currently not done.
2051 2059 targetidx = matching.pop(0)
2052 2060 trevs, tparents = groups[targetidx]
2053 2061 for i in matching:
2054 2062 gr = groups[i]
2055 2063 trevs.extend(gr[0])
2056 2064 tparents |= gr[1]
2057 2065 # delete all merged subgroups (except the one we kept)
2058 2066 # (starting from the last subgroup for performance and
2059 2067 # sanity reasons)
2060 2068 for i in reversed(matching):
2061 2069 del groups[i]
2062 2070 else:
2063 2071 # This is a new head. We create a new subgroup for it.
2064 2072 targetidx = len(groups)
2065 2073 groups.append(([], set([rev])))
2066 2074
2067 2075 gr = groups[targetidx]
2068 2076
2069 2077 # We now add the current node to this subgroup. This is done
2070 2078 # after the subgroup merging because all elements from a subgroup
2071 2079 # that relied on this rev must precede it.
2072 2080 #
2073 2081 # we also update the <parents> set to include the parents of the
2074 2082 # new nodes.
2075 2083 if rev == currentrev: # only display stuff in rev
2076 2084 gr[0].append(rev)
2077 2085 gr[1].remove(rev)
2078 2086 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2079 2087 gr[1].update(parents)
2080 2088 for p in parents:
2081 2089 if p not in pendingset:
2082 2090 pendingset.add(p)
2083 2091 heappush(pendingheap, -p)
2084 2092
2085 2093 # Look for a subgroup to display
2086 2094 #
2087 2095 # When unblocked is empty (if clause), we were not waiting for any
2088 2096 # revisions during the first iteration (if no priority was given) or
2089 2097 # if we emitted a whole disconnected set of the graph (reached a
2090 2098 # root). In that case we arbitrarily take the oldest known
2091 2099 # subgroup. The heuristic could probably be better.
2092 2100 #
2093 2101 # Otherwise (elif clause) if the subgroup is blocked on
2094 2102 # a revision we just emitted, we can safely emit it as
2095 2103 # well.
2096 2104 if not unblocked:
2097 2105 if len(groups) > 1: # display other subset
2098 2106 targetidx = 1
2099 2107 gr = groups[1]
2100 2108 elif not gr[1] & unblocked:
2101 2109 gr = None
2102 2110
2103 2111 if gr is not None:
2104 2112 # update the set of awaited revisions with the one from the
2105 2113 # subgroup
2106 2114 unblocked |= gr[1]
2107 2115 # output all revisions in the subgroup
2108 2116 for r in gr[0]:
2109 2117 yield r
2110 2118 # delete the subgroup that you just output
2111 2119 # unless it is groups[0] in which case you just empty it.
2112 2120 if targetidx:
2113 2121 del groups[targetidx]
2114 2122 else:
2115 2123 gr[0][:] = []
2116 2124 # Check if we have some subgroup waiting for revisions we are not going to
2117 2125 # iterate over
2118 2126 for g in groups:
2119 2127 for r in g[0]:
2120 2128 yield r
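# Illustration (a minimal sketch exercising _toposort directly; the parents
# mapping mirrors the graph in the docstring above, with -1 standing in for
# nullrev):
#
#     parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
#     list(_toposort([0, 1, 2, 3, 4], lambda r: parents[r]))
#     # -> [4, 1, 3, 2, 0]: each topo branch is emitted whole, heads to roots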
2121 2129
2122 2130 @predicate('subrepo([pattern])')
2123 2131 def subrepo(repo, subset, x):
2124 2132 """Changesets that add, modify or remove the given subrepo. If no subrepo
2125 2133 pattern is named, any subrepo changes are returned.
2126 2134 """
2127 2135 # i18n: "subrepo" is a keyword
2128 2136 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2129 2137 pat = None
2130 2138 if len(args) != 0:
2131 2139 pat = getstring(args[0], _("subrepo requires a pattern"))
2132 2140
2133 2141 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2134 2142
2135 2143 def submatches(names):
2136 2144 k, p, m = util.stringmatcher(pat)
2137 2145 for name in names:
2138 2146 if m(name):
2139 2147 yield name
2140 2148
2141 2149 def matches(x):
2142 2150 c = repo[x]
2143 2151 s = repo.status(c.p1().node(), c.node(), match=m)
2144 2152
2145 2153 if pat is None:
2146 2154 return s.added or s.modified or s.removed
2147 2155
2148 2156 if s.added:
2149 2157 return any(submatches(c.substate.keys()))
2150 2158
2151 2159 if s.modified:
2152 2160 subs = set(c.p1().substate.keys())
2153 2161 subs.update(c.substate.keys())
2154 2162
2155 2163 for path in submatches(subs):
2156 2164 if c.p1().substate.get(path) != c.substate.get(path):
2157 2165 return True
2158 2166
2159 2167 if s.removed:
2160 2168 return any(submatches(c.p1().substate.keys()))
2161 2169
2162 2170 return False
2163 2171
2164 2172 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2165 2173
2166 2174 def _substringmatcher(pattern):
2167 2175 kind, pattern, matcher = util.stringmatcher(pattern)
2168 2176 if kind == 'literal':
2169 2177 matcher = lambda s: pattern in s
2170 2178 return kind, pattern, matcher
2171 2179
2172 2180 @predicate('tag([name])', safe=True)
2173 2181 def tag(repo, subset, x):
2174 2182 """The specified tag by name, or all tagged revisions if no name is given.
2175 2183
2176 2184 If `name` starts with `re:`, the remainder of the name is treated as
2177 2185 a regular expression. To match a tag that actually starts with `re:`,
2178 2186 use the prefix `literal:`.
2179 2187 """
2180 2188 # i18n: "tag" is a keyword
2181 2189 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2182 2190 cl = repo.changelog
2183 2191 if args:
2184 2192 pattern = getstring(args[0],
2185 2193 # i18n: "tag" is a keyword
2186 2194 _('the argument to tag must be a string'))
2187 2195 kind, pattern, matcher = util.stringmatcher(pattern)
2188 2196 if kind == 'literal':
2189 2197 # avoid resolving all tags
2190 2198 tn = repo._tagscache.tags.get(pattern, None)
2191 2199 if tn is None:
2192 2200 raise error.RepoLookupError(_("tag '%s' does not exist")
2193 2201 % pattern)
2194 2202 s = set([repo[tn].rev()])
2195 2203 else:
2196 2204 s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
2197 2205 else:
2198 2206 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
2199 2207 return subset & s
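# Illustration (a minimal sketch of the tag predicate above; '1.0' is a
# hypothetical tag name):
#
#     hg log -r 'tag()'           # all tagged revisions except the 'tip' pseudo-tag
#     hg log -r 'tag("1.0")'      # the revision carrying tag 1.0
#     hg log -r 'tag("re:^1\.")'  # tags matched by a regular expression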
2200 2208
2201 2209 @predicate('tagged', safe=True)
2202 2210 def tagged(repo, subset, x):
2203 2211 return tag(repo, subset, x)
2204 2212
2205 2213 @predicate('unstable()', safe=True)
2206 2214 def unstable(repo, subset, x):
2207 2215 """Non-obsolete changesets with obsolete ancestors.
2208 2216 """
2209 2217 # i18n: "unstable" is a keyword
2210 2218 getargs(x, 0, 0, _("unstable takes no arguments"))
2211 2219 unstables = obsmod.getrevs(repo, 'unstable')
2212 2220 return subset & unstables
2213 2221
2214 2222
2215 2223 @predicate('user(string)', safe=True)
2216 2224 def user(repo, subset, x):
2217 2225 """User name contains string. The match is case-insensitive.
2218 2226
2219 2227 If `string` starts with `re:`, the remainder of the string is treated as
2220 2228 a regular expression. To match a user that actually contains `re:`, use
2221 2229 the prefix `literal:`.
2222 2230 """
2223 2231 return author(repo, subset, x)
2224 2232
2225 2233 # experimental
2226 2234 @predicate('wdir', safe=True)
2227 2235 def wdir(repo, subset, x):
2228 2236 # i18n: "wdir" is a keyword
2229 2237 getargs(x, 0, 0, _("wdir takes no arguments"))
2230 2238 if node.wdirrev in subset or isinstance(subset, fullreposet):
2231 2239 return baseset([node.wdirrev])
2232 2240 return baseset()
2233 2241
2234 2242 # for internal use
2235 2243 @predicate('_list', safe=True)
2236 2244 def _list(repo, subset, x):
2237 2245 s = getstring(x, "internal error")
2238 2246 if not s:
2239 2247 return baseset()
2240 2248 # remove duplicates here. it's difficult for caller to deduplicate sets
2241 2249 # because different symbols can point to the same rev.
2242 2250 cl = repo.changelog
2243 2251 ls = []
2244 2252 seen = set()
2245 2253 for t in s.split('\0'):
2246 2254 try:
2247 2255 # fast path for integer revision
2248 2256 r = int(t)
2249 2257 if str(r) != t or r not in cl:
2250 2258 raise ValueError
2251 2259 revs = [r]
2252 2260 except ValueError:
2253 2261 revs = stringset(repo, subset, t)
2254 2262
2255 2263 for r in revs:
2256 2264 if r in seen:
2257 2265 continue
2258 2266 if (r in subset
2259 2267 or r == node.nullrev and isinstance(subset, fullreposet)):
2260 2268 ls.append(r)
2261 2269 seen.add(r)
2262 2270 return baseset(ls)
2263 2271
2264 2272 # for internal use
2265 2273 @predicate('_intlist', safe=True)
2266 2274 def _intlist(repo, subset, x):
2267 2275 s = getstring(x, "internal error")
2268 2276 if not s:
2269 2277 return baseset()
2270 2278 ls = [int(r) for r in s.split('\0')]
2271 2279 s = subset
2272 2280 return baseset([r for r in ls if r in s])
2273 2281
2274 2282 # for internal use
2275 2283 @predicate('_hexlist', safe=True)
2276 2284 def _hexlist(repo, subset, x):
2277 2285 s = getstring(x, "internal error")
2278 2286 if not s:
2279 2287 return baseset()
2280 2288 cl = repo.changelog
2281 2289 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2282 2290 s = subset
2283 2291 return baseset([r for r in ls if r in s])
2284 2292
2285 2293 methods = {
2286 2294 "range": rangeset,
2287 2295 "dagrange": dagrange,
2288 2296 "string": stringset,
2289 2297 "symbol": stringset,
2290 2298 "and": andset,
2291 2299 "or": orset,
2292 2300 "not": notset,
2293 2301 "difference": differenceset,
2294 2302 "list": listset,
2295 2303 "keyvalue": keyvaluepair,
2296 2304 "func": func,
2297 2305 "ancestor": ancestorspec,
2298 2306 "parent": parentspec,
2299 2307 "parentpost": p1,
2300 2308 }
2301 2309
2302 2310 def _matchonly(revs, bases):
2303 2311 """
2304 2312 >>> f = lambda *args: _matchonly(*map(parse, args))
2305 2313 >>> f('ancestors(A)', 'not ancestors(B)')
2306 2314 ('list', ('symbol', 'A'), ('symbol', 'B'))
2307 2315 """
2308 2316 if (revs is not None
2309 2317 and revs[0] == 'func'
2310 2318 and getsymbol(revs[1]) == 'ancestors'
2311 2319 and bases is not None
2312 2320 and bases[0] == 'not'
2313 2321 and bases[1][0] == 'func'
2314 2322 and getsymbol(bases[1][1]) == 'ancestors'):
2315 2323 return ('list', revs[2], bases[1][2])
2316 2324
2317 2325 def _fixops(x):
2318 2326 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
2319 2327 handled well by our simple top-down parser"""
2320 2328 if not isinstance(x, tuple):
2321 2329 return x
2322 2330
2323 2331 op = x[0]
2324 2332 if op == 'parent':
2325 2333 # x^:y means (x^) : y, not x ^ (:y)
2326 2334 # x^: means (x^) :, not x ^ (:)
2327 2335 post = ('parentpost', x[1])
2328 2336 if x[2][0] == 'dagrangepre':
2329 2337 return _fixops(('dagrange', post, x[2][1]))
2330 2338 elif x[2][0] == 'rangepre':
2331 2339 return _fixops(('range', post, x[2][1]))
2332 2340 elif x[2][0] == 'rangeall':
2333 2341 return _fixops(('rangepost', post))
2334 2342
2335 2343 return (op,) + tuple(_fixops(y) for y in x[1:])
2336 2344
2337 2345 def _optimize(x, small):
2338 2346 if x is None:
2339 2347 return 0, x
2340 2348
2341 2349 smallbonus = 1
2342 2350 if small:
2343 2351 smallbonus = .5
2344 2352
2345 2353 op = x[0]
2346 2354 if op == 'minus':
2347 2355 return _optimize(('and', x[1], ('not', x[2])), small)
2348 2356 elif op == 'only':
2349 2357 t = ('func', ('symbol', 'only'), ('list', x[1], x[2]))
2350 2358 return _optimize(t, small)
2351 2359 elif op == 'onlypost':
2352 2360 return _optimize(('func', ('symbol', 'only'), x[1]), small)
2353 2361 elif op == 'dagrangepre':
2354 2362 return _optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2355 2363 elif op == 'dagrangepost':
2356 2364 return _optimize(('func', ('symbol', 'descendants'), x[1]), small)
2357 2365 elif op == 'rangeall':
2358 2366 return _optimize(('range', ('string', '0'), ('string', 'tip')), small)
2359 2367 elif op == 'rangepre':
2360 2368 return _optimize(('range', ('string', '0'), x[1]), small)
2361 2369 elif op == 'rangepost':
2362 2370 return _optimize(('range', x[1], ('string', 'tip')), small)
2363 2371 elif op == 'negate':
2364 2372 s = getstring(x[1], _("can't negate that"))
2365 2373 return _optimize(('string', '-' + s), small)
2366 2374 elif op in 'string symbol negate':
2367 2375 return smallbonus, x # single revisions are small
2368 2376 elif op == 'and':
2369 2377 wa, ta = _optimize(x[1], True)
2370 2378 wb, tb = _optimize(x[2], True)
2371 2379 w = min(wa, wb)
2372 2380
2373 2381 # (::x and not ::y)/(not ::y and ::x) have a fast path
2374 2382 tm = _matchonly(ta, tb) or _matchonly(tb, ta)
2375 2383 if tm:
2376 2384 return w, ('func', ('symbol', 'only'), tm)
2377 2385
2378 2386 if tb is not None and tb[0] == 'not':
2379 2387 return wa, ('difference', ta, tb[1])
2380 2388
2381 2389 if wa > wb:
2382 2390 return w, (op, tb, ta)
2383 2391 return w, (op, ta, tb)
2384 2392 elif op == 'or':
2385 2393 # fast path for machine-generated expressions, which are likely to have
2386 2394 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2387 2395 ws, ts, ss = [], [], []
2388 2396 def flushss():
2389 2397 if not ss:
2390 2398 return
2391 2399 if len(ss) == 1:
2392 2400 w, t = ss[0]
2393 2401 else:
2394 2402 s = '\0'.join(t[1] for w, t in ss)
2395 2403 y = ('func', ('symbol', '_list'), ('string', s))
2396 2404 w, t = _optimize(y, False)
2397 2405 ws.append(w)
2398 2406 ts.append(t)
2399 2407 del ss[:]
2400 2408 for y in x[1:]:
2401 2409 w, t = _optimize(y, False)
2402 2410 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
2403 2411 ss.append((w, t))
2404 2412 continue
2405 2413 flushss()
2406 2414 ws.append(w)
2407 2415 ts.append(t)
2408 2416 flushss()
2409 2417 if len(ts) == 1:
2410 2418 return ws[0], ts[0] # 'or' operation is fully optimized out
2411 2419 # we can't reorder trees by weight because it would change the order.
2412 2420 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2413 2421 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2414 2422 return max(ws), (op,) + tuple(ts)
2415 2423 elif op == 'not':
2416 2424 # Optimize not public() to _notpublic() because we have a fast version
2417 2425 if x[1] == ('func', ('symbol', 'public'), None):
2418 2426 newsym = ('func', ('symbol', '_notpublic'), None)
2419 2427 o = _optimize(newsym, not small)
2420 2428 return o[0], o[1]
2421 2429 else:
2422 2430 o = _optimize(x[1], not small)
2423 2431 return o[0], (op, o[1])
2424 2432 elif op == 'parentpost':
2425 2433 o = _optimize(x[1], small)
2426 2434 return o[0], (op, o[1])
2427 2435 elif op == 'group':
2428 2436 return _optimize(x[1], small)
2429 2437 elif op in 'dagrange range parent ancestorspec':
2430 2438 wa, ta = _optimize(x[1], small)
2431 2439 wb, tb = _optimize(x[2], small)
2432 2440 return wa + wb, (op, ta, tb)
2433 2441 elif op == 'list':
2434 2442 ws, ts = zip(*(_optimize(y, small) for y in x[1:]))
2435 2443 return sum(ws), (op,) + ts
2436 2444 elif op == 'keyvalue':
2437 2445 w, t = _optimize(x[2], small)
2438 2446 return w, (op, x[1], t)
2439 2447 elif op == 'func':
2440 2448 f = getsymbol(x[1])
2441 2449 wa, ta = _optimize(x[2], small)
2442 2450 if f in ("author branch closed date desc file grep keyword "
2443 2451 "outgoing user"):
2444 2452 w = 10 # slow
2445 2453 elif f in "modifies adds removes":
2446 2454 w = 30 # slower
2447 2455 elif f == "contains":
2448 2456 w = 100 # very slow
2449 2457 elif f == "ancestor":
2450 2458 w = 1 * smallbonus
2451 2459 elif f in "reverse limit first _intlist":
2452 2460 w = 0
2453 2461 elif f in "sort":
2454 2462 w = 10 # assume most sorts look at changelog
2455 2463 else:
2456 2464 w = 1
2457 2465 return w + wa, (op, x[1], ta)
2458 2466 return 1, x
2459 2467
2460 2468 def optimize(tree):
2461 2469 _weight, newtree = _optimize(tree, small=True)
2462 2470 return newtree
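# Illustration (a minimal sketch of the rewrites performed by _optimize above,
# following its 'not public()' and 'minus' branches):
#
#     optimize(parse('not public()'))
#     # -> ('func', ('symbol', '_notpublic'), None)
#     optimize(parse('tip - merge()'))
#     # -> ('difference', ('symbol', 'tip'), ('func', ('symbol', 'merge'), None))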
2463 2471
2464 2472 # the set of valid characters for the initial letter of symbols in
2465 2473 # alias declarations and definitions
2466 2474 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2467 2475 if c.isalnum() or c in '._@$' or ord(c) > 127)
2468 2476
2469 2477 def _parsewith(spec, lookup=None, syminitletters=None):
2470 2478 """Generate a parse tree of given spec with given tokenizing options
2471 2479
2472 2480 >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters)
2473 2481 ('func', ('symbol', 'foo'), ('symbol', '$1'))
2474 2482 >>> _parsewith('$1')
2475 2483 Traceback (most recent call last):
2476 2484 ...
2477 2485 ParseError: ("syntax error in revset '$1'", 0)
2478 2486 >>> _parsewith('foo bar')
2479 2487 Traceback (most recent call last):
2480 2488 ...
2481 2489 ParseError: ('invalid token', 4)
2482 2490 """
2483 2491 p = parser.parser(elements)
2484 2492 tree, pos = p.parse(tokenize(spec, lookup=lookup,
2485 2493 syminitletters=syminitletters))
2486 2494 if pos != len(spec):
2487 2495 raise error.ParseError(_('invalid token'), pos)
2488 2496 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
2489 2497
2490 2498 class _aliasrules(parser.basealiasrules):
2491 2499 """Parsing and expansion rule set of revset aliases"""
2492 2500 _section = _('revset alias')
2493 2501
2494 2502 @staticmethod
2495 2503 def _parse(spec):
2496 2504 """Parse alias declaration/definition ``spec``
2497 2505
2498 2506 This also allows symbol names to use ``$`` as an initial letter
2499 2507 (for backward compatibility), and callers of this function should
2500 2508 examine whether ``$`` is also used for unexpected symbols or not.
2501 2509 """
2502 2510 return _parsewith(spec, syminitletters=_aliassyminitletters)
2503 2511
2504 2512 @staticmethod
2505 2513 def _trygetfunc(tree):
2506 2514 if tree[0] == 'func' and tree[1][0] == 'symbol':
2507 2515 return tree[1][1], getlist(tree[2])
2508 2516
2509 2517 def expandaliases(ui, tree, showwarning=None):
2510 2518 aliases = _aliasrules.buildmap(ui.configitems('revsetalias'))
2511 2519 tree = _aliasrules.expand(aliases, tree)
2512 2520 if showwarning:
2513 2521 # warn about problematic (but not referred) aliases
2514 2522 for name, alias in sorted(aliases.iteritems()):
2515 2523 if alias.error and not alias.warned:
2516 2524 showwarning(_('warning: %s\n') % (alias.error))
2517 2525 alias.warned = True
2518 2526 return tree
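# Illustration (a minimal sketch of aliases consumed by expandaliases above;
# the entries are hypothetical [revsetalias] settings in a user's hgrc):
#
#     [revsetalias]
#     h = heads()
#     d(s) = sort(s, date)
#     rs(s, k) = reverse(sort(s, k))
#
# With these, 'rs(0:tip, author)' expands to 'reverse(sort(0:tip, author))'.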
2519 2527
2520 2528 def foldconcat(tree):
2521 2529 """Fold elements to be concatenated by `##`
2522 2530 """
2523 2531 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2524 2532 return tree
2525 2533 if tree[0] == '_concat':
2526 2534 pending = [tree]
2527 2535 l = []
2528 2536 while pending:
2529 2537 e = pending.pop()
2530 2538 if e[0] == '_concat':
2531 2539 pending.extend(reversed(e[1:]))
2532 2540 elif e[0] in ('string', 'symbol'):
2533 2541 l.append(e[1])
2534 2542 else:
2535 2543 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2536 2544 raise error.ParseError(msg)
2537 2545 return ('string', ''.join(l))
2538 2546 else:
2539 2547 return tuple(foldconcat(t) for t in tree)
2540 2548
2541 2549 def parse(spec, lookup=None):
2542 2550 return _parsewith(spec, lookup=lookup)
2543 2551
2544 2552 def posttreebuilthook(tree, repo):
2545 2553 # hook for extensions to execute code on the optimized tree
2546 2554 pass
2547 2555
2548 2556 def match(ui, spec, repo=None):
2549 2557 """Create a matcher for a single revision spec."""
2550 2558 return matchany(ui, [spec], repo=repo)
2551 2559
2552 2560 def matchany(ui, specs, repo=None):
2553 2561 """Create a matcher that will include any revisions matching one of the
2554 2562 given specs"""
2555 2563 if not specs:
2556 2564 def mfunc(repo, subset=None):
2557 2565 return baseset()
2558 2566 return mfunc
2559 2567 if not all(specs):
2560 2568 raise error.ParseError(_("empty query"))
2561 2569 lookup = None
2562 2570 if repo:
2563 2571 lookup = repo.__contains__
2564 2572 if len(specs) == 1:
2565 2573 tree = parse(specs[0], lookup)
2566 2574 else:
2567 2575 tree = ('or',) + tuple(parse(s, lookup) for s in specs)
2568 2576 return _makematcher(ui, tree, repo)
2569 2577
2570 2578 def _makematcher(ui, tree, repo):
2571 2579 if ui:
2572 2580 tree = expandaliases(ui, tree, showwarning=ui.warn)
2573 2581 tree = foldconcat(tree)
2574 2582 tree = optimize(tree)
2575 2583 posttreebuilthook(tree, repo)
2576 2584 def mfunc(repo, subset=None):
2577 2585 if subset is None:
2578 2586 subset = fullreposet(repo)
2579 2587 if util.safehasattr(subset, 'isascending'):
2580 2588 result = getset(repo, subset, tree)
2581 2589 else:
2582 2590 result = getset(repo, baseset(subset), tree)
2583 2591 return result
2584 2592 return mfunc
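# Illustration (a minimal sketch of how an extension might evaluate a revset
# through the matcher API above; 'repo' is assumed to be a local repository
# object):
#
#     m = match(repo.ui, 'heads(draft())', repo=repo)
#     for rev in m(repo):  # m(repo) returns a smartset of revision numbers
#         repo.ui.write('%d\n' % rev)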
2585 2593
2586 2594 def formatspec(expr, *args):
2587 2595 '''
2588 2596 This is a convenience function for using revsets internally, and
2589 2597 escapes arguments appropriately. Aliases are intentionally ignored
2590 2598 so that intended expression behavior isn't accidentally subverted.
2591 2599
2592 2600 Supported arguments:
2593 2601
2594 2602 %r = revset expression, parenthesized
2595 2603 %d = int(arg), no quoting
2596 2604 %s = string(arg), escaped and single-quoted
2597 2605 %b = arg.branch(), escaped and single-quoted
2598 2606 %n = hex(arg), single-quoted
2599 2607 %% = a literal '%'
2600 2608
2601 2609 Prefixing the type with 'l' specifies a parenthesized list of that type.
2602 2610
2603 2611 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2604 2612 '(10 or 11):: and ((this()) or (that()))'
2605 2613 >>> formatspec('%d:: and not %d::', 10, 20)
2606 2614 '10:: and not 20::'
2607 2615 >>> formatspec('%ld or %ld', [], [1])
2608 2616 "_list('') or 1"
2609 2617 >>> formatspec('keyword(%s)', 'foo\\xe9')
2610 2618 "keyword('foo\\\\xe9')"
2611 2619 >>> b = lambda: 'default'
2612 2620 >>> b.branch = b
2613 2621 >>> formatspec('branch(%b)', b)
2614 2622 "branch('default')"
2615 2623 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2616 2624 "root(_list('a\\x00b\\x00c\\x00d'))"
2617 2625 '''
2618 2626
2619 2627 def quote(s):
2620 2628 return repr(str(s))
2621 2629
2622 2630 def argtype(c, arg):
2623 2631 if c == 'd':
2624 2632 return str(int(arg))
2625 2633 elif c == 's':
2626 2634 return quote(arg)
2627 2635 elif c == 'r':
2628 2636 parse(arg) # make sure syntax errors are confined
2629 2637 return '(%s)' % arg
2630 2638 elif c == 'n':
2631 2639 return quote(node.hex(arg))
2632 2640 elif c == 'b':
2633 2641 return quote(arg.branch())
2634 2642
2635 2643 def listexp(s, t):
2636 2644 l = len(s)
2637 2645 if l == 0:
2638 2646 return "_list('')"
2639 2647 elif l == 1:
2640 2648 return argtype(t, s[0])
2641 2649 elif t == 'd':
2642 2650 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2643 2651 elif t == 's':
2644 2652 return "_list('%s')" % "\0".join(s)
2645 2653 elif t == 'n':
2646 2654 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2647 2655 elif t == 'b':
2648 2656 return "_list('%s')" % "\0".join(a.branch() for a in s)
2649 2657
2650 2658 m = l // 2
2651 2659 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2652 2660
2653 2661 ret = ''
2654 2662 pos = 0
2655 2663 arg = 0
2656 2664 while pos < len(expr):
2657 2665 c = expr[pos]
2658 2666 if c == '%':
2659 2667 pos += 1
2660 2668 d = expr[pos]
2661 2669 if d == '%':
2662 2670 ret += d
2663 2671 elif d in 'dsnbr':
2664 2672 ret += argtype(d, args[arg])
2665 2673 arg += 1
2666 2674 elif d == 'l':
2667 2675 # a list of some type
2668 2676 pos += 1
2669 2677 d = expr[pos]
2670 2678 ret += listexp(list(args[arg]), d)
2671 2679 arg += 1
2672 2680 else:
2673 2681 raise error.Abort(_('unexpected revspec format character %s')
2674 2682 % d)
2675 2683 else:
2676 2684 ret += c
2677 2685 pos += 1
2678 2686
2679 2687 return ret
2680 2688
2681 2689 def prettyformat(tree):
2682 2690 return parser.prettyformat(tree, ('string', 'symbol'))
2683 2691
2684 2692 def depth(tree):
2685 2693 if isinstance(tree, tuple):
2686 2694 return max(map(depth, tree)) + 1
2687 2695 else:
2688 2696 return 0
2689 2697
2690 2698 def funcsused(tree):
2691 2699 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2692 2700 return set()
2693 2701 else:
2694 2702 funcs = set()
2695 2703 for s in tree[1:]:
2696 2704 funcs |= funcsused(s)
2697 2705 if tree[0] == 'func':
2698 2706 funcs.add(tree[1][1])
2699 2707 return funcs
2700 2708
2701 2709 def _formatsetrepr(r):
2702 2710 """Format an optional printable representation of a set
2703 2711
2704 2712 ======== =================================
2705 2713 type(r) example
2706 2714 ======== =================================
2707 2715 tuple ('<not %r>', other)
2708 2716 str '<branch closed>'
2709 2717 callable lambda: '<branch %r>' % sorted(b)
2710 2718 object other
2711 2719 ======== =================================
2712 2720 """
2713 2721 if r is None:
2714 2722 return ''
2715 2723 elif isinstance(r, tuple):
2716 2724 return r[0] % r[1:]
2717 2725 elif isinstance(r, str):
2718 2726 return r
2719 2727 elif callable(r):
2720 2728 return r()
2721 2729 else:
2722 2730 return repr(r)
2723 2731
2724 2732 class abstractsmartset(object):
2725 2733
2726 2734 def __nonzero__(self):
2727 2735 """True if the smartset is not empty"""
2728 2736 raise NotImplementedError()
2729 2737
2730 2738 def __contains__(self, rev):
2731 2739 """provide fast membership testing"""
2732 2740 raise NotImplementedError()
2733 2741
2734 2742 def __iter__(self):
2735 2743 """iterate the set in the order it is supposed to be iterated"""
2736 2744 raise NotImplementedError()
2737 2745
2738 2746 # Attributes containing a function to perform a fast iteration in a given
2739 2747 # direction. A smartset can have none, one, or both defined.
2740 2748 #
2741 2749 # Default value is None instead of a function returning None to avoid
2742 2750 # initializing an iterator just for testing if a fast method exists.
2743 2751 fastasc = None
2744 2752 fastdesc = None
2745 2753
2746 2754 def isascending(self):
2747 2755 """True if the set will iterate in ascending order"""
2748 2756 raise NotImplementedError()
2749 2757
2750 2758 def isdescending(self):
2751 2759 """True if the set will iterate in descending order"""
2752 2760 raise NotImplementedError()
2753 2761
2754 2762 def istopo(self):
2755 2763 """True if the set will iterate in topographical order"""
2756 2764 raise NotImplementedError()
2757 2765
2758 2766 @util.cachefunc
2759 2767 def min(self):
2760 2768 """return the minimum element in the set"""
2761 2769 if self.fastasc is not None:
2762 2770 for r in self.fastasc():
2763 2771 return r
2764 2772 raise ValueError('arg is an empty sequence')
2765 2773 return min(self)
2766 2774
2767 2775 @util.cachefunc
2768 2776 def max(self):
2769 2777 """return the maximum element in the set"""
2770 2778 if self.fastdesc is not None:
2771 2779 for r in self.fastdesc():
2772 2780 return r
2773 2781 raise ValueError('arg is an empty sequence')
2774 2782 return max(self)
2775 2783
2776 2784 def first(self):
2777 2785 """return the first element in the set (user iteration perspective)
2778 2786
2779 2787 Return None if the set is empty"""
2780 2788 raise NotImplementedError()
2781 2789
2782 2790 def last(self):
2783 2791 """return the last element in the set (user iteration perspective)
2784 2792
2785 2793 Return None if the set is empty"""
2786 2794 raise NotImplementedError()
2787 2795
2788 2796 def __len__(self):
2789 2797 """return the length of the smartsets
2790 2798
2791 2799 This can be expensive on smartsets that could otherwise be lazy."""
2792 2800 raise NotImplementedError()
2793 2801
2794 2802 def reverse(self):
2795 2803 """reverse the expected iteration order"""
2796 2804 raise NotImplementedError()
2797 2805
2798 2806 def sort(self, reverse=True):
2799 2807 """get the set to iterate in an ascending or descending order"""
2800 2808 raise NotImplementedError()
2801 2809
2802 2810 def __and__(self, other):
2803 2811 """Returns a new object with the intersection of the two collections.
2804 2812
2805 2813 This is part of the mandatory API for smartset."""
2806 2814 if isinstance(other, fullreposet):
2807 2815 return self
2808 2816 return self.filter(other.__contains__, condrepr=other, cache=False)
2809 2817
2810 2818 def __add__(self, other):
2811 2819 """Returns a new object with the union of the two collections.
2812 2820
2813 2821 This is part of the mandatory API for smartset."""
2814 2822 return addset(self, other)
2815 2823
2816 2824 def __sub__(self, other):
2817 2825 """Returns a new object with the substraction of the two collections.
2818 2826
2819 2827 This is part of the mandatory API for smartset."""
2820 2828 c = other.__contains__
2821 2829 return self.filter(lambda r: not c(r), condrepr=('<not %r>', other),
2822 2830 cache=False)
2823 2831
2824 2832 def filter(self, condition, condrepr=None, cache=True):
2825 2833 """Returns this smartset filtered by condition as a new smartset.
2826 2834
2827 2835 `condition` is a callable which takes a revision number and returns a
2828 2836 boolean. Optional `condrepr` provides a printable representation of
2829 2837 the given `condition`.
2830 2838
2831 2839 This is part of the mandatory API for smartset."""
2832 2840 # builtins cannot be cached, but they do not need to be
2833 2841 if cache and util.safehasattr(condition, 'func_code'):
2834 2842 condition = util.cachefunc(condition)
2835 2843 return filteredset(self, condition, condrepr)
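# Illustration (a minimal sketch of the smartset operators defined above, using
# the baseset class below; the values mirror the addset doctest further down):
#
#     xs = baseset([0, 3, 2])
#     ys = baseset([5, 2, 4])
#     2 in (xs & ys)         # True  -- __and__ builds a filteredset
#     sorted(xs - ys)        # [0, 3] -- __sub__ filters out members of ys
#     [r for r in xs + ys]   # [0, 3, 2, 5, 4] -- __add__ builds an addset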
2836 2844
2837 2845 class baseset(abstractsmartset):
2838 2846 """Basic data structure that represents a revset and contains the basic
2839 2847 operation that it should be able to perform.
2840 2848
2841 2849 Every method in this class should be implemented by any smartset class.
2842 2850 """
2843 2851 def __init__(self, data=(), datarepr=None, istopo=False):
2844 2852 """
2845 2853 datarepr: a tuple of (format, obj, ...), a function or an object that
2846 2854 provides a printable representation of the given data.
2847 2855 """
2848 2856 self._ascending = None
2849 2857 self._istopo = istopo
2850 2858 if not isinstance(data, list):
2851 2859 if isinstance(data, set):
2852 2860 self._set = data
2853 2861 # sets have no order; we pick one for stability purposes
2854 2862 self._ascending = True
2855 2863 data = list(data)
2856 2864 self._list = data
2857 2865 self._datarepr = datarepr
2858 2866
2859 2867 @util.propertycache
2860 2868 def _set(self):
2861 2869 return set(self._list)
2862 2870
2863 2871 @util.propertycache
2864 2872 def _asclist(self):
2865 2873 asclist = self._list[:]
2866 2874 asclist.sort()
2867 2875 return asclist
2868 2876
2869 2877 def __iter__(self):
2870 2878 if self._ascending is None:
2871 2879 return iter(self._list)
2872 2880 elif self._ascending:
2873 2881 return iter(self._asclist)
2874 2882 else:
2875 2883 return reversed(self._asclist)
2876 2884
2877 2885 def fastasc(self):
2878 2886 return iter(self._asclist)
2879 2887
2880 2888 def fastdesc(self):
2881 2889 return reversed(self._asclist)
2882 2890
2883 2891 @util.propertycache
2884 2892 def __contains__(self):
2885 2893 return self._set.__contains__
2886 2894
2887 2895 def __nonzero__(self):
2888 2896 return bool(self._list)
2889 2897
2890 2898 def sort(self, reverse=False):
2891 2899 self._ascending = not bool(reverse)
2892 2900 self._istopo = False
2893 2901
2894 2902 def reverse(self):
2895 2903 if self._ascending is None:
2896 2904 self._list.reverse()
2897 2905 else:
2898 2906 self._ascending = not self._ascending
2899 2907 self._istopo = False
2900 2908
2901 2909 def __len__(self):
2902 2910 return len(self._list)
2903 2911
2904 2912 def isascending(self):
2905 2913 """Returns True if the collection is ascending order, False if not.
2906 2914
2907 2915 This is part of the mandatory API for smartset."""
2908 2916 if len(self) <= 1:
2909 2917 return True
2910 2918 return self._ascending is not None and self._ascending
2911 2919
2912 2920 def isdescending(self):
2913 2921 """Returns True if the collection is descending order, False if not.
2914 2922
2915 2923 This is part of the mandatory API for smartset."""
2916 2924 if len(self) <= 1:
2917 2925 return True
2918 2926 return self._ascending is not None and not self._ascending
2919 2927
2920 2928 def istopo(self):
2921 2929 """Is the collection is in topographical order or not.
2922 2930
2923 2931 This is part of the mandatory API for smartset."""
2924 2932 if len(self) <= 1:
2925 2933 return True
2926 2934 return self._istopo
2927 2935
2928 2936 def first(self):
2929 2937 if self:
2930 2938 if self._ascending is None:
2931 2939 return self._list[0]
2932 2940 elif self._ascending:
2933 2941 return self._asclist[0]
2934 2942 else:
2935 2943 return self._asclist[-1]
2936 2944 return None
2937 2945
2938 2946 def last(self):
2939 2947 if self:
2940 2948 if self._ascending is None:
2941 2949 return self._list[-1]
2942 2950 elif self._ascending:
2943 2951 return self._asclist[-1]
2944 2952 else:
2945 2953 return self._asclist[0]
2946 2954 return None
2947 2955
2948 2956 def __repr__(self):
2949 2957 d = {None: '', False: '-', True: '+'}[self._ascending]
2950 2958 s = _formatsetrepr(self._datarepr)
2951 2959 if not s:
2952 2960 l = self._list
2953 2961 # if _list has been built from a set, it might have a different
2954 2962 # order from one python implementation to another.
2955 2963 # We fallback to the sorted version for a stable output.
2956 2964 if self._ascending is not None:
2957 2965 l = self._asclist
2958 2966 s = repr(l)
2959 2967 return '<%s%s %s>' % (type(self).__name__, d, s)
2960 2968
2961 2969 class filteredset(abstractsmartset):
2962 2970 """Duck type for baseset class which iterates lazily over the revisions in
2963 2971 the subset and contains a function which tests for membership in the
2964 2972 revset
2965 2973 """
2966 2974 def __init__(self, subset, condition=lambda x: True, condrepr=None):
2967 2975 """
2968 2976 condition: a function that decides whether a revision in the subset
2969 2977 belongs to the revset or not.
2970 2978 condrepr: a tuple of (format, obj, ...), a function or an object that
2971 2979 provides a printable representation of the given condition.
2972 2980 """
2973 2981 self._subset = subset
2974 2982 self._condition = condition
2975 2983 self._condrepr = condrepr
2976 2984
2977 2985 def __contains__(self, x):
2978 2986 return x in self._subset and self._condition(x)
2979 2987
2980 2988 def __iter__(self):
2981 2989 return self._iterfilter(self._subset)
2982 2990
2983 2991 def _iterfilter(self, it):
2984 2992 cond = self._condition
2985 2993 for x in it:
2986 2994 if cond(x):
2987 2995 yield x
2988 2996
2989 2997 @property
2990 2998 def fastasc(self):
2991 2999 it = self._subset.fastasc
2992 3000 if it is None:
2993 3001 return None
2994 3002 return lambda: self._iterfilter(it())
2995 3003
2996 3004 @property
2997 3005 def fastdesc(self):
2998 3006 it = self._subset.fastdesc
2999 3007 if it is None:
3000 3008 return None
3001 3009 return lambda: self._iterfilter(it())
3002 3010
3003 3011 def __nonzero__(self):
3004 3012 fast = None
3005 3013 candidates = [self.fastasc if self.isascending() else None,
3006 3014 self.fastdesc if self.isdescending() else None,
3007 3015 self.fastasc,
3008 3016 self.fastdesc]
3009 3017 for candidate in candidates:
3010 3018 if candidate is not None:
3011 3019 fast = candidate
3012 3020 break
3013 3021
3014 3022 if fast is not None:
3015 3023 it = fast()
3016 3024 else:
3017 3025 it = self
3018 3026
3019 3027 for r in it:
3020 3028 return True
3021 3029 return False
3022 3030
3023 3031 def __len__(self):
3024 3032 # Basic implementation to be changed in future patches.
3025 3033 # Until this gets improved, we use a generator expression
3026 3034 # here, since a list comprehension is free to call __len__ again,
3027 3035 # causing infinite recursion.
3028 3036 l = baseset(r for r in self)
3029 3037 return len(l)
3030 3038
3031 3039 def sort(self, reverse=False):
3032 3040 self._subset.sort(reverse=reverse)
3033 3041
3034 3042 def reverse(self):
3035 3043 self._subset.reverse()
3036 3044
3037 3045 def isascending(self):
3038 3046 return self._subset.isascending()
3039 3047
3040 3048 def isdescending(self):
3041 3049 return self._subset.isdescending()
3042 3050
3043 3051 def istopo(self):
3044 3052 return self._subset.istopo()
3045 3053
3046 3054 def first(self):
3047 3055 for x in self:
3048 3056 return x
3049 3057 return None
3050 3058
3051 3059 def last(self):
3052 3060 it = None
3053 3061 if self.isascending():
3054 3062 it = self.fastdesc
3055 3063 elif self.isdescending():
3056 3064 it = self.fastasc
3057 3065 if it is not None:
3058 3066 for x in it():
3059 3067 return x
3060 3068 return None #empty case
3061 3069 else:
3062 3070 x = None
3063 3071 for x in self:
3064 3072 pass
3065 3073 return x
3066 3074
3067 3075 def __repr__(self):
3068 3076 xs = [repr(self._subset)]
3069 3077 s = _formatsetrepr(self._condrepr)
3070 3078 if s:
3071 3079 xs.append(s)
3072 3080 return '<%s %s>' % (type(self).__name__, ', '.join(xs))
3073 3081
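# Illustrative sketch (not part of the original change): filteredset applies
# its condition lazily on top of another smartset, so membership checks hit
# the underlying subset first and only then call the predicate. Assuming a
# plain baseset as the subset:
#
#   >>> s = filteredset(baseset([0, 1, 2, 3]), lambda r: r % 2 == 0)
#   >>> list(s), 2 in s, 1 in s
#   ([0, 2], True, False)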
3074 3082 def _iterordered(ascending, iter1, iter2):
3075 3083 """produce an ordered iteration from two iterators with the same order
3076 3084
3077 3085 The ascending parameter is used to indicate the iteration direction.
3078 3086 """
3079 3087 choice = max
3080 3088 if ascending:
3081 3089 choice = min
3082 3090
3083 3091 val1 = None
3084 3092 val2 = None
3085 3093 try:
3086 3094 # Consume both iterators in an ordered way until one is empty
3087 3095 while True:
3088 3096 if val1 is None:
3089 3097 val1 = next(iter1)
3090 3098 if val2 is None:
3091 3099 val2 = next(iter2)
3092 3100 n = choice(val1, val2)
3093 3101 yield n
3094 3102 if val1 == n:
3095 3103 val1 = None
3096 3104 if val2 == n:
3097 3105 val2 = None
3098 3106 except StopIteration:
3099 3107 # Flush any remaining values and consume the other one
3100 3108 it = iter2
3101 3109 if val1 is not None:
3102 3110 yield val1
3103 3111 it = iter1
3104 3112 elif val2 is not None:
3105 3113 # might have been equality and both are empty
3106 3114 yield val2
3107 3115 for val in it:
3108 3116 yield val
3109 3117
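# Illustrative sketch (not part of the original change): _iterordered()
# merges two iterators that are already sorted in the same direction,
# yielding each value once even when it appears in both inputs:
#
#   >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#   [1, 2, 3, 5, 6]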
3110 3118 class addset(abstractsmartset):
3111 3119 """Represent the addition of two sets
3112 3120
3113 3121 Wrapper structure for lazily adding two structures without losing much
3114 3122 performance on the __contains__ method
3115 3123
3116 3124 If the ascending attribute is set, that means the two structures are
3117 3125 ordered in either an ascending or descending way. Therefore, we can add
3118 3126 them maintaining the order by iterating over both at the same time
3119 3127
3120 3128 >>> xs = baseset([0, 3, 2])
3121 3129 >>> ys = baseset([5, 2, 4])
3122 3130
3123 3131 >>> rs = addset(xs, ys)
3124 3132 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3125 3133 (True, True, False, True, 0, 4)
3126 3134 >>> rs = addset(xs, baseset([]))
3127 3135 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3128 3136 (True, True, False, 0, 2)
3129 3137 >>> rs = addset(baseset([]), baseset([]))
3130 3138 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3131 3139 (False, False, None, None)
3132 3140
3133 3141 iterate unsorted:
3134 3142 >>> rs = addset(xs, ys)
3135 3143 >>> # (use generator because pypy could call len())
3136 3144 >>> list(x for x in rs) # without _genlist
3137 3145 [0, 3, 2, 5, 4]
3138 3146 >>> assert not rs._genlist
3139 3147 >>> len(rs)
3140 3148 5
3141 3149 >>> [x for x in rs] # with _genlist
3142 3150 [0, 3, 2, 5, 4]
3143 3151 >>> assert rs._genlist
3144 3152
3145 3153 iterate ascending:
3146 3154 >>> rs = addset(xs, ys, ascending=True)
3147 3155 >>> # (use generator because pypy could call len())
3148 3156 >>> list(x for x in rs), list(x for x in rs.fastasc()) # without _asclist
3149 3157 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3150 3158 >>> assert not rs._asclist
3151 3159 >>> len(rs)
3152 3160 5
3153 3161 >>> [x for x in rs], [x for x in rs.fastasc()]
3154 3162 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3155 3163 >>> assert rs._asclist
3156 3164
3157 3165 iterate descending:
3158 3166 >>> rs = addset(xs, ys, ascending=False)
3159 3167 >>> # (use generator because pypy could call len())
3160 3168 >>> list(x for x in rs), list(x for x in rs.fastdesc()) # without _asclist
3161 3169 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3162 3170 >>> assert not rs._asclist
3163 3171 >>> len(rs)
3164 3172 5
3165 3173 >>> [x for x in rs], [x for x in rs.fastdesc()]
3166 3174 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3167 3175 >>> assert rs._asclist
3168 3176
3169 3177 iterate ascending without fastasc:
3170 3178 >>> rs = addset(xs, generatorset(ys), ascending=True)
3171 3179 >>> assert rs.fastasc is None
3172 3180 >>> [x for x in rs]
3173 3181 [0, 2, 3, 4, 5]
3174 3182
3175 3183 iterate descending without fastdesc:
3176 3184 >>> rs = addset(generatorset(xs), ys, ascending=False)
3177 3185 >>> assert rs.fastdesc is None
3178 3186 >>> [x for x in rs]
3179 3187 [5, 4, 3, 2, 0]
3180 3188 """
3181 3189 def __init__(self, revs1, revs2, ascending=None):
3182 3190 self._r1 = revs1
3183 3191 self._r2 = revs2
3184 3192 self._iter = None
3185 3193 self._ascending = ascending
3186 3194 self._genlist = None
3187 3195 self._asclist = None
3188 3196
3189 3197 def __len__(self):
3190 3198 return len(self._list)
3191 3199
3192 3200 def __nonzero__(self):
3193 3201 return bool(self._r1) or bool(self._r2)
3194 3202
3195 3203 @util.propertycache
3196 3204 def _list(self):
3197 3205 if not self._genlist:
3198 3206 self._genlist = baseset(iter(self))
3199 3207 return self._genlist
3200 3208
3201 3209 def __iter__(self):
3202 3210 """Iterate over both collections without repeating elements
3203 3211
3204 3212 If the ascending attribute is not set, iterate over the first one and
3205 3213 then over the second one, checking for membership in the first one so we
3206 3214 don't yield any duplicates.
3207 3215
3208 3216 If the ascending attribute is set, iterate over both collections at the
3209 3217 same time, yielding only one value at a time in the given order.
3210 3218 """
3211 3219 if self._ascending is None:
3212 3220 if self._genlist:
3213 3221 return iter(self._genlist)
3214 3222 def arbitraryordergen():
3215 3223 for r in self._r1:
3216 3224 yield r
3217 3225 inr1 = self._r1.__contains__
3218 3226 for r in self._r2:
3219 3227 if not inr1(r):
3220 3228 yield r
3221 3229 return arbitraryordergen()
3222 3230 # try to use our own fast iterator if it exists
3223 3231 self._trysetasclist()
3224 3232 if self._ascending:
3225 3233 attr = 'fastasc'
3226 3234 else:
3227 3235 attr = 'fastdesc'
3228 3236 it = getattr(self, attr)
3229 3237 if it is not None:
3230 3238 return it()
3231 3239 # maybe one of the two components supports fast iteration
3232 3240 # get iterator for _r1
3233 3241 iter1 = getattr(self._r1, attr)
3234 3242 if iter1 is None:
3235 3243 # let's avoid side effects (not sure it matters)
3236 3244 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3237 3245 else:
3238 3246 iter1 = iter1()
3239 3247 # get iterator for _r2
3240 3248 iter2 = getattr(self._r2, attr)
3241 3249 if iter2 is None:
3242 3250 # let's avoid side effects (not sure it matters)
3243 3251 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3244 3252 else:
3245 3253 iter2 = iter2()
3246 3254 return _iterordered(self._ascending, iter1, iter2)
3247 3255
3248 3256 def _trysetasclist(self):
3249 3257 """populate the _asclist attribute if possible and necessary"""
3250 3258 if self._genlist is not None and self._asclist is None:
3251 3259 self._asclist = sorted(self._genlist)
3252 3260
3253 3261 @property
3254 3262 def fastasc(self):
3255 3263 self._trysetasclist()
3256 3264 if self._asclist is not None:
3257 3265 return self._asclist.__iter__
3258 3266 iter1 = self._r1.fastasc
3259 3267 iter2 = self._r2.fastasc
3260 3268 if None in (iter1, iter2):
3261 3269 return None
3262 3270 return lambda: _iterordered(True, iter1(), iter2())
3263 3271
3264 3272 @property
3265 3273 def fastdesc(self):
3266 3274 self._trysetasclist()
3267 3275 if self._asclist is not None:
3268 3276 return self._asclist.__reversed__
3269 3277 iter1 = self._r1.fastdesc
3270 3278 iter2 = self._r2.fastdesc
3271 3279 if None in (iter1, iter2):
3272 3280 return None
3273 3281 return lambda: _iterordered(False, iter1(), iter2())
3274 3282
3275 3283 def __contains__(self, x):
3276 3284 return x in self._r1 or x in self._r2
3277 3285
3278 3286 def sort(self, reverse=False):
3279 3287 """Sort the added set
3280 3288
3281 3289 For this we use the cached list with all the generated values and if we
3282 3290 know they are ascending or descending we can sort them in a smart way.
3283 3291 """
3284 3292 self._ascending = not reverse
3285 3293
3286 3294 def isascending(self):
3287 3295 return self._ascending is not None and self._ascending
3288 3296
3289 3297 def isdescending(self):
3290 3298 return self._ascending is not None and not self._ascending
3291 3299
3292 3300 def istopo(self):
3293 3301 # not worth the trouble asserting whether the two sets combined are still
3294 3302 # in topological order. Use the sort() predicate to explicitly sort
3295 3303 # again instead.
3296 3304 return False
3297 3305
3298 3306 def reverse(self):
3299 3307 if self._ascending is None:
3300 3308 self._list.reverse()
3301 3309 else:
3302 3310 self._ascending = not self._ascending
3303 3311
3304 3312 def first(self):
3305 3313 for x in self:
3306 3314 return x
3307 3315 return None
3308 3316
3309 3317 def last(self):
3310 3318 self.reverse()
3311 3319 val = self.first()
3312 3320 self.reverse()
3313 3321 return val
3314 3322
3315 3323 def __repr__(self):
3316 3324 d = {None: '', False: '-', True: '+'}[self._ascending]
3317 3325 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3318 3326
3319 3327 class generatorset(abstractsmartset):
3320 3328 """Wrap a generator for lazy iteration
3321 3329
3322 3330 Wrapper structure for generators that provides lazy membership and can
3323 3331 be iterated more than once.
3324 3332 When asked for membership it generates values until either it finds the
3325 3333 requested one or has gone through all the elements in the generator
3326 3334 """
3327 3335 def __init__(self, gen, iterasc=None):
3328 3336 """
3329 3337 gen: a generator producing the values for the generatorset.
3330 3338 """
3331 3339 self._gen = gen
3332 3340 self._asclist = None
3333 3341 self._cache = {}
3334 3342 self._genlist = []
3335 3343 self._finished = False
3336 3344 self._ascending = True
3337 3345 if iterasc is not None:
3338 3346 if iterasc:
3339 3347 self.fastasc = self._iterator
3340 3348 self.__contains__ = self._asccontains
3341 3349 else:
3342 3350 self.fastdesc = self._iterator
3343 3351 self.__contains__ = self._desccontains
3344 3352
3345 3353 def __nonzero__(self):
3346 3354 # Do not use 'for r in self' because it will enforce the iteration
3347 3355 # order (default ascending), possibly unrolling a whole descending
3348 3356 # iterator.
3349 3357 if self._genlist:
3350 3358 return True
3351 3359 for r in self._consumegen():
3352 3360 return True
3353 3361 return False
3354 3362
3355 3363 def __contains__(self, x):
3356 3364 if x in self._cache:
3357 3365 return self._cache[x]
3358 3366
3359 3367 # Use new values only, as existing values would be cached.
3360 3368 for l in self._consumegen():
3361 3369 if l == x:
3362 3370 return True
3363 3371
3364 3372 self._cache[x] = False
3365 3373 return False
3366 3374
3367 3375 def _asccontains(self, x):
3368 3376 """version of contains optimised for ascending generator"""
3369 3377 if x in self._cache:
3370 3378 return self._cache[x]
3371 3379
3372 3380 # Use new values only, as existing values would be cached.
3373 3381 for l in self._consumegen():
3374 3382 if l == x:
3375 3383 return True
3376 3384 if l > x:
3377 3385 break
3378 3386
3379 3387 self._cache[x] = False
3380 3388 return False
3381 3389
3382 3390 def _desccontains(self, x):
3383 3391 """version of contains optimised for descending generator"""
3384 3392 if x in self._cache:
3385 3393 return self._cache[x]
3386 3394
3387 3395 # Use new values only, as existing values would be cached.
3388 3396 for l in self._consumegen():
3389 3397 if l == x:
3390 3398 return True
3391 3399 if l < x:
3392 3400 break
3393 3401
3394 3402 self._cache[x] = False
3395 3403 return False
3396 3404
3397 3405 def __iter__(self):
3398 3406 if self._ascending:
3399 3407 it = self.fastasc
3400 3408 else:
3401 3409 it = self.fastdesc
3402 3410 if it is not None:
3403 3411 return it()
3404 3412 # we need to consume the iterator
3405 3413 for x in self._consumegen():
3406 3414 pass
3407 3415 # recall the same code
3408 3416 return iter(self)
3409 3417
3410 3418 def _iterator(self):
3411 3419 if self._finished:
3412 3420 return iter(self._genlist)
3413 3421
3414 3422 # We have to use this complex iteration strategy to allow multiple
3415 3423 # iterations at the same time. We need to be able to catch revisions
3416 3424 # removed from _consumegen and added to genlist in another instance.
3417 3425 #
3418 3426 # Getting rid of it would provide about a 15% speed up on this
3419 3427 # iteration.
3420 3428 genlist = self._genlist
3421 3429 nextrev = self._consumegen().next
3422 3430 _len = len # cache global lookup
3423 3431 def gen():
3424 3432 i = 0
3425 3433 while True:
3426 3434 if i < _len(genlist):
3427 3435 yield genlist[i]
3428 3436 else:
3429 3437 yield nextrev()
3430 3438 i += 1
3431 3439 return gen()
3432 3440
3433 3441 def _consumegen(self):
3434 3442 cache = self._cache
3435 3443 genlist = self._genlist.append
3436 3444 for item in self._gen:
3437 3445 cache[item] = True
3438 3446 genlist(item)
3439 3447 yield item
3440 3448 if not self._finished:
3441 3449 self._finished = True
3442 3450 asc = self._genlist[:]
3443 3451 asc.sort()
3444 3452 self._asclist = asc
3445 3453 self.fastasc = asc.__iter__
3446 3454 self.fastdesc = asc.__reversed__
3447 3455
3448 3456 def __len__(self):
3449 3457 for x in self._consumegen():
3450 3458 pass
3451 3459 return len(self._genlist)
3452 3460
3453 3461 def sort(self, reverse=False):
3454 3462 self._ascending = not reverse
3455 3463
3456 3464 def reverse(self):
3457 3465 self._ascending = not self._ascending
3458 3466
3459 3467 def isascending(self):
3460 3468 return self._ascending
3461 3469
3462 3470 def isdescending(self):
3463 3471 return not self._ascending
3464 3472
3465 3473 def istopo(self):
3466 3474 # not worth the trouble asserting whether the generated values are still
3467 3475 # in topological order. Use the sort() predicate to explicitly sort
3468 3476 # again instead.
3469 3477 return False
3470 3478
3471 3479 def first(self):
3472 3480 if self._ascending:
3473 3481 it = self.fastasc
3474 3482 else:
3475 3483 it = self.fastdesc
3476 3484 if it is None:
3477 3485 # we need to consume all and try again
3478 3486 for x in self._consumegen():
3479 3487 pass
3480 3488 return self.first()
3481 3489 return next(it(), None)
3482 3490
3483 3491 def last(self):
3484 3492 if self._ascending:
3485 3493 it = self.fastdesc
3486 3494 else:
3487 3495 it = self.fastasc
3488 3496 if it is None:
3489 3497 # we need to consume all and try again
3490 3498 for x in self._consumegen():
3491 3499 pass
3492 3500 return self.last()
3493 3501 return next(it(), None)
3494 3502
3495 3503 def __repr__(self):
3496 3504 d = {False: '-', True: '+'}[self._ascending]
3497 3505 return '<%s%s>' % (type(self).__name__, d)
3498 3506
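# Illustrative sketch (not part of the original change): generatorset caches
# every value it pulls from the wrapped generator, so membership tests and
# repeated iteration never consume the generator twice:
#
#   >>> gs = generatorset(iter([0, 2, 4]), iterasc=True)
#   >>> 2 in gs, 3 in gs, list(gs), len(gs)
#   (True, False, [0, 2, 4], 3)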
3499 3507 class spanset(abstractsmartset):
3500 3508 """Duck type for baseset class which represents a range of revisions and
3501 3509 can work lazily and without having all the range in memory
3502 3510
3503 3511 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3504 3512 notable points:
3505 3513 - when y < x it will be automatically descending,
3506 3514 - revisions filtered with this repoview will be skipped.
3507 3515
3508 3516 """
3509 3517 def __init__(self, repo, start=0, end=None):
3510 3518 """
3511 3519 start: first revision included in the set
3512 3520 (defaults to 0)
3513 3521 end: first revision excluded (last+1)
3514 3522 (defaults to len(repo))
3515 3523
3516 3524 Spanset will be descending if `end` < `start`.
3517 3525 """
3518 3526 if end is None:
3519 3527 end = len(repo)
3520 3528 self._ascending = start <= end
3521 3529 if not self._ascending:
3522 3530 start, end = end + 1, start + 1
3523 3531 self._start = start
3524 3532 self._end = end
3525 3533 self._hiddenrevs = repo.changelog.filteredrevs
3526 3534
3527 3535 def sort(self, reverse=False):
3528 3536 self._ascending = not reverse
3529 3537
3530 3538 def reverse(self):
3531 3539 self._ascending = not self._ascending
3532 3540
3533 3541 def istopo(self):
3534 3542 # not worth the trouble asserting whether the span is still
3535 3543 # in topological order. Use the sort() predicate to explicitly sort
3536 3544 # again instead.
3537 3545 return False
3538 3546
3539 3547 def _iterfilter(self, iterrange):
3540 3548 s = self._hiddenrevs
3541 3549 for r in iterrange:
3542 3550 if r not in s:
3543 3551 yield r
3544 3552
3545 3553 def __iter__(self):
3546 3554 if self._ascending:
3547 3555 return self.fastasc()
3548 3556 else:
3549 3557 return self.fastdesc()
3550 3558
3551 3559 def fastasc(self):
3552 3560 iterrange = xrange(self._start, self._end)
3553 3561 if self._hiddenrevs:
3554 3562 return self._iterfilter(iterrange)
3555 3563 return iter(iterrange)
3556 3564
3557 3565 def fastdesc(self):
3558 3566 iterrange = xrange(self._end - 1, self._start - 1, -1)
3559 3567 if self._hiddenrevs:
3560 3568 return self._iterfilter(iterrange)
3561 3569 return iter(iterrange)
3562 3570
3563 3571 def __contains__(self, rev):
3564 3572 hidden = self._hiddenrevs
3565 3573 return ((self._start <= rev < self._end)
3566 3574 and not (hidden and rev in hidden))
3567 3575
3568 3576 def __nonzero__(self):
3569 3577 for r in self:
3570 3578 return True
3571 3579 return False
3572 3580
3573 3581 def __len__(self):
3574 3582 if not self._hiddenrevs:
3575 3583 return abs(self._end - self._start)
3576 3584 else:
3577 3585 count = 0
3578 3586 start = self._start
3579 3587 end = self._end
3580 3588 for rev in self._hiddenrevs:
3581 3589 if (end < rev <= start) or (start <= rev < end):
3582 3590 count += 1
3583 3591 return abs(self._end - self._start) - count
3584 3592
3585 3593 def isascending(self):
3586 3594 return self._ascending
3587 3595
3588 3596 def isdescending(self):
3589 3597 return not self._ascending
3590 3598
3591 3599 def first(self):
3592 3600 if self._ascending:
3593 3601 it = self.fastasc
3594 3602 else:
3595 3603 it = self.fastdesc
3596 3604 for x in it():
3597 3605 return x
3598 3606 return None
3599 3607
3600 3608 def last(self):
3601 3609 if self._ascending:
3602 3610 it = self.fastdesc
3603 3611 else:
3604 3612 it = self.fastasc
3605 3613 for x in it():
3606 3614 return x
3607 3615 return None
3608 3616
3609 3617 def __repr__(self):
3610 3618 d = {False: '-', True: '+'}[self._ascending]
3611 3619 return '<%s%s %d:%d>' % (type(self).__name__, d,
3612 3620 self._start, self._end - 1)
3613 3621
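# Illustrative sketch (not part of the original change; ``repo`` stands for
# any repository with at least four unfiltered revisions): spanset iterates
# the requested range lazily and flips to descending when start > end:
#
#   >>> list(spanset(repo, 1, 4)), list(spanset(repo, 3, 0))
#   ([1, 2, 3], [3, 2, 1])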
3614 3622 class fullreposet(spanset):
3615 3623 """a set containing all revisions in the repo
3616 3624
3617 3625 This class exists to host special optimization and magic to handle virtual
3618 3626 revisions such as "null".
3619 3627 """
3620 3628
3621 3629 def __init__(self, repo):
3622 3630 super(fullreposet, self).__init__(repo)
3623 3631
3624 3632 def __and__(self, other):
3625 3633 """As self contains the whole repo, all elements of the other set should
3626 3634 also be in self. Therefore `self & other = other`.
3627 3635
3628 3636 This boldly assumes the other contains valid revs only.
3629 3637 """
3630 3638 # other is not a smartset, make it so
3631 3639 if not util.safehasattr(other, 'isascending'):
3632 3640 # filter out hidden revisions
3633 3641 # (this boldly assumes all smartsets are pure)
3634 3642 #
3635 3643 # `other` was used with "&", let's assume this is a set-like
3636 3644 # object.
3637 3645 other = baseset(other - self._hiddenrevs)
3638 3646
3639 3647 # XXX As fullreposet is also used as bootstrap, this is wrong.
3640 3648 #
3641 3649 # With a giveme312() revset returning [3,1,2], this makes
3642 3650 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3643 3651 # We cannot just drop it because other usages still need to sort it:
3644 3652 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3645 3653 #
3646 3654 # There are also some faulty revset implementations that rely on it
3647 3655 # (eg: children as of its state in e8075329c5fb)
3648 3656 #
3649 3657 # When we fix the two points above we can move this into the if clause
3650 3658 other.sort(reverse=self.isdescending())
3651 3659 return other
3652 3660
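# Illustrative sketch (not part of the original change; ``repo`` is a
# hypothetical repository containing revisions 1-3): intersecting with
# fullreposet hands back the other operand itself, sorted to match the
# direction of the full set:
#
#   >>> fullreposet(repo) & baseset([3, 1, 2])
#   <baseset+ [1, 2, 3]>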
3653 3661 def prettyformatset(revs):
3654 3662 lines = []
3655 3663 rs = repr(revs)
3656 3664 p = 0
3657 3665 while p < len(rs):
3658 3666 q = rs.find('<', p + 1)
3659 3667 if q < 0:
3660 3668 q = len(rs)
3661 3669 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3662 3670 assert l >= 0
3663 3671 lines.append((l, rs[p:q].rstrip()))
3664 3672 p = q
3665 3673 return '\n'.join(' ' * l + s for l, s in lines)
3666 3674
3667 3675 def loadpredicate(ui, extname, registrarobj):
3668 3676 """Load revset predicates from specified registrarobj
3669 3677 """
3670 3678 for name, func in registrarobj._table.iteritems():
3671 3679 symbols[name] = func
3672 3680 if func._safe:
3673 3681 safesymbols.add(name)
3674 3682
3675 3683 # load built-in predicates explicitly to setup safesymbols
3676 3684 loadpredicate(None, None, predicate)
3677 3685
3678 3686 # tell hggettext to extract docstrings from these functions:
3679 3687 i18nfunctions = symbols.values()
@@ -1,2271 +1,2284
1 1 Log on empty repository: checking consistency
2 2
3 3 $ hg init empty
4 4 $ cd empty
5 5 $ hg log
6 6 $ hg log -r 1
7 7 abort: unknown revision '1'!
8 8 [255]
9 9 $ hg log -r -1:0
10 10 abort: unknown revision '-1'!
11 11 [255]
12 12 $ hg log -r 'branch(name)'
13 13 abort: unknown revision 'name'!
14 14 [255]
15 15 $ hg log -r null -q
16 16 -1:000000000000
17 17
18 18 The file g is crafted to have 2 filelog topological heads in a linear
19 19 changeset graph
20 20
21 21 $ hg init a
22 22 $ cd a
23 23 $ echo a > a
24 24 $ echo f > f
25 25 $ hg ci -Ama -d '1 0'
26 26 adding a
27 27 adding f
28 28
29 29 $ hg cp a b
30 30 $ hg cp f g
31 31 $ hg ci -mb -d '2 0'
32 32
33 33 $ mkdir dir
34 34 $ hg mv b dir
35 35 $ echo g >> g
36 36 $ echo f >> f
37 37 $ hg ci -mc -d '3 0'
38 38
39 39 $ hg mv a b
40 40 $ hg cp -f f g
41 41 $ echo a > d
42 42 $ hg add d
43 43 $ hg ci -md -d '4 0'
44 44
45 45 $ hg mv dir/b e
46 46 $ hg ci -me -d '5 0'
47 47
48 48 Make sure largefiles doesn't interfere with logging a regular file
49 49 $ hg --debug log a -T '{rev}: {desc}\n' --config extensions.largefiles=
50 50 updated patterns: ['.hglf/a', 'a']
51 51 0: a
52 52 $ hg log a
53 53 changeset: 0:9161b9aeaf16
54 54 user: test
55 55 date: Thu Jan 01 00:00:01 1970 +0000
56 56 summary: a
57 57
58 58 $ hg log glob:a*
59 59 changeset: 3:2ca5ba701980
60 60 user: test
61 61 date: Thu Jan 01 00:00:04 1970 +0000
62 62 summary: d
63 63
64 64 changeset: 0:9161b9aeaf16
65 65 user: test
66 66 date: Thu Jan 01 00:00:01 1970 +0000
67 67 summary: a
68 68
69 69 $ hg --debug log glob:a* -T '{rev}: {desc}\n' --config extensions.largefiles=
70 70 updated patterns: ['glob:.hglf/a*', 'glob:a*']
71 71 3: d
72 72 0: a
73 73
74 74 log on directory
75 75
76 76 $ hg log dir
77 77 changeset: 4:7e4639b4691b
78 78 tag: tip
79 79 user: test
80 80 date: Thu Jan 01 00:00:05 1970 +0000
81 81 summary: e
82 82
83 83 changeset: 2:f8954cd4dc1f
84 84 user: test
85 85 date: Thu Jan 01 00:00:03 1970 +0000
86 86 summary: c
87 87
88 88 $ hg log somethingthatdoesntexist dir
89 89 changeset: 4:7e4639b4691b
90 90 tag: tip
91 91 user: test
92 92 date: Thu Jan 01 00:00:05 1970 +0000
93 93 summary: e
94 94
95 95 changeset: 2:f8954cd4dc1f
96 96 user: test
97 97 date: Thu Jan 01 00:00:03 1970 +0000
98 98 summary: c
99 99
100 100
101 101 -f, non-existent directory
102 102
103 103 $ hg log -f dir
104 104 abort: cannot follow file not in parent revision: "dir"
105 105 [255]
106 106
107 107 -f, directory
108 108
109 109 $ hg up -q 3
110 110 $ hg log -f dir
111 111 changeset: 2:f8954cd4dc1f
112 112 user: test
113 113 date: Thu Jan 01 00:00:03 1970 +0000
114 114 summary: c
115 115
116 116 -f, directory with --patch
117 117
118 118 $ hg log -f dir -p
119 119 changeset: 2:f8954cd4dc1f
120 120 user: test
121 121 date: Thu Jan 01 00:00:03 1970 +0000
122 122 summary: c
123 123
124 124 diff -r d89b0a12d229 -r f8954cd4dc1f dir/b
125 125 --- /dev/null* (glob)
126 126 +++ b/dir/b* (glob)
127 127 @@ -0,0 +1,1 @@
128 128 +a
129 129
130 130
131 131 -f, pattern
132 132
133 133 $ hg log -f -I 'dir**' -p
134 134 changeset: 2:f8954cd4dc1f
135 135 user: test
136 136 date: Thu Jan 01 00:00:03 1970 +0000
137 137 summary: c
138 138
139 139 diff -r d89b0a12d229 -r f8954cd4dc1f dir/b
140 140 --- /dev/null* (glob)
141 141 +++ b/dir/b* (glob)
142 142 @@ -0,0 +1,1 @@
143 143 +a
144 144
145 145 $ hg up -q 4
146 146
147 147 -f, a wrong style
148 148
149 149 $ hg log -f -l1 --style something
150 150 abort: style 'something' not found
151 151 (available styles: bisect, changelog, compact, default, phases, status, xml)
152 152 [255]
153 153
154 154 -f, phases style
155 155
156 156
157 157 $ hg log -f -l1 --style phases
158 158 changeset: 4:7e4639b4691b
159 159 tag: tip
160 160 phase: draft
161 161 user: test
162 162 date: Thu Jan 01 00:00:05 1970 +0000
163 163 summary: e
164 164
165 165
166 166 $ hg log -f -l1 --style phases -q
167 167 4:7e4639b4691b
168 168
169 169 -f, but no args
170 170
171 171 $ hg log -f
172 172 changeset: 4:7e4639b4691b
173 173 tag: tip
174 174 user: test
175 175 date: Thu Jan 01 00:00:05 1970 +0000
176 176 summary: e
177 177
178 178 changeset: 3:2ca5ba701980
179 179 user: test
180 180 date: Thu Jan 01 00:00:04 1970 +0000
181 181 summary: d
182 182
183 183 changeset: 2:f8954cd4dc1f
184 184 user: test
185 185 date: Thu Jan 01 00:00:03 1970 +0000
186 186 summary: c
187 187
188 188 changeset: 1:d89b0a12d229
189 189 user: test
190 190 date: Thu Jan 01 00:00:02 1970 +0000
191 191 summary: b
192 192
193 193 changeset: 0:9161b9aeaf16
194 194 user: test
195 195 date: Thu Jan 01 00:00:01 1970 +0000
196 196 summary: a
197 197
198 198
199 199 one rename
200 200
201 201 $ hg up -q 2
202 202 $ hg log -vf a
203 203 changeset: 0:9161b9aeaf16
204 204 user: test
205 205 date: Thu Jan 01 00:00:01 1970 +0000
206 206 files: a f
207 207 description:
208 208 a
209 209
210 210
211 211
212 212 many renames
213 213
214 214 $ hg up -q tip
215 215 $ hg log -vf e
216 216 changeset: 4:7e4639b4691b
217 217 tag: tip
218 218 user: test
219 219 date: Thu Jan 01 00:00:05 1970 +0000
220 220 files: dir/b e
221 221 description:
222 222 e
223 223
224 224
225 225 changeset: 2:f8954cd4dc1f
226 226 user: test
227 227 date: Thu Jan 01 00:00:03 1970 +0000
228 228 files: b dir/b f g
229 229 description:
230 230 c
231 231
232 232
233 233 changeset: 1:d89b0a12d229
234 234 user: test
235 235 date: Thu Jan 01 00:00:02 1970 +0000
236 236 files: b g
237 237 description:
238 238 b
239 239
240 240
241 241 changeset: 0:9161b9aeaf16
242 242 user: test
243 243 date: Thu Jan 01 00:00:01 1970 +0000
244 244 files: a f
245 245 description:
246 246 a
247 247
248 248
249 249
250 250
251 251 log -pf dir/b
252 252
253 253 $ hg up -q 3
254 254 $ hg log -pf dir/b
255 255 changeset: 2:f8954cd4dc1f
256 256 user: test
257 257 date: Thu Jan 01 00:00:03 1970 +0000
258 258 summary: c
259 259
260 260 diff -r d89b0a12d229 -r f8954cd4dc1f dir/b
261 261 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
262 262 +++ b/dir/b Thu Jan 01 00:00:03 1970 +0000
263 263 @@ -0,0 +1,1 @@
264 264 +a
265 265
266 266 changeset: 1:d89b0a12d229
267 267 user: test
268 268 date: Thu Jan 01 00:00:02 1970 +0000
269 269 summary: b
270 270
271 271 diff -r 9161b9aeaf16 -r d89b0a12d229 b
272 272 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
273 273 +++ b/b Thu Jan 01 00:00:02 1970 +0000
274 274 @@ -0,0 +1,1 @@
275 275 +a
276 276
277 277 changeset: 0:9161b9aeaf16
278 278 user: test
279 279 date: Thu Jan 01 00:00:01 1970 +0000
280 280 summary: a
281 281
282 282 diff -r 000000000000 -r 9161b9aeaf16 a
283 283 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
284 284 +++ b/a Thu Jan 01 00:00:01 1970 +0000
285 285 @@ -0,0 +1,1 @@
286 286 +a
287 287
288 288
289 289 log -pf b inside dir
290 290
291 291 $ hg --cwd=dir log -pf b
292 292 changeset: 2:f8954cd4dc1f
293 293 user: test
294 294 date: Thu Jan 01 00:00:03 1970 +0000
295 295 summary: c
296 296
297 297 diff -r d89b0a12d229 -r f8954cd4dc1f dir/b
298 298 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
299 299 +++ b/dir/b Thu Jan 01 00:00:03 1970 +0000
300 300 @@ -0,0 +1,1 @@
301 301 +a
302 302
303 303 changeset: 1:d89b0a12d229
304 304 user: test
305 305 date: Thu Jan 01 00:00:02 1970 +0000
306 306 summary: b
307 307
308 308 diff -r 9161b9aeaf16 -r d89b0a12d229 b
309 309 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
310 310 +++ b/b Thu Jan 01 00:00:02 1970 +0000
311 311 @@ -0,0 +1,1 @@
312 312 +a
313 313
314 314 changeset: 0:9161b9aeaf16
315 315 user: test
316 316 date: Thu Jan 01 00:00:01 1970 +0000
317 317 summary: a
318 318
319 319 diff -r 000000000000 -r 9161b9aeaf16 a
320 320 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
321 321 +++ b/a Thu Jan 01 00:00:01 1970 +0000
322 322 @@ -0,0 +1,1 @@
323 323 +a
324 324
325 325
326 326 log -pf, but no args
327 327
328 328 $ hg log -pf
329 329 changeset: 3:2ca5ba701980
330 330 user: test
331 331 date: Thu Jan 01 00:00:04 1970 +0000
332 332 summary: d
333 333
334 334 diff -r f8954cd4dc1f -r 2ca5ba701980 a
335 335 --- a/a Thu Jan 01 00:00:03 1970 +0000
336 336 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
337 337 @@ -1,1 +0,0 @@
338 338 -a
339 339 diff -r f8954cd4dc1f -r 2ca5ba701980 b
340 340 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
341 341 +++ b/b Thu Jan 01 00:00:04 1970 +0000
342 342 @@ -0,0 +1,1 @@
343 343 +a
344 344 diff -r f8954cd4dc1f -r 2ca5ba701980 d
345 345 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
346 346 +++ b/d Thu Jan 01 00:00:04 1970 +0000
347 347 @@ -0,0 +1,1 @@
348 348 +a
349 349 diff -r f8954cd4dc1f -r 2ca5ba701980 g
350 350 --- a/g Thu Jan 01 00:00:03 1970 +0000
351 351 +++ b/g Thu Jan 01 00:00:04 1970 +0000
352 352 @@ -1,2 +1,2 @@
353 353 f
354 354 -g
355 355 +f
356 356
357 357 changeset: 2:f8954cd4dc1f
358 358 user: test
359 359 date: Thu Jan 01 00:00:03 1970 +0000
360 360 summary: c
361 361
362 362 diff -r d89b0a12d229 -r f8954cd4dc1f b
363 363 --- a/b Thu Jan 01 00:00:02 1970 +0000
364 364 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
365 365 @@ -1,1 +0,0 @@
366 366 -a
367 367 diff -r d89b0a12d229 -r f8954cd4dc1f dir/b
368 368 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
369 369 +++ b/dir/b Thu Jan 01 00:00:03 1970 +0000
370 370 @@ -0,0 +1,1 @@
371 371 +a
372 372 diff -r d89b0a12d229 -r f8954cd4dc1f f
373 373 --- a/f Thu Jan 01 00:00:02 1970 +0000
374 374 +++ b/f Thu Jan 01 00:00:03 1970 +0000
375 375 @@ -1,1 +1,2 @@
376 376 f
377 377 +f
378 378 diff -r d89b0a12d229 -r f8954cd4dc1f g
379 379 --- a/g Thu Jan 01 00:00:02 1970 +0000
380 380 +++ b/g Thu Jan 01 00:00:03 1970 +0000
381 381 @@ -1,1 +1,2 @@
382 382 f
383 383 +g
384 384
385 385 changeset: 1:d89b0a12d229
386 386 user: test
387 387 date: Thu Jan 01 00:00:02 1970 +0000
388 388 summary: b
389 389
390 390 diff -r 9161b9aeaf16 -r d89b0a12d229 b
391 391 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
392 392 +++ b/b Thu Jan 01 00:00:02 1970 +0000
393 393 @@ -0,0 +1,1 @@
394 394 +a
395 395 diff -r 9161b9aeaf16 -r d89b0a12d229 g
396 396 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
397 397 +++ b/g Thu Jan 01 00:00:02 1970 +0000
398 398 @@ -0,0 +1,1 @@
399 399 +f
400 400
401 401 changeset: 0:9161b9aeaf16
402 402 user: test
403 403 date: Thu Jan 01 00:00:01 1970 +0000
404 404 summary: a
405 405
406 406 diff -r 000000000000 -r 9161b9aeaf16 a
407 407 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
408 408 +++ b/a Thu Jan 01 00:00:01 1970 +0000
409 409 @@ -0,0 +1,1 @@
410 410 +a
411 411 diff -r 000000000000 -r 9161b9aeaf16 f
412 412 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
413 413 +++ b/f Thu Jan 01 00:00:01 1970 +0000
414 414 @@ -0,0 +1,1 @@
415 415 +f
416 416
417 417
418 418 log -vf dir/b
419 419
420 420 $ hg log -vf dir/b
421 421 changeset: 2:f8954cd4dc1f
422 422 user: test
423 423 date: Thu Jan 01 00:00:03 1970 +0000
424 424 files: b dir/b f g
425 425 description:
426 426 c
427 427
428 428
429 429 changeset: 1:d89b0a12d229
430 430 user: test
431 431 date: Thu Jan 01 00:00:02 1970 +0000
432 432 files: b g
433 433 description:
434 434 b
435 435
436 436
437 437 changeset: 0:9161b9aeaf16
438 438 user: test
439 439 date: Thu Jan 01 00:00:01 1970 +0000
440 440 files: a f
441 441 description:
442 442 a
443 443
444 444
445 445
446 446
447 447 -f and multiple filelog heads
448 448
449 449 $ hg up -q 2
450 450 $ hg log -f g --template '{rev}\n'
451 451 2
452 452 1
453 453 0
454 454 $ hg up -q tip
455 455 $ hg log -f g --template '{rev}\n'
456 456 3
457 457 2
458 458 0
459 459
460 460
461 461 log copies with --copies
462 462
463 463 $ hg log -vC --template '{rev} {file_copies}\n'
464 464 4 e (dir/b)
465 465 3 b (a)g (f)
466 466 2 dir/b (b)
467 467 1 b (a)g (f)
468 468 0
469 469
470 470 log copies switch without --copies, with old filecopy template
471 471
472 472 $ hg log -v --template '{rev} {file_copies_switch%filecopy}\n'
473 473 4
474 474 3
475 475 2
476 476 1
477 477 0
478 478
479 479 log copies switch with --copies
480 480
481 481 $ hg log -vC --template '{rev} {file_copies_switch}\n'
482 482 4 e (dir/b)
483 483 3 b (a)g (f)
484 484 2 dir/b (b)
485 485 1 b (a)g (f)
486 486 0
487 487
488 488
489 489 log copies with hardcoded style and with --style=default
490 490
491 491 $ hg log -vC -r4
492 492 changeset: 4:7e4639b4691b
493 493 tag: tip
494 494 user: test
495 495 date: Thu Jan 01 00:00:05 1970 +0000
496 496 files: dir/b e
497 497 copies: e (dir/b)
498 498 description:
499 499 e
500 500
501 501
502 502 $ hg log -vC -r4 --style=default
503 503 changeset: 4:7e4639b4691b
504 504 tag: tip
505 505 user: test
506 506 date: Thu Jan 01 00:00:05 1970 +0000
507 507 files: dir/b e
508 508 copies: e (dir/b)
509 509 description:
510 510 e
511 511
512 512
513 513 $ hg log -vC -r4 -Tjson
514 514 [
515 515 {
516 516 "rev": 4,
517 517 "node": "7e4639b4691b9f84b81036a8d4fb218ce3c5e3a3",
518 518 "branch": "default",
519 519 "phase": "draft",
520 520 "user": "test",
521 521 "date": [5, 0],
522 522 "desc": "e",
523 523 "bookmarks": [],
524 524 "tags": ["tip"],
525 525 "parents": ["2ca5ba7019804f1f597249caddf22a64d34df0ba"],
526 526 "files": ["dir/b", "e"],
527 527 "copies": {"e": "dir/b"}
528 528 }
529 529 ]
530 530
531 531 log copies, non-linear manifest
532 532
533 533 $ hg up -C 3
534 534 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
535 535 $ hg mv dir/b e
536 536 $ echo foo > foo
537 537 $ hg ci -Ame2 -d '6 0'
538 538 adding foo
539 539 created new head
540 540 $ hg log -v --template '{rev} {file_copies}\n' -r 5
541 541 5 e (dir/b)
542 542
543 543
544 544 log copies, execute bit set
545 545
546 546 #if execbit
547 547 $ chmod +x e
548 548 $ hg ci -me3 -d '7 0'
549 549 $ hg log -v --template '{rev} {file_copies}\n' -r 6
550 550 6
551 551 #endif
552 552
553 553
554 554 log -p d
555 555
556 556 $ hg log -pv d
557 557 changeset: 3:2ca5ba701980
558 558 user: test
559 559 date: Thu Jan 01 00:00:04 1970 +0000
560 560 files: a b d g
561 561 description:
562 562 d
563 563
564 564
565 565 diff -r f8954cd4dc1f -r 2ca5ba701980 d
566 566 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
567 567 +++ b/d Thu Jan 01 00:00:04 1970 +0000
568 568 @@ -0,0 +1,1 @@
569 569 +a
570 570
571 571
572 572
573 573 log --removed file
574 574
575 575 $ hg log --removed -v a
576 576 changeset: 3:2ca5ba701980
577 577 user: test
578 578 date: Thu Jan 01 00:00:04 1970 +0000
579 579 files: a b d g
580 580 description:
581 581 d
582 582
583 583
584 584 changeset: 0:9161b9aeaf16
585 585 user: test
586 586 date: Thu Jan 01 00:00:01 1970 +0000
587 587 files: a f
588 588 description:
589 589 a
590 590
591 591
592 592
593 593 log --removed revrange file
594 594
595 595 $ hg log --removed -v -r0:2 a
596 596 changeset: 0:9161b9aeaf16
597 597 user: test
598 598 date: Thu Jan 01 00:00:01 1970 +0000
599 599 files: a f
600 600 description:
601 601 a
602 602
603 603
604 604 $ cd ..
605 605
606 606 log --follow tests
607 607
608 608 $ hg init follow
609 609 $ cd follow
610 610
611 611 $ echo base > base
612 612 $ hg ci -Ambase -d '1 0'
613 613 adding base
614 614
615 615 $ echo r1 >> base
616 616 $ hg ci -Amr1 -d '1 0'
617 617 $ echo r2 >> base
618 618 $ hg ci -Amr2 -d '1 0'
619 619
620 620 $ hg up -C 1
621 621 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
622 622 $ echo b1 > b1
623 623
624 624 log -r "follow('set:clean()')"
625 625
626 626 $ hg log -r "follow('set:clean()')"
627 627 changeset: 0:67e992f2c4f3
628 628 user: test
629 629 date: Thu Jan 01 00:00:01 1970 +0000
630 630 summary: base
631 631
632 632 changeset: 1:3d5bf5654eda
633 633 user: test
634 634 date: Thu Jan 01 00:00:01 1970 +0000
635 635 summary: r1
636 636
637 637
638 638 $ hg ci -Amb1 -d '1 0'
639 639 adding b1
640 640 created new head
641 641
642 642
643 643 log -f
644 644
645 645 $ hg log -f
646 646 changeset: 3:e62f78d544b4
647 647 tag: tip
648 648 parent: 1:3d5bf5654eda
649 649 user: test
650 650 date: Thu Jan 01 00:00:01 1970 +0000
651 651 summary: b1
652 652
653 653 changeset: 1:3d5bf5654eda
654 654 user: test
655 655 date: Thu Jan 01 00:00:01 1970 +0000
656 656 summary: r1
657 657
658 658 changeset: 0:67e992f2c4f3
659 659 user: test
660 660 date: Thu Jan 01 00:00:01 1970 +0000
661 661 summary: base
662 662
663 663
664 664 log -r follow('glob:b*')
665 665
666 666 $ hg log -r "follow('glob:b*')"
667 667 changeset: 0:67e992f2c4f3
668 668 user: test
669 669 date: Thu Jan 01 00:00:01 1970 +0000
670 670 summary: base
671 671
672 672 changeset: 1:3d5bf5654eda
673 673 user: test
674 674 date: Thu Jan 01 00:00:01 1970 +0000
675 675 summary: r1
676 676
677 677 changeset: 3:e62f78d544b4
678 678 tag: tip
679 679 parent: 1:3d5bf5654eda
680 680 user: test
681 681 date: Thu Jan 01 00:00:01 1970 +0000
682 682 summary: b1
683 683
684 684 log -f -r '1 + 4'
685 685
686 686 $ hg up -C 0
687 687 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
688 688 $ echo b2 > b2
689 689 $ hg ci -Amb2 -d '1 0'
690 690 adding b2
691 691 created new head
692 692 $ hg log -f -r '1 + 4'
693 693 changeset: 4:ddb82e70d1a1
694 694 tag: tip
695 695 parent: 0:67e992f2c4f3
696 696 user: test
697 697 date: Thu Jan 01 00:00:01 1970 +0000
698 698 summary: b2
699 699
700 700 changeset: 1:3d5bf5654eda
701 701 user: test
702 702 date: Thu Jan 01 00:00:01 1970 +0000
703 703 summary: r1
704 704
705 705 changeset: 0:67e992f2c4f3
706 706 user: test
707 707 date: Thu Jan 01 00:00:01 1970 +0000
708 708 summary: base
709 709
710 710 log -r "follow('set:grep(b2)')"
711 711
712 712 $ hg log -r "follow('set:grep(b2)')"
713 713 changeset: 4:ddb82e70d1a1
714 714 tag: tip
715 715 parent: 0:67e992f2c4f3
716 716 user: test
717 717 date: Thu Jan 01 00:00:01 1970 +0000
718 718 summary: b2
719 719
720 log -r "follow('set:grep(b2)', 4)"
721
722 $ hg up -qC 0
723 $ hg log -r "follow('set:grep(b2)', 4)"
724 changeset: 4:ddb82e70d1a1
725 tag: tip
726 parent: 0:67e992f2c4f3
727 user: test
728 date: Thu Jan 01 00:00:01 1970 +0000
729 summary: b2
730
731 $ hg up -qC 4
732
720 733 log -f -r null
721 734
722 735 $ hg log -f -r null
723 736 changeset: -1:000000000000
724 737 user:
725 738 date: Thu Jan 01 00:00:00 1970 +0000
726 739
727 740 $ hg log -f -r null -G
728 741 o changeset: -1:000000000000
729 742 user:
730 743 date: Thu Jan 01 00:00:00 1970 +0000
731 744
732 745
733 746
734 747 log -f with null parent
735 748
736 749 $ hg up -C null
737 750 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
738 751 $ hg log -f
739 752
740 753
741 754 log -r . with two parents
742 755
743 756 $ hg up -C 3
744 757 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
745 758 $ hg merge tip
746 759 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
747 760 (branch merge, don't forget to commit)
748 761 $ hg log -r .
749 762 changeset: 3:e62f78d544b4
750 763 parent: 1:3d5bf5654eda
751 764 user: test
752 765 date: Thu Jan 01 00:00:01 1970 +0000
753 766 summary: b1
754 767
755 768
756 769
757 770 log -r . with one parent
758 771
759 772 $ hg ci -mm12 -d '1 0'
760 773 $ hg log -r .
761 774 changeset: 5:302e9dd6890d
762 775 tag: tip
763 776 parent: 3:e62f78d544b4
764 777 parent: 4:ddb82e70d1a1
765 778 user: test
766 779 date: Thu Jan 01 00:00:01 1970 +0000
767 780 summary: m12
768 781
769 782
770 783 $ echo postm >> b1
771 784 $ hg ci -Amb1.1 -d'1 0'
772 785
773 786
774 787 log --follow-first
775 788
776 789 $ hg log --follow-first
777 790 changeset: 6:2404bbcab562
778 791 tag: tip
779 792 user: test
780 793 date: Thu Jan 01 00:00:01 1970 +0000
781 794 summary: b1.1
782 795
783 796 changeset: 5:302e9dd6890d
784 797 parent: 3:e62f78d544b4
785 798 parent: 4:ddb82e70d1a1
786 799 user: test
787 800 date: Thu Jan 01 00:00:01 1970 +0000
788 801 summary: m12
789 802
790 803 changeset: 3:e62f78d544b4
791 804 parent: 1:3d5bf5654eda
792 805 user: test
793 806 date: Thu Jan 01 00:00:01 1970 +0000
794 807 summary: b1
795 808
796 809 changeset: 1:3d5bf5654eda
797 810 user: test
798 811 date: Thu Jan 01 00:00:01 1970 +0000
799 812 summary: r1
800 813
801 814 changeset: 0:67e992f2c4f3
802 815 user: test
803 816 date: Thu Jan 01 00:00:01 1970 +0000
804 817 summary: base
805 818
806 819
807 820
808 821 log -P 2
809 822
810 823 $ hg log -P 2
811 824 changeset: 6:2404bbcab562
812 825 tag: tip
813 826 user: test
814 827 date: Thu Jan 01 00:00:01 1970 +0000
815 828 summary: b1.1
816 829
817 830 changeset: 5:302e9dd6890d
818 831 parent: 3:e62f78d544b4
819 832 parent: 4:ddb82e70d1a1
820 833 user: test
821 834 date: Thu Jan 01 00:00:01 1970 +0000
822 835 summary: m12
823 836
824 837 changeset: 4:ddb82e70d1a1
825 838 parent: 0:67e992f2c4f3
826 839 user: test
827 840 date: Thu Jan 01 00:00:01 1970 +0000
828 841 summary: b2
829 842
830 843 changeset: 3:e62f78d544b4
831 844 parent: 1:3d5bf5654eda
832 845 user: test
833 846 date: Thu Jan 01 00:00:01 1970 +0000
834 847 summary: b1
835 848
836 849
837 850
838 851 log -r tip -p --git
839 852
840 853 $ hg log -r tip -p --git
841 854 changeset: 6:2404bbcab562
842 855 tag: tip
843 856 user: test
844 857 date: Thu Jan 01 00:00:01 1970 +0000
845 858 summary: b1.1
846 859
847 860 diff --git a/b1 b/b1
848 861 --- a/b1
849 862 +++ b/b1
850 863 @@ -1,1 +1,2 @@
851 864 b1
852 865 +postm
853 866
854 867
855 868
856 869 log -r ""
857 870
858 871 $ hg log -r ''
859 872 hg: parse error: empty query
860 873 [255]
861 874
862 875 log -r <some unknown node id>
863 876
864 877 $ hg log -r 1000000000000000000000000000000000000000
865 878 abort: unknown revision '1000000000000000000000000000000000000000'!
866 879 [255]
867 880
868 881 log -k r1
869 882
870 883 $ hg log -k r1
871 884 changeset: 1:3d5bf5654eda
872 885 user: test
873 886 date: Thu Jan 01 00:00:01 1970 +0000
874 887 summary: r1
875 888
876 889 log -p -l2 --color=always
877 890
878 891 $ hg --config extensions.color= --config color.mode=ansi \
879 892 > log -p -l2 --color=always
880 893 \x1b[0;33mchangeset: 6:2404bbcab562\x1b[0m (esc)
881 894 tag: tip
882 895 user: test
883 896 date: Thu Jan 01 00:00:01 1970 +0000
884 897 summary: b1.1
885 898
886 899 \x1b[0;1mdiff -r 302e9dd6890d -r 2404bbcab562 b1\x1b[0m (esc)
887 900 \x1b[0;31;1m--- a/b1 Thu Jan 01 00:00:01 1970 +0000\x1b[0m (esc)
888 901 \x1b[0;32;1m+++ b/b1 Thu Jan 01 00:00:01 1970 +0000\x1b[0m (esc)
889 902 \x1b[0;35m@@ -1,1 +1,2 @@\x1b[0m (esc)
890 903 b1
891 904 \x1b[0;32m+postm\x1b[0m (esc)
892 905
893 906 \x1b[0;33mchangeset: 5:302e9dd6890d\x1b[0m (esc)
894 907 parent: 3:e62f78d544b4
895 908 parent: 4:ddb82e70d1a1
896 909 user: test
897 910 date: Thu Jan 01 00:00:01 1970 +0000
898 911 summary: m12
899 912
900 913 \x1b[0;1mdiff -r e62f78d544b4 -r 302e9dd6890d b2\x1b[0m (esc)
901 914 \x1b[0;31;1m--- /dev/null Thu Jan 01 00:00:00 1970 +0000\x1b[0m (esc)
902 915 \x1b[0;32;1m+++ b/b2 Thu Jan 01 00:00:01 1970 +0000\x1b[0m (esc)
903 916 \x1b[0;35m@@ -0,0 +1,1 @@\x1b[0m (esc)
904 917 \x1b[0;32m+b2\x1b[0m (esc)
905 918
906 919
907 920
908 921 log -r tip --stat
909 922
910 923 $ hg log -r tip --stat
911 924 changeset: 6:2404bbcab562
912 925 tag: tip
913 926 user: test
914 927 date: Thu Jan 01 00:00:01 1970 +0000
915 928 summary: b1.1
916 929
917 930 b1 | 1 +
918 931 1 files changed, 1 insertions(+), 0 deletions(-)
919 932
920 933
921 934 $ cd ..
922 935
923 936 Test that log should respect the order of -rREV even if multiple OR conditions
924 937 are specified (issue5100):
925 938
926 939 $ hg init revorder
927 940 $ cd revorder
928 941
929 942 $ hg branch -q b0
930 943 $ echo 0 >> f0
931 944 $ hg ci -qAm k0 -u u0
932 945 $ hg branch -q b1
933 946 $ echo 1 >> f1
934 947 $ hg ci -qAm k1 -u u1
935 948 $ hg branch -q b2
936 949 $ echo 2 >> f2
937 950 $ hg ci -qAm k2 -u u2
938 951
939 952 $ hg update -q b2
940 953 $ echo 3 >> f2
941 954 $ hg ci -qAm k2 -u u2
942 955 $ hg update -q b1
943 956 $ echo 4 >> f1
944 957 $ hg ci -qAm k1 -u u1
945 958 $ hg update -q b0
946 959 $ echo 5 >> f0
947 960 $ hg ci -qAm k0 -u u0
948 961
949 962 summary of revisions:
950 963
951 964 $ hg log -G -T '{rev} {branch} {author} {desc} {files}\n'
952 965 @ 5 b0 u0 k0 f0
953 966 |
954 967 | o 4 b1 u1 k1 f1
955 968 | |
956 969 | | o 3 b2 u2 k2 f2
957 970 | | |
958 971 | | o 2 b2 u2 k2 f2
959 972 | |/
960 973 | o 1 b1 u1 k1 f1
961 974 |/
962 975 o 0 b0 u0 k0 f0
963 976
964 977
965 978 log -b BRANCH in ascending order:
966 979
967 980 $ hg log -r0:tip -T '{rev} {branch}\n' -b b0 -b b1
968 981 0 b0
969 982 1 b1
970 983 4 b1
971 984 5 b0
972 985 $ hg log -r0:tip -T '{rev} {branch}\n' -b b1 -b b0
973 986 0 b0
974 987 1 b1
975 988 4 b1
976 989 5 b0
977 990
978 991 log --only-branch BRANCH in descending order:
979 992
980 993 $ hg log -rtip:0 -T '{rev} {branch}\n' --only-branch b1 --only-branch b2
981 994 4 b1
982 995 3 b2
983 996 2 b2
984 997 1 b1
985 998 $ hg log -rtip:0 -T '{rev} {branch}\n' --only-branch b2 --only-branch b1
986 999 4 b1
987 1000 3 b2
988 1001 2 b2
989 1002 1 b1
990 1003
991 1004 log -u USER in ascending order, against compound set:
992 1005
993 1006 $ hg log -r'::head()' -T '{rev} {author}\n' -u u0 -u u2
994 1007 0 u0
995 1008 2 u2
996 1009 3 u2
997 1010 5 u0
998 1011 $ hg log -r'::head()' -T '{rev} {author}\n' -u u2 -u u0
999 1012 0 u0
1000 1013 2 u2
1001 1014 3 u2
1002 1015 5 u0
1003 1016
1004 1017 log -k TEXT in descending order, against compound set:
1005 1018
1006 1019 $ hg log -r'5 + reverse(::3)' -T '{rev} {desc}\n' -k k0 -k k1 -k k2
1007 1020 5 k0
1008 1021 3 k2
1009 1022 2 k2
1010 1023 1 k1
1011 1024 0 k0
1012 1025 $ hg log -r'5 + reverse(::3)' -T '{rev} {desc}\n' -k k2 -k k1 -k k0
1013 1026 5 k0
1014 1027 3 k2
1015 1028 2 k2
1016 1029 1 k1
1017 1030 0 k0
1018 1031
1019 1032 log FILE in ascending order, against dagrange:
1020 1033
1021 1034 $ hg log -r1:: -T '{rev} {files}\n' f1 f2
1022 1035 1 f1
1023 1036 2 f2
1024 1037 3 f2
1025 1038 4 f1
1026 1039 $ hg log -r1:: -T '{rev} {files}\n' f2 f1
1027 1040 1 f1
1028 1041 2 f2
1029 1042 3 f2
1030 1043 4 f1
1031 1044
1032 1045 $ cd ..
1033 1046
1034 1047 User
1035 1048
1036 1049 $ hg init usertest
1037 1050 $ cd usertest
1038 1051
1039 1052 $ echo a > a
1040 1053 $ hg ci -A -m "a" -u "User One <user1@example.org>"
1041 1054 adding a
1042 1055 $ echo b > b
1043 1056 $ hg ci -A -m "b" -u "User Two <user2@example.org>"
1044 1057 adding b
1045 1058
1046 1059 $ hg log -u "User One <user1@example.org>"
1047 1060 changeset: 0:29a4c94f1924
1048 1061 user: User One <user1@example.org>
1049 1062 date: Thu Jan 01 00:00:00 1970 +0000
1050 1063 summary: a
1051 1064
1052 1065 $ hg log -u "user1" -u "user2"
1053 1066 changeset: 1:e834b5e69c0e
1054 1067 tag: tip
1055 1068 user: User Two <user2@example.org>
1056 1069 date: Thu Jan 01 00:00:00 1970 +0000
1057 1070 summary: b
1058 1071
1059 1072 changeset: 0:29a4c94f1924
1060 1073 user: User One <user1@example.org>
1061 1074 date: Thu Jan 01 00:00:00 1970 +0000
1062 1075 summary: a
1063 1076
1064 1077 $ hg log -u "user3"
1065 1078
1066 1079 $ cd ..
1067 1080
1068 1081 $ hg init branches
1069 1082 $ cd branches
1070 1083
1071 1084 $ echo a > a
1072 1085 $ hg ci -A -m "commit on default"
1073 1086 adding a
1074 1087 $ hg branch test
1075 1088 marked working directory as branch test
1076 1089 (branches are permanent and global, did you want a bookmark?)
1077 1090 $ echo b > b
1078 1091 $ hg ci -A -m "commit on test"
1079 1092 adding b
1080 1093
1081 1094 $ hg up default
1082 1095 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1083 1096 $ echo c > c
1084 1097 $ hg ci -A -m "commit on default"
1085 1098 adding c
1086 1099 $ hg up test
1087 1100 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1088 1101 $ echo c > c
1089 1102 $ hg ci -A -m "commit on test"
1090 1103 adding c
1091 1104
1092 1105
1093 1106 log -b default
1094 1107
1095 1108 $ hg log -b default
1096 1109 changeset: 2:c3a4f03cc9a7
1097 1110 parent: 0:24427303d56f
1098 1111 user: test
1099 1112 date: Thu Jan 01 00:00:00 1970 +0000
1100 1113 summary: commit on default
1101 1114
1102 1115 changeset: 0:24427303d56f
1103 1116 user: test
1104 1117 date: Thu Jan 01 00:00:00 1970 +0000
1105 1118 summary: commit on default
1106 1119
1107 1120
1108 1121
1109 1122 log -b test
1110 1123
1111 1124 $ hg log -b test
1112 1125 changeset: 3:f5d8de11c2e2
1113 1126 branch: test
1114 1127 tag: tip
1115 1128 parent: 1:d32277701ccb
1116 1129 user: test
1117 1130 date: Thu Jan 01 00:00:00 1970 +0000
1118 1131 summary: commit on test
1119 1132
1120 1133 changeset: 1:d32277701ccb
1121 1134 branch: test
1122 1135 user: test
1123 1136 date: Thu Jan 01 00:00:00 1970 +0000
1124 1137 summary: commit on test
1125 1138
1126 1139
1127 1140
1128 1141 log -b dummy
1129 1142
1130 1143 $ hg log -b dummy
1131 1144 abort: unknown revision 'dummy'!
1132 1145 [255]
1133 1146
1134 1147
1135 1148 log -b .
1136 1149
1137 1150 $ hg log -b .
1138 1151 changeset: 3:f5d8de11c2e2
1139 1152 branch: test
1140 1153 tag: tip
1141 1154 parent: 1:d32277701ccb
1142 1155 user: test
1143 1156 date: Thu Jan 01 00:00:00 1970 +0000
1144 1157 summary: commit on test
1145 1158
1146 1159 changeset: 1:d32277701ccb
1147 1160 branch: test
1148 1161 user: test
1149 1162 date: Thu Jan 01 00:00:00 1970 +0000
1150 1163 summary: commit on test
1151 1164
1152 1165
1153 1166
1154 1167 log -b default -b test
1155 1168
1156 1169 $ hg log -b default -b test
1157 1170 changeset: 3:f5d8de11c2e2
1158 1171 branch: test
1159 1172 tag: tip
1160 1173 parent: 1:d32277701ccb
1161 1174 user: test
1162 1175 date: Thu Jan 01 00:00:00 1970 +0000
1163 1176 summary: commit on test
1164 1177
1165 1178 changeset: 2:c3a4f03cc9a7
1166 1179 parent: 0:24427303d56f
1167 1180 user: test
1168 1181 date: Thu Jan 01 00:00:00 1970 +0000
1169 1182 summary: commit on default
1170 1183
1171 1184 changeset: 1:d32277701ccb
1172 1185 branch: test
1173 1186 user: test
1174 1187 date: Thu Jan 01 00:00:00 1970 +0000
1175 1188 summary: commit on test
1176 1189
1177 1190 changeset: 0:24427303d56f
1178 1191 user: test
1179 1192 date: Thu Jan 01 00:00:00 1970 +0000
1180 1193 summary: commit on default
1181 1194
1182 1195
1183 1196
1184 1197 log -b default -b .
1185 1198
1186 1199 $ hg log -b default -b .
1187 1200 changeset: 3:f5d8de11c2e2
1188 1201 branch: test
1189 1202 tag: tip
1190 1203 parent: 1:d32277701ccb
1191 1204 user: test
1192 1205 date: Thu Jan 01 00:00:00 1970 +0000
1193 1206 summary: commit on test
1194 1207
1195 1208 changeset: 2:c3a4f03cc9a7
1196 1209 parent: 0:24427303d56f
1197 1210 user: test
1198 1211 date: Thu Jan 01 00:00:00 1970 +0000
1199 1212 summary: commit on default
1200 1213
1201 1214 changeset: 1:d32277701ccb
1202 1215 branch: test
1203 1216 user: test
1204 1217 date: Thu Jan 01 00:00:00 1970 +0000
1205 1218 summary: commit on test
1206 1219
1207 1220 changeset: 0:24427303d56f
1208 1221 user: test
1209 1222 date: Thu Jan 01 00:00:00 1970 +0000
1210 1223 summary: commit on default
1211 1224
1212 1225
1213 1226
1214 1227 log -b . -b test
1215 1228
1216 1229 $ hg log -b . -b test
1217 1230 changeset: 3:f5d8de11c2e2
1218 1231 branch: test
1219 1232 tag: tip
1220 1233 parent: 1:d32277701ccb
1221 1234 user: test
1222 1235 date: Thu Jan 01 00:00:00 1970 +0000
1223 1236 summary: commit on test
1224 1237
1225 1238 changeset: 1:d32277701ccb
1226 1239 branch: test
1227 1240 user: test
1228 1241 date: Thu Jan 01 00:00:00 1970 +0000
1229 1242 summary: commit on test
1230 1243
1231 1244
1232 1245
1233 1246 log -b 2
1234 1247
1235 1248 $ hg log -b 2
1236 1249 changeset: 2:c3a4f03cc9a7
1237 1250 parent: 0:24427303d56f
1238 1251 user: test
1239 1252 date: Thu Jan 01 00:00:00 1970 +0000
1240 1253 summary: commit on default
1241 1254
1242 1255 changeset: 0:24427303d56f
1243 1256 user: test
1244 1257 date: Thu Jan 01 00:00:00 1970 +0000
1245 1258 summary: commit on default
1246 1259
1247 1260 #if gettext
1248 1261
1249 1262 Test that all log names are translated (e.g. branches, bookmarks, tags):
1250 1263
1251 1264 $ hg bookmark babar -r tip
1252 1265
1253 1266 $ HGENCODING=UTF-8 LANGUAGE=de hg log -r tip
1254 1267 \xc3\x84nderung: 3:f5d8de11c2e2 (esc)
1255 1268 Zweig: test
1256 1269 Lesezeichen: babar
1257 1270 Marke: tip
1258 1271 Vorg\xc3\xa4nger: 1:d32277701ccb (esc)
1259 1272 Nutzer: test
1260 1273 Datum: Thu Jan 01 00:00:00 1970 +0000
1261 1274 Zusammenfassung: commit on test
1262 1275
1263 1276 $ hg bookmark -d babar
1264 1277
1265 1278 #endif
1266 1279
1267 1280 log -p --cwd dir (in subdir)
1268 1281
1269 1282 $ mkdir dir
1270 1283 $ hg log -p --cwd dir
1271 1284 changeset: 3:f5d8de11c2e2
1272 1285 branch: test
1273 1286 tag: tip
1274 1287 parent: 1:d32277701ccb
1275 1288 user: test
1276 1289 date: Thu Jan 01 00:00:00 1970 +0000
1277 1290 summary: commit on test
1278 1291
1279 1292 diff -r d32277701ccb -r f5d8de11c2e2 c
1280 1293 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1281 1294 +++ b/c Thu Jan 01 00:00:00 1970 +0000
1282 1295 @@ -0,0 +1,1 @@
1283 1296 +c
1284 1297
1285 1298 changeset: 2:c3a4f03cc9a7
1286 1299 parent: 0:24427303d56f
1287 1300 user: test
1288 1301 date: Thu Jan 01 00:00:00 1970 +0000
1289 1302 summary: commit on default
1290 1303
1291 1304 diff -r 24427303d56f -r c3a4f03cc9a7 c
1292 1305 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1293 1306 +++ b/c Thu Jan 01 00:00:00 1970 +0000
1294 1307 @@ -0,0 +1,1 @@
1295 1308 +c
1296 1309
1297 1310 changeset: 1:d32277701ccb
1298 1311 branch: test
1299 1312 user: test
1300 1313 date: Thu Jan 01 00:00:00 1970 +0000
1301 1314 summary: commit on test
1302 1315
1303 1316 diff -r 24427303d56f -r d32277701ccb b
1304 1317 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1305 1318 +++ b/b Thu Jan 01 00:00:00 1970 +0000
1306 1319 @@ -0,0 +1,1 @@
1307 1320 +b
1308 1321
1309 1322 changeset: 0:24427303d56f
1310 1323 user: test
1311 1324 date: Thu Jan 01 00:00:00 1970 +0000
1312 1325 summary: commit on default
1313 1326
1314 1327 diff -r 000000000000 -r 24427303d56f a
1315 1328 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1316 1329 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1317 1330 @@ -0,0 +1,1 @@
1318 1331 +a
1319 1332
1320 1333
1321 1334
1322 1335 log -p -R repo
1323 1336
1324 1337 $ cd dir
1325 1338 $ hg log -p -R .. ../a
1326 1339 changeset: 0:24427303d56f
1327 1340 user: test
1328 1341 date: Thu Jan 01 00:00:00 1970 +0000
1329 1342 summary: commit on default
1330 1343
1331 1344 diff -r 000000000000 -r 24427303d56f a
1332 1345 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1333 1346 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1334 1347 @@ -0,0 +1,1 @@
1335 1348 +a
1336 1349
1337 1350
1338 1351 $ cd ../..
1339 1352
1340 1353 $ hg init follow2
1341 1354 $ cd follow2
1342 1355
1343 1356 # Build the following history:
1344 1357 # tip - o - x - o - x - x
1345 1358 # \ /
1346 1359 # o - o - o - x
1347 1360 # \ /
1348 1361 # o
1349 1362 #
1350 1363 # Where "o" is a revision containing "foo" and
1351 1364 # "x" is a revision without "foo"
1352 1365
1353 1366 $ touch init
1354 1367 $ hg ci -A -m "init, unrelated"
1355 1368 adding init
1356 1369 $ echo 'foo' > init
1357 1370 $ hg ci -m "change, unrelated"
1358 1371 $ echo 'foo' > foo
1359 1372 $ hg ci -A -m "add unrelated old foo"
1360 1373 adding foo
1361 1374 $ hg rm foo
1362 1375 $ hg ci -m "delete foo, unrelated"
1363 1376 $ echo 'related' > foo
1364 1377 $ hg ci -A -m "add foo, related"
1365 1378 adding foo
1366 1379
1367 1380 $ hg up 0
1368 1381 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
1369 1382 $ touch branch
1370 1383 $ hg ci -A -m "first branch, unrelated"
1371 1384 adding branch
1372 1385 created new head
1373 1386 $ touch foo
1374 1387 $ hg ci -A -m "create foo, related"
1375 1388 adding foo
1376 1389 $ echo 'change' > foo
1377 1390 $ hg ci -m "change foo, related"
1378 1391
1379 1392 $ hg up 6
1380 1393 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1381 1394 $ echo 'change foo in branch' > foo
1382 1395 $ hg ci -m "change foo in branch, related"
1383 1396 created new head
1384 1397 $ hg merge 7
1385 1398 merging foo
1386 1399 warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
1387 1400 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
1388 1401 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1389 1402 [1]
1390 1403 $ echo 'merge 1' > foo
1391 1404 $ hg resolve -m foo
1392 1405 (no more unresolved files)
1393 1406 $ hg ci -m "First merge, related"
1394 1407
1395 1408 $ hg merge 4
1396 1409 merging foo
1397 1410 warning: conflicts while merging foo! (edit, then use 'hg resolve --mark')
1398 1411 1 files updated, 0 files merged, 0 files removed, 1 files unresolved
1399 1412 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1400 1413 [1]
1401 1414 $ echo 'merge 2' > foo
1402 1415 $ hg resolve -m foo
1403 1416 (no more unresolved files)
1404 1417 $ hg ci -m "Last merge, related"
1405 1418
1406 1419 $ hg log --graph
1407 1420 @ changeset: 10:4dae8563d2c5
1408 1421 |\ tag: tip
1409 1422 | | parent: 9:7b35701b003e
1410 1423 | | parent: 4:88176d361b69
1411 1424 | | user: test
1412 1425 | | date: Thu Jan 01 00:00:00 1970 +0000
1413 1426 | | summary: Last merge, related
1414 1427 | |
1415 1428 | o changeset: 9:7b35701b003e
1416 1429 | |\ parent: 8:e5416ad8a855
1417 1430 | | | parent: 7:87fe3144dcfa
1418 1431 | | | user: test
1419 1432 | | | date: Thu Jan 01 00:00:00 1970 +0000
1420 1433 | | | summary: First merge, related
1421 1434 | | |
1422 1435 | | o changeset: 8:e5416ad8a855
1423 1436 | | | parent: 6:dc6c325fe5ee
1424 1437 | | | user: test
1425 1438 | | | date: Thu Jan 01 00:00:00 1970 +0000
1426 1439 | | | summary: change foo in branch, related
1427 1440 | | |
1428 1441 | o | changeset: 7:87fe3144dcfa
1429 1442 | |/ user: test
1430 1443 | | date: Thu Jan 01 00:00:00 1970 +0000
1431 1444 | | summary: change foo, related
1432 1445 | |
1433 1446 | o changeset: 6:dc6c325fe5ee
1434 1447 | | user: test
1435 1448 | | date: Thu Jan 01 00:00:00 1970 +0000
1436 1449 | | summary: create foo, related
1437 1450 | |
1438 1451 | o changeset: 5:73db34516eb9
1439 1452 | | parent: 0:e87515fd044a
1440 1453 | | user: test
1441 1454 | | date: Thu Jan 01 00:00:00 1970 +0000
1442 1455 | | summary: first branch, unrelated
1443 1456 | |
1444 1457 o | changeset: 4:88176d361b69
1445 1458 | | user: test
1446 1459 | | date: Thu Jan 01 00:00:00 1970 +0000
1447 1460 | | summary: add foo, related
1448 1461 | |
1449 1462 o | changeset: 3:dd78ae4afb56
1450 1463 | | user: test
1451 1464 | | date: Thu Jan 01 00:00:00 1970 +0000
1452 1465 | | summary: delete foo, unrelated
1453 1466 | |
1454 1467 o | changeset: 2:c4c64aedf0f7
1455 1468 | | user: test
1456 1469 | | date: Thu Jan 01 00:00:00 1970 +0000
1457 1470 | | summary: add unrelated old foo
1458 1471 | |
1459 1472 o | changeset: 1:e5faa7440653
1460 1473 |/ user: test
1461 1474 | date: Thu Jan 01 00:00:00 1970 +0000
1462 1475 | summary: change, unrelated
1463 1476 |
1464 1477 o changeset: 0:e87515fd044a
1465 1478 user: test
1466 1479 date: Thu Jan 01 00:00:00 1970 +0000
1467 1480 summary: init, unrelated
1468 1481
1469 1482
1470 1483 $ hg --traceback log -f foo
1471 1484 changeset: 10:4dae8563d2c5
1472 1485 tag: tip
1473 1486 parent: 9:7b35701b003e
1474 1487 parent: 4:88176d361b69
1475 1488 user: test
1476 1489 date: Thu Jan 01 00:00:00 1970 +0000
1477 1490 summary: Last merge, related
1478 1491
1479 1492 changeset: 9:7b35701b003e
1480 1493 parent: 8:e5416ad8a855
1481 1494 parent: 7:87fe3144dcfa
1482 1495 user: test
1483 1496 date: Thu Jan 01 00:00:00 1970 +0000
1484 1497 summary: First merge, related
1485 1498
1486 1499 changeset: 8:e5416ad8a855
1487 1500 parent: 6:dc6c325fe5ee
1488 1501 user: test
1489 1502 date: Thu Jan 01 00:00:00 1970 +0000
1490 1503 summary: change foo in branch, related
1491 1504
1492 1505 changeset: 7:87fe3144dcfa
1493 1506 user: test
1494 1507 date: Thu Jan 01 00:00:00 1970 +0000
1495 1508 summary: change foo, related
1496 1509
1497 1510 changeset: 6:dc6c325fe5ee
1498 1511 user: test
1499 1512 date: Thu Jan 01 00:00:00 1970 +0000
1500 1513 summary: create foo, related
1501 1514
1502 1515 changeset: 4:88176d361b69
1503 1516 user: test
1504 1517 date: Thu Jan 01 00:00:00 1970 +0000
1505 1518 summary: add foo, related
1506 1519
1507 1520
1508 1521 Also check when maxrev < lastrevfilelog
1509 1522
1510 1523 $ hg --traceback log -f -r4 foo
1511 1524 changeset: 4:88176d361b69
1512 1525 user: test
1513 1526 date: Thu Jan 01 00:00:00 1970 +0000
1514 1527 summary: add foo, related
1515 1528
1516 1529 changeset: 2:c4c64aedf0f7
1517 1530 user: test
1518 1531 date: Thu Jan 01 00:00:00 1970 +0000
1519 1532 summary: add unrelated old foo
1520 1533
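For reference, "hg log -f -r4 foo" can also be phrased as a revset query. A minimal
Python 2 sketch (illustrative only, not part of the test; assumes it is run from the
follow2 repository root):

    # illustrative sketch: one way to express a query similar to "hg log -f -r4 foo"
    from mercurial import hg, ui as uimod
    repo = hg.repository(uimod.ui(), '.')
    # follow the history of 'foo' and keep only ancestors of revision 4
    for rev in repo.revs("follow('foo') and ancestors(4)"):
        print rev, repo[rev].description()
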
1521 1534 $ cd ..
1522 1535
1523 1536 Issue2383: hg log showing _fewer_ differences than hg diff
1524 1537
1525 1538 $ hg init issue2383
1526 1539 $ cd issue2383
1527 1540
1528 1541 Create a test repo:
1529 1542
1530 1543 $ echo a > a
1531 1544 $ hg ci -Am0
1532 1545 adding a
1533 1546 $ echo b > b
1534 1547 $ hg ci -Am1
1535 1548 adding b
1536 1549 $ hg co 0
1537 1550 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1538 1551 $ echo b > a
1539 1552 $ hg ci -m2
1540 1553 created new head
1541 1554
1542 1555 Merge:
1543 1556
1544 1557 $ hg merge
1545 1558 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1546 1559 (branch merge, don't forget to commit)
1547 1560
1548 1561 Make sure there's a file listed in the merge to trigger the bug:
1549 1562
1550 1563 $ echo c > a
1551 1564 $ hg ci -m3
1552 1565
1553 1566 Two files shown here in diff:
1554 1567
1555 1568 $ hg diff --rev 2:3
1556 1569 diff -r b09be438c43a -r 8e07aafe1edc a
1557 1570 --- a/a Thu Jan 01 00:00:00 1970 +0000
1558 1571 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1559 1572 @@ -1,1 +1,1 @@
1560 1573 -b
1561 1574 +c
1562 1575 diff -r b09be438c43a -r 8e07aafe1edc b
1563 1576 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1564 1577 +++ b/b Thu Jan 01 00:00:00 1970 +0000
1565 1578 @@ -0,0 +1,1 @@
1566 1579 +b
1567 1580
1568 1581 Diff here should be the same:
1569 1582
1570 1583 $ hg log -vpr 3
1571 1584 changeset: 3:8e07aafe1edc
1572 1585 tag: tip
1573 1586 parent: 2:b09be438c43a
1574 1587 parent: 1:925d80f479bb
1575 1588 user: test
1576 1589 date: Thu Jan 01 00:00:00 1970 +0000
1577 1590 files: a
1578 1591 description:
1579 1592 3
1580 1593
1581 1594
1582 1595 diff -r b09be438c43a -r 8e07aafe1edc a
1583 1596 --- a/a Thu Jan 01 00:00:00 1970 +0000
1584 1597 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1585 1598 @@ -1,1 +1,1 @@
1586 1599 -b
1587 1600 +c
1588 1601 diff -r b09be438c43a -r 8e07aafe1edc b
1589 1602 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1590 1603 +++ b/b Thu Jan 01 00:00:00 1970 +0000
1591 1604 @@ -0,0 +1,1 @@
1592 1605 +b
1593 1606
1594 1607 $ cd ..
1595 1608
1596 1609 'hg log -r rev fn' when last(filelog(fn)) != rev
1597 1610
1598 1611 $ hg init simplelog
1599 1612 $ cd simplelog
1600 1613 $ echo f > a
1601 1614 $ hg ci -Am'a' -d '0 0'
1602 1615 adding a
1603 1616 $ echo f >> a
1604 1617 $ hg ci -Am'a bis' -d '1 0'
1605 1618
1606 1619 $ hg log -r0 a
1607 1620 changeset: 0:9f758d63dcde
1608 1621 user: test
1609 1622 date: Thu Jan 01 00:00:00 1970 +0000
1610 1623 summary: a
1611 1624
1612 1625 enable obsolescence markers to test the hidden feature
1613 1626
1614 1627 $ cat >> $HGRCPATH << EOF
1615 1628 > [experimental]
1616 1629 > evolution=createmarkers
1617 1630 > EOF
1618 1631
1619 1632 $ hg log --template='{rev}:{node}\n'
1620 1633 1:a765632148dc55d38c35c4f247c618701886cb2f
1621 1634 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1622 1635 $ hg debugobsolete a765632148dc55d38c35c4f247c618701886cb2f
1623 1636 $ hg up null -q
1624 1637 $ hg log --template='{rev}:{node}\n'
1625 1638 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1626 1639 $ hg log --template='{rev}:{node}\n' --hidden
1627 1640 1:a765632148dc55d38c35c4f247c618701886cb2f
1628 1641 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1629 1642 $ hg log -r a
1630 1643 abort: hidden revision 'a'!
1631 1644 (use --hidden to access hidden revisions)
1632 1645 [255]
1633 1646
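Hidden changesets remain queryable; a minimal Python 2 sketch (illustrative only, not
part of the test) listing them through the hidden() revset on the unfiltered repository:

    # illustrative sketch: list revisions hidden by obsolescence markers
    from mercurial import hg, ui as uimod
    repo = hg.repository(uimod.ui(), '.')
    # the 'visible' repo filter drops obsolete changesets; query the unfiltered view
    print list(repo.unfiltered().revs('hidden()'))
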
1634 1647 test that a parent prevents a changeset from being hidden
1635 1648
1636 1649 $ hg up 1 -q --hidden
1637 1650 $ hg log --template='{rev}:{node}\n'
1638 1651 1:a765632148dc55d38c35c4f247c618701886cb2f
1639 1652 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1640 1653
1641 1654 test that a second parent also prevents a changeset from being hidden
1642 1655
1643 1656 $ hg debugsetparents 0 1 # nothing suitable to merge here
1644 1657 $ hg log --template='{rev}:{node}\n'
1645 1658 1:a765632148dc55d38c35c4f247c618701886cb2f
1646 1659 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1647 1660 $ hg debugsetparents 1
1648 1661 $ hg up -q null
1649 1662
1650 1663 bookmarks prevent a changeset from being hidden
1651 1664
1652 1665 $ hg bookmark --hidden -r 1 X
1653 1666 $ hg log --template '{rev}:{node}\n'
1654 1667 1:a765632148dc55d38c35c4f247c618701886cb2f
1655 1668 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1656 1669 $ hg bookmark -d X
1657 1670
1658 1671 divergent bookmarks are not hidden
1659 1672
1660 1673 $ hg bookmark --hidden -r 1 X@foo
1661 1674 $ hg log --template '{rev}:{node}\n'
1662 1675 1:a765632148dc55d38c35c4f247c618701886cb2f
1663 1676 0:9f758d63dcde62d547ebfb08e1e7ee96535f2b05
1664 1677
1665 1678 clear extensions configuration
1666 1679 $ echo '[extensions]' >> $HGRCPATH
1667 1680 $ echo "obs=!" >> $HGRCPATH
1668 1681 $ cd ..
1669 1682
1670 1683 test -u/-k for problematic encoding
1671 1684 # unicode: cp932:
1672 1685 # u30A2 0x83 0x41(= 'A')
1673 1686 # u30C2 0x83 0x61(= 'a')
1674 1687
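The ambiguity being tested comes from the second byte of each cp932 sequence colliding
with an ASCII letter; a minimal Python 2 sketch (illustrative only, not part of the test):

    # illustrative sketch: why byte-wise case handling would confuse these two characters
    print repr(u'\u30A2'.encode('cp932'))   # '\x83A' - second byte is ASCII 'A'
    print repr(u'\u30C2'.encode('cp932'))   # '\x83a' - second byte is ASCII 'a'
    # lower-casing the raw bytes turns one character into the other, so -u/-k
    # matching has to work on decoded text rather than on cp932 bytes
    print u'\u30A2'.encode('cp932').lower() == u'\u30C2'.encode('cp932')   # True
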
1675 1688 $ hg init problematicencoding
1676 1689 $ cd problematicencoding
1677 1690
1678 1691 $ python > setup.sh <<EOF
1679 1692 > print u'''
1680 1693 > echo a > text
1681 1694 > hg add text
1682 1695 > hg --encoding utf-8 commit -u '\u30A2' -m none
1683 1696 > echo b > text
1684 1697 > hg --encoding utf-8 commit -u '\u30C2' -m none
1685 1698 > echo c > text
1686 1699 > hg --encoding utf-8 commit -u none -m '\u30A2'
1687 1700 > echo d > text
1688 1701 > hg --encoding utf-8 commit -u none -m '\u30C2'
1689 1702 > '''.encode('utf-8')
1690 1703 > EOF
1691 1704 $ sh < setup.sh
1692 1705
1693 1706 test in problematic encoding
1694 1707 $ python > test.sh <<EOF
1695 1708 > print u'''
1696 1709 > hg --encoding cp932 log --template '{rev}\\n' -u '\u30A2'
1697 1710 > echo ====
1698 1711 > hg --encoding cp932 log --template '{rev}\\n' -u '\u30C2'
1699 1712 > echo ====
1700 1713 > hg --encoding cp932 log --template '{rev}\\n' -k '\u30A2'
1701 1714 > echo ====
1702 1715 > hg --encoding cp932 log --template '{rev}\\n' -k '\u30C2'
1703 1716 > '''.encode('cp932')
1704 1717 > EOF
1705 1718 $ sh < test.sh
1706 1719 0
1707 1720 ====
1708 1721 1
1709 1722 ====
1710 1723 2
1711 1724 0
1712 1725 ====
1713 1726 3
1714 1727 1
1715 1728
1716 1729 $ cd ..
1717 1730
1718 1731 test hg log on non-existent files and on directories
1719 1732 $ hg init issue1340
1720 1733 $ cd issue1340
1721 1734 $ mkdir d1; mkdir D2; mkdir D3.i; mkdir d4.hg; mkdir d5.d; mkdir .d6
1722 1735 $ echo 1 > d1/f1
1723 1736 $ echo 1 > D2/f1
1724 1737 $ echo 1 > D3.i/f1
1725 1738 $ echo 1 > d4.hg/f1
1726 1739 $ echo 1 > d5.d/f1
1727 1740 $ echo 1 > .d6/f1
1728 1741 $ hg -q add .
1729 1742 $ hg commit -m "a bunch of weird directories"
1730 1743 $ hg log -l1 d1/f1 | grep changeset
1731 1744 changeset: 0:65624cd9070a
1732 1745 $ hg log -l1 f1
1733 1746 $ hg log -l1 . | grep changeset
1734 1747 changeset: 0:65624cd9070a
1735 1748 $ hg log -l1 ./ | grep changeset
1736 1749 changeset: 0:65624cd9070a
1737 1750 $ hg log -l1 d1 | grep changeset
1738 1751 changeset: 0:65624cd9070a
1739 1752 $ hg log -l1 D2 | grep changeset
1740 1753 changeset: 0:65624cd9070a
1741 1754 $ hg log -l1 D2/f1 | grep changeset
1742 1755 changeset: 0:65624cd9070a
1743 1756 $ hg log -l1 D3.i | grep changeset
1744 1757 changeset: 0:65624cd9070a
1745 1758 $ hg log -l1 D3.i/f1 | grep changeset
1746 1759 changeset: 0:65624cd9070a
1747 1760 $ hg log -l1 d4.hg | grep changeset
1748 1761 changeset: 0:65624cd9070a
1749 1762 $ hg log -l1 d4.hg/f1 | grep changeset
1750 1763 changeset: 0:65624cd9070a
1751 1764 $ hg log -l1 d5.d | grep changeset
1752 1765 changeset: 0:65624cd9070a
1753 1766 $ hg log -l1 d5.d/f1 | grep changeset
1754 1767 changeset: 0:65624cd9070a
1755 1768 $ hg log -l1 .d6 | grep changeset
1756 1769 changeset: 0:65624cd9070a
1757 1770 $ hg log -l1 .d6/f1 | grep changeset
1758 1771 changeset: 0:65624cd9070a
1759 1772
1760 1773 issue3772: hg log -r :null showing revision 0 as well
1761 1774
1762 1775 $ hg log -r :null
1763 1776 changeset: 0:65624cd9070a
1764 1777 tag: tip
1765 1778 user: test
1766 1779 date: Thu Jan 01 00:00:00 1970 +0000
1767 1780 summary: a bunch of weird directories
1768 1781
1769 1782 changeset: -1:000000000000
1770 1783 user:
1771 1784 date: Thu Jan 01 00:00:00 1970 +0000
1772 1785
1773 1786 $ hg log -r null:null
1774 1787 changeset: -1:000000000000
1775 1788 user:
1776 1789 date: Thu Jan 01 00:00:00 1970 +0000
1777 1790
1778 1791 working-directory revision requires special treatment
1779 1792
1780 1793 clean:
1781 1794
1782 1795 $ hg log -r 'wdir()' --debug
1783 1796 changeset: 2147483647:ffffffffffffffffffffffffffffffffffffffff
1784 1797 phase: draft
1785 1798 parent: 0:65624cd9070a035fa7191a54f2b8af39f16b0c08
1786 1799 parent: -1:0000000000000000000000000000000000000000
1787 1800 user: test
1788 1801 date: [A-Za-z0-9:+ ]+ (re)
1789 1802 extra: branch=default
1790 1803
1791 1804 $ hg log -r 'wdir()' -p --stat
1792 1805 changeset: 2147483647:ffffffffffff
1793 1806 parent: 0:65624cd9070a
1794 1807 user: test
1795 1808 date: [A-Za-z0-9:+ ]+ (re)
1796 1809
1797 1810
1798 1811
1799 1812
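The identifiers shown for the working directory are reserved placeholders rather than a
real changeset; a minimal Python sketch of where the numbers come from (illustrative
only; that mercurial.node exposes them as wdirrev/wdirid is an assumption to check
against the exact version):

    # illustrative sketch: the reserved working-directory identifiers
    print 2 ** 31 - 1   # 2147483647, the revision number reported for wdir()
    print 'f' * 40      # ffff...f, the 40-digit node shown with --debug
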
1800 1813 dirty:
1801 1814
1802 1815 $ echo 2 >> d1/f1
1803 1816 $ echo 2 > d1/f2
1804 1817 $ hg add d1/f2
1805 1818 $ hg remove .d6/f1
1806 1819 $ hg status
1807 1820 M d1/f1
1808 1821 A d1/f2
1809 1822 R .d6/f1
1810 1823
1811 1824 $ hg log -r 'wdir()'
1812 1825 changeset: 2147483647:ffffffffffff
1813 1826 parent: 0:65624cd9070a
1814 1827 user: test
1815 1828 date: [A-Za-z0-9:+ ]+ (re)
1816 1829
1817 1830 $ hg log -r 'wdir()' -q
1818 1831 2147483647:ffffffffffff
1819 1832
1820 1833 $ hg log -r 'wdir()' --debug
1821 1834 changeset: 2147483647:ffffffffffffffffffffffffffffffffffffffff
1822 1835 phase: draft
1823 1836 parent: 0:65624cd9070a035fa7191a54f2b8af39f16b0c08
1824 1837 parent: -1:0000000000000000000000000000000000000000
1825 1838 user: test
1826 1839 date: [A-Za-z0-9:+ ]+ (re)
1827 1840 files: d1/f1
1828 1841 files+: d1/f2
1829 1842 files-: .d6/f1
1830 1843 extra: branch=default
1831 1844
1832 1845 $ hg log -r 'wdir()' -p --stat --git
1833 1846 changeset: 2147483647:ffffffffffff
1834 1847 parent: 0:65624cd9070a
1835 1848 user: test
1836 1849 date: [A-Za-z0-9:+ ]+ (re)
1837 1850
1838 1851 .d6/f1 | 1 -
1839 1852 d1/f1 | 1 +
1840 1853 d1/f2 | 1 +
1841 1854 3 files changed, 2 insertions(+), 1 deletions(-)
1842 1855
1843 1856 diff --git a/.d6/f1 b/.d6/f1
1844 1857 deleted file mode 100644
1845 1858 --- a/.d6/f1
1846 1859 +++ /dev/null
1847 1860 @@ -1,1 +0,0 @@
1848 1861 -1
1849 1862 diff --git a/d1/f1 b/d1/f1
1850 1863 --- a/d1/f1
1851 1864 +++ b/d1/f1
1852 1865 @@ -1,1 +1,2 @@
1853 1866 1
1854 1867 +2
1855 1868 diff --git a/d1/f2 b/d1/f2
1856 1869 new file mode 100644
1857 1870 --- /dev/null
1858 1871 +++ b/d1/f2
1859 1872 @@ -0,0 +1,1 @@
1860 1873 +2
1861 1874
1862 1875 $ hg log -r 'wdir()' -Tjson
1863 1876 [
1864 1877 {
1865 1878 "rev": null,
1866 1879 "node": null,
1867 1880 "branch": "default",
1868 1881 "phase": "draft",
1869 1882 "user": "test",
1870 1883 "date": [*, 0], (glob)
1871 1884 "desc": "",
1872 1885 "bookmarks": [],
1873 1886 "tags": [],
1874 1887 "parents": ["65624cd9070a035fa7191a54f2b8af39f16b0c08"]
1875 1888 }
1876 1889 ]
1877 1890
1878 1891 $ hg log -r 'wdir()' -Tjson -q
1879 1892 [
1880 1893 {
1881 1894 "rev": null,
1882 1895 "node": null
1883 1896 }
1884 1897 ]
1885 1898
1886 1899 $ hg log -r 'wdir()' -Tjson --debug
1887 1900 [
1888 1901 {
1889 1902 "rev": null,
1890 1903 "node": null,
1891 1904 "branch": "default",
1892 1905 "phase": "draft",
1893 1906 "user": "test",
1894 1907 "date": [*, 0], (glob)
1895 1908 "desc": "",
1896 1909 "bookmarks": [],
1897 1910 "tags": [],
1898 1911 "parents": ["65624cd9070a035fa7191a54f2b8af39f16b0c08"],
1899 1912 "manifest": null,
1900 1913 "extra": {"branch": "default"},
1901 1914 "modified": ["d1/f1"],
1902 1915 "added": ["d1/f2"],
1903 1916 "removed": [".d6/f1"]
1904 1917 }
1905 1918 ]
1906 1919
1907 1920 $ hg revert -aqC
1908 1921
1909 1922 Check that an arbitrary name added to a repo shows up in log automatically
1910 1923
1911 1924 $ cat > ../names.py <<EOF
1912 1925 > """A small extension to test adding arbitrary names to a repo"""
1913 1926 > from mercurial.namespaces import namespace
1914 1927 >
1915 1928 > def reposetup(ui, repo):
1916 1929 > foo = {'foo': repo[0].node()}
1917 1930 > names = lambda r: foo.keys()
1918 1931 > namemap = lambda r, name: foo.get(name)
1919 1932 > nodemap = lambda r, node: [name for name, n in foo.iteritems()
1920 1933 > if n == node]
1921 1934 > ns = namespace("bars", templatename="bar", logname="barlog",
1922 1935 > colorname="barcolor", listnames=names, namemap=namemap,
1923 1936 > nodemap=nodemap)
1924 1937 >
1925 1938 > repo.names.addnamespace(ns)
1926 1939 > EOF
1927 1940
1928 1941 $ hg --config extensions.names=../names.py log -r 0
1929 1942 changeset: 0:65624cd9070a
1930 1943 tag: tip
1931 1944 barlog: foo
1932 1945 user: test
1933 1946 date: Thu Jan 01 00:00:00 1970 +0000
1934 1947 summary: a bunch of weird directories
1935 1948
1936 1949 $ hg --config extensions.names=../names.py \
1937 1950 > --config extensions.color= --config color.log.barcolor=red \
1938 1951 > --color=always log -r 0
1939 1952 \x1b[0;33mchangeset: 0:65624cd9070a\x1b[0m (esc)
1940 1953 tag: tip
1941 1954 \x1b[0;31mbarlog: foo\x1b[0m (esc)
1942 1955 user: test
1943 1956 date: Thu Jan 01 00:00:00 1970 +0000
1944 1957 summary: a bunch of weird directories
1945 1958
1946 1959 $ hg --config extensions.names=../names.py log -r 0 --template '{bars}\n'
1947 1960 foo
1948 1961
1949 1962 $ cd ..
1950 1963
1951 1964 hg log -f dir across branches
1952 1965
1953 1966 $ hg init acrossbranches
1954 1967 $ cd acrossbranches
1955 1968 $ mkdir d
1956 1969 $ echo a > d/a && hg ci -Aqm a
1957 1970 $ echo b > d/a && hg ci -Aqm b
1958 1971 $ hg up -q 0
1959 1972 $ echo b > d/a && hg ci -Aqm c
1960 1973 $ hg log -f d -T '{desc}' -G
1961 1974 @ c
1962 1975 |
1963 1976 o a
1964 1977
1965 1978 Ensure that largefiles doesn't interfere with following a normal file
1966 1979 $ hg --config extensions.largefiles= log -f d -T '{desc}' -G
1967 1980 @ c
1968 1981 |
1969 1982 o a
1970 1983
1971 1984 $ hg log -f d/a -T '{desc}' -G
1972 1985 @ c
1973 1986 |
1974 1987 o a
1975 1988
1976 1989 $ cd ..
1977 1990
1978 1991 hg log -f with linkrev pointing to another branch
1979 1992 -------------------------------------------------
1980 1993
1981 1994 create history with a filerev whose linkrev points to another branch
1982 1995
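Each filelog revision records a linkrev, the changelog revision that introduced it. When
identical file content is recreated on another branch (as the graft below does), the
existing filelog entry is reused and its linkrev keeps pointing at the original branch.
A minimal Python 2 sketch for inspecting this (illustrative only, not part of the test):

    # illustrative sketch: map filelog revisions of 'a' to their linkrevs
    from mercurial import hg, ui as uimod
    repo = hg.repository(uimod.ui(), '.')
    fl = repo.file('a')                      # the filelog for 'a'
    for frev in fl:
        print frev, '->', fl.linkrev(frev)   # changelog rev recorded for this file rev
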
1983 1996 $ hg init branchedlinkrev
1984 1997 $ cd branchedlinkrev
1985 1998 $ echo 1 > a
1986 1999 $ hg commit -Am 'content1'
1987 2000 adding a
1988 2001 $ echo 2 > a
1989 2002 $ hg commit -m 'content2'
1990 2003 $ hg up --rev 'desc(content1)'
1991 2004 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1992 2005 $ echo unrelated > unrelated
1993 2006 $ hg commit -Am 'unrelated'
1994 2007 adding unrelated
1995 2008 created new head
1996 2009 $ hg graft -r 'desc(content2)'
1997 2010 grafting 1:2294ae80ad84 "content2"
1998 2011 $ echo 3 > a
1999 2012 $ hg commit -m 'content3'
2000 2013 $ hg log -G
2001 2014 @ changeset: 4:50b9b36e9c5d
2002 2015 | tag: tip
2003 2016 | user: test
2004 2017 | date: Thu Jan 01 00:00:00 1970 +0000
2005 2018 | summary: content3
2006 2019 |
2007 2020 o changeset: 3:15b2327059e5
2008 2021 | user: test
2009 2022 | date: Thu Jan 01 00:00:00 1970 +0000
2010 2023 | summary: content2
2011 2024 |
2012 2025 o changeset: 2:2029acd1168c
2013 2026 | parent: 0:ae0a3c9f9e95
2014 2027 | user: test
2015 2028 | date: Thu Jan 01 00:00:00 1970 +0000
2016 2029 | summary: unrelated
2017 2030 |
2018 2031 | o changeset: 1:2294ae80ad84
2019 2032 |/ user: test
2020 2033 | date: Thu Jan 01 00:00:00 1970 +0000
2021 2034 | summary: content2
2022 2035 |
2023 2036 o changeset: 0:ae0a3c9f9e95
2024 2037 user: test
2025 2038 date: Thu Jan 01 00:00:00 1970 +0000
2026 2039 summary: content1
2027 2040
2028 2041
2029 2042 log -f on the file should list the graft result.
2030 2043
2031 2044 $ hg log -Gf a
2032 2045 @ changeset: 4:50b9b36e9c5d
2033 2046 | tag: tip
2034 2047 | user: test
2035 2048 | date: Thu Jan 01 00:00:00 1970 +0000
2036 2049 | summary: content3
2037 2050 |
2038 2051 o changeset: 3:15b2327059e5
2039 2052 : user: test
2040 2053 : date: Thu Jan 01 00:00:00 1970 +0000
2041 2054 : summary: content2
2042 2055 :
2043 2056 o changeset: 0:ae0a3c9f9e95
2044 2057 user: test
2045 2058 date: Thu Jan 01 00:00:00 1970 +0000
2046 2059 summary: content1
2047 2060
2048 2061
2049 2062 plain log lists the original version
2050 2063 (XXX we should probably list both)
2051 2064
2052 2065 $ hg log -G a
2053 2066 @ changeset: 4:50b9b36e9c5d
2054 2067 : tag: tip
2055 2068 : user: test
2056 2069 : date: Thu Jan 01 00:00:00 1970 +0000
2057 2070 : summary: content3
2058 2071 :
2059 2072 : o changeset: 1:2294ae80ad84
2060 2073 :/ user: test
2061 2074 : date: Thu Jan 01 00:00:00 1970 +0000
2062 2075 : summary: content2
2063 2076 :
2064 2077 o changeset: 0:ae0a3c9f9e95
2065 2078 user: test
2066 2079 date: Thu Jan 01 00:00:00 1970 +0000
2067 2080 summary: content1
2068 2081
2069 2082
2070 2083 hg log -f from the grafted changeset
2071 2084 (The bootstrap should properly take the topology into account)
2072 2085
2073 2086 $ hg up 'desc(content3)^'
2074 2087 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2075 2088 $ hg log -Gf a
2076 2089 @ changeset: 3:15b2327059e5
2077 2090 : user: test
2078 2091 : date: Thu Jan 01 00:00:00 1970 +0000
2079 2092 : summary: content2
2080 2093 :
2081 2094 o changeset: 0:ae0a3c9f9e95
2082 2095 user: test
2083 2096 date: Thu Jan 01 00:00:00 1970 +0000
2084 2097 summary: content1
2085 2098
2086 2099
2087 2100 Test that we use the first non-hidden changeset in that case.
2088 2101
2089 2102 (hide the changeset)
2090 2103
2091 2104 $ hg log -T '{node}\n' -r 1
2092 2105 2294ae80ad8447bc78383182eeac50cb049df623
2093 2106 $ hg debugobsolete 2294ae80ad8447bc78383182eeac50cb049df623
2094 2107 $ hg log -G
2095 2108 o changeset: 4:50b9b36e9c5d
2096 2109 | tag: tip
2097 2110 | user: test
2098 2111 | date: Thu Jan 01 00:00:00 1970 +0000
2099 2112 | summary: content3
2100 2113 |
2101 2114 @ changeset: 3:15b2327059e5
2102 2115 | user: test
2103 2116 | date: Thu Jan 01 00:00:00 1970 +0000
2104 2117 | summary: content2
2105 2118 |
2106 2119 o changeset: 2:2029acd1168c
2107 2120 | parent: 0:ae0a3c9f9e95
2108 2121 | user: test
2109 2122 | date: Thu Jan 01 00:00:00 1970 +0000
2110 2123 | summary: unrelated
2111 2124 |
2112 2125 o changeset: 0:ae0a3c9f9e95
2113 2126 user: test
2114 2127 date: Thu Jan 01 00:00:00 1970 +0000
2115 2128 summary: content1
2116 2129
2117 2130
2118 2131 Check that log on the file does not drop the file revision.
2119 2132
2120 2133 $ hg log -G a
2121 2134 o changeset: 4:50b9b36e9c5d
2122 2135 | tag: tip
2123 2136 | user: test
2124 2137 | date: Thu Jan 01 00:00:00 1970 +0000
2125 2138 | summary: content3
2126 2139 |
2127 2140 @ changeset: 3:15b2327059e5
2128 2141 : user: test
2129 2142 : date: Thu Jan 01 00:00:00 1970 +0000
2130 2143 : summary: content2
2131 2144 :
2132 2145 o changeset: 0:ae0a3c9f9e95
2133 2146 user: test
2134 2147 date: Thu Jan 01 00:00:00 1970 +0000
2135 2148 summary: content1
2136 2149
2137 2150
2138 2151 Even when a head revision is linkrev-shadowed.
2139 2152
2140 2153 $ hg log -T '{node}\n' -r 4
2141 2154 50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2
2142 2155 $ hg debugobsolete 50b9b36e9c5df2c6fc6dcefa8ad0da929e84aed2
2143 2156 $ hg log -G a
2144 2157 @ changeset: 3:15b2327059e5
2145 2158 : tag: tip
2146 2159 : user: test
2147 2160 : date: Thu Jan 01 00:00:00 1970 +0000
2148 2161 : summary: content2
2149 2162 :
2150 2163 o changeset: 0:ae0a3c9f9e95
2151 2164 user: test
2152 2165 date: Thu Jan 01 00:00:00 1970 +0000
2153 2166 summary: content1
2154 2167
2155 2168
2156 2169 $ cd ..
2157 2170
2158 2171 Even when the file revision is missing from some head:
2159 2172
2160 2173 $ hg init issue4490
2161 2174 $ cd issue4490
2162 2175 $ echo '[experimental]' >> .hg/hgrc
2163 2176 $ echo 'evolution=createmarkers' >> .hg/hgrc
2164 2177 $ echo a > a
2165 2178 $ hg ci -Am0
2166 2179 adding a
2167 2180 $ echo b > b
2168 2181 $ hg ci -Am1
2169 2182 adding b
2170 2183 $ echo B > b
2171 2184 $ hg ci --amend -m 1
2172 2185 $ hg up 0
2173 2186 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
2174 2187 $ echo c > c
2175 2188 $ hg ci -Am2
2176 2189 adding c
2177 2190 created new head
2178 2191 $ hg up 'head() and not .'
2179 2192 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
2180 2193 $ hg log -G
2181 2194 o changeset: 4:db815d6d32e6
2182 2195 | tag: tip
2183 2196 | parent: 0:f7b1eb17ad24
2184 2197 | user: test
2185 2198 | date: Thu Jan 01 00:00:00 1970 +0000
2186 2199 | summary: 2
2187 2200 |
2188 2201 | @ changeset: 3:9bc8ce7f9356
2189 2202 |/ parent: 0:f7b1eb17ad24
2190 2203 | user: test
2191 2204 | date: Thu Jan 01 00:00:00 1970 +0000
2192 2205 | summary: 1
2193 2206 |
2194 2207 o changeset: 0:f7b1eb17ad24
2195 2208 user: test
2196 2209 date: Thu Jan 01 00:00:00 1970 +0000
2197 2210 summary: 0
2198 2211
2199 2212 $ hg log -f -G b
2200 2213 @ changeset: 3:9bc8ce7f9356
2201 2214 | parent: 0:f7b1eb17ad24
2202 2215 ~ user: test
2203 2216 date: Thu Jan 01 00:00:00 1970 +0000
2204 2217 summary: 1
2205 2218
2206 2219 $ hg log -G b
2207 2220 @ changeset: 3:9bc8ce7f9356
2208 2221 | parent: 0:f7b1eb17ad24
2209 2222 ~ user: test
2210 2223 date: Thu Jan 01 00:00:00 1970 +0000
2211 2224 summary: 1
2212 2225
2213 2226 $ cd ..
2214 2227
2215 2228 Check proper reporting when the manifest changes but not the file (issue4499)
2216 2229 ------------------------------------------------------------------------
2217 2230
2218 2231 $ hg init issue4499
2219 2232 $ cd issue4499
2220 2233 $ for f in A B C D F E G H I J K L M N O P Q R S T U; do
2221 2234 > echo 1 > $f;
2222 2235 > hg add $f;
2223 2236 > done
2224 2237 $ hg commit -m 'A1B1C1'
2225 2238 $ echo 2 > A
2226 2239 $ echo 2 > B
2227 2240 $ echo 2 > C
2228 2241 $ hg commit -m 'A2B2C2'
2229 2242 $ hg up 0
2230 2243 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
2231 2244 $ echo 3 > A
2232 2245 $ echo 2 > B
2233 2246 $ echo 2 > C
2234 2247 $ hg commit -m 'A3B2C2'
2235 2248 created new head
2236 2249
2237 2250 $ hg log -G
2238 2251 @ changeset: 2:fe5fc3d0eb17
2239 2252 | tag: tip
2240 2253 | parent: 0:abf4f0e38563
2241 2254 | user: test
2242 2255 | date: Thu Jan 01 00:00:00 1970 +0000
2243 2256 | summary: A3B2C2
2244 2257 |
2245 2258 | o changeset: 1:07dcc6b312c0
2246 2259 |/ user: test
2247 2260 | date: Thu Jan 01 00:00:00 1970 +0000
2248 2261 | summary: A2B2C2
2249 2262 |
2250 2263 o changeset: 0:abf4f0e38563
2251 2264 user: test
2252 2265 date: Thu Jan 01 00:00:00 1970 +0000
2253 2266 summary: A1B1C1
2254 2267
2255 2268
2256 2269 Log -f on B should report the current changesets
2257 2270
2258 2271 $ hg log -fG B
2259 2272 @ changeset: 2:fe5fc3d0eb17
2260 2273 | tag: tip
2261 2274 | parent: 0:abf4f0e38563
2262 2275 | user: test
2263 2276 | date: Thu Jan 01 00:00:00 1970 +0000
2264 2277 | summary: A3B2C2
2265 2278 |
2266 2279 o changeset: 0:abf4f0e38563
2267 2280 user: test
2268 2281 date: Thu Jan 01 00:00:00 1970 +0000
2269 2282 summary: A1B1C1
2270 2283
2271 2284 $ cd ..