revset: support ranges in #generations relation
av6
r41395:431cf2c8 default
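This change teaches the experimental #generations relation to accept a range subscript in addition to a single integer. Inferred from the new parsing in relsubscriptset and the _splitrange helper below, usage would look roughly like the following (a hedged sketch; the revisions used are placeholders and the operator is still experimental and undocumented):

    hg log -r '.#generations[2]'       # single integer still works: grandchildren of '.'
    hg log -r '.#generations[-2:0]'    # '.', its parents and its grandparents
    hg log -r '.#generations[-1:1]'    # parents, '.' itself, and children
    hg log -r '.#generations[:0]'      # open lower bound: '.' and all of its ancestors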
@@ -1,846 +1,846
1 1 # dagop.py - graph ancestry and topology algorithm for revset
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import heapq
11 11
12 12 from .node import (
13 13 nullrev,
14 14 )
15 15 from .thirdparty import (
16 16 attr,
17 17 )
18 18 from . import (
19 19 error,
20 20 mdiff,
21 21 node,
22 22 patch,
23 23 pycompat,
24 24 smartset,
25 25 )
26 26
27 27 baseset = smartset.baseset
28 28 generatorset = smartset.generatorset
29 29
30 30 # possible maximum depth between null and wdir()
31 _maxlogdepth = 0x80000000
31 maxlogdepth = 0x80000000
32 32
33 33 def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
34 34 """Walk DAG using 'pfunc' from the given 'revs' nodes
35 35
36 36 'pfunc(rev)' should return the parent/child revisions of the given 'rev'
37 37 if 'reverse' is True/False respectively.
38 38
39 39 Scan ends at the stopdepth (exclusive) if specified. Revisions found
40 40 earlier than the startdepth are omitted.
41 41 """
42 42 if startdepth is None:
43 43 startdepth = 0
44 44 if stopdepth is None:
45 stopdepth = _maxlogdepth
45 stopdepth = maxlogdepth
46 46 if stopdepth == 0:
47 47 return
48 48 if stopdepth < 0:
49 49 raise error.ProgrammingError('negative stopdepth')
50 50 if reverse:
51 51 heapsign = -1 # max heap
52 52 else:
53 53 heapsign = +1 # min heap
54 54
55 55 # load input revs lazily to heap so earlier revisions can be yielded
56 56 # without fully computing the input revs
57 57 revs.sort(reverse)
58 58 irevs = iter(revs)
59 59 pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)
60 60
61 61 inputrev = next(irevs, None)
62 62 if inputrev is not None:
63 63 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
64 64
65 65 lastrev = None
66 66 while pendingheap:
67 67 currev, curdepth = heapq.heappop(pendingheap)
68 68 currev = heapsign * currev
69 69 if currev == inputrev:
70 70 inputrev = next(irevs, None)
71 71 if inputrev is not None:
72 72 heapq.heappush(pendingheap, (heapsign * inputrev, 0))
73 73 # rescan parents until curdepth >= startdepth because queued entries
74 74 # of the same revision are iterated from the lowest depth
75 75 foundnew = (currev != lastrev)
76 76 if foundnew and curdepth >= startdepth:
77 77 lastrev = currev
78 78 yield currev
79 79 pdepth = curdepth + 1
80 80 if foundnew and pdepth < stopdepth:
81 81 for prev in pfunc(currev):
82 82 if prev != node.nullrev:
83 83 heapq.heappush(pendingheap, (heapsign * prev, pdepth))
84 84
85 85 def filectxancestors(fctxs, followfirst=False):
86 86 """Like filectx.ancestors(), but can walk from multiple files/revisions,
87 87 and includes the given fctxs themselves
88 88
89 89 Yields (rev, {fctx, ...}) pairs in descending order.
90 90 """
91 91 visit = {}
92 92 visitheap = []
93 93 def addvisit(fctx):
94 94 rev = fctx.rev()
95 95 if rev not in visit:
96 96 visit[rev] = set()
97 97 heapq.heappush(visitheap, -rev) # max heap
98 98 visit[rev].add(fctx)
99 99
100 100 if followfirst:
101 101 cut = 1
102 102 else:
103 103 cut = None
104 104
105 105 for c in fctxs:
106 106 addvisit(c)
107 107 while visit:
108 108 currev = -heapq.heappop(visitheap)
109 109 curfctxs = visit.pop(currev)
110 110 yield currev, curfctxs
111 111 for c in curfctxs:
112 112 for parent in c.parents()[:cut]:
113 113 addvisit(parent)
114 114 assert not visitheap
115 115
116 116 def filerevancestors(fctxs, followfirst=False):
117 117 """Like filectx.ancestors(), but can walk from multiple files/revisions,
118 118 and includes the given fctxs themselves
119 119
120 120 Returns a smartset.
121 121 """
122 122 gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst))
123 123 return generatorset(gen, iterasc=False)
124 124
125 125 def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
126 126 if followfirst:
127 127 cut = 1
128 128 else:
129 129 cut = None
130 130 cl = repo.changelog
131 131 def plainpfunc(rev):
132 132 try:
133 133 return cl.parentrevs(rev)[:cut]
134 134 except error.WdirUnsupported:
135 135 return (pctx.rev() for pctx in repo[rev].parents()[:cut])
136 136 if cutfunc is None:
137 137 pfunc = plainpfunc
138 138 else:
139 139 pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
140 140 revs = revs.filter(lambda rev: not cutfunc(rev))
141 141 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
142 142
143 143 def revancestors(repo, revs, followfirst=False, startdepth=None,
144 144 stopdepth=None, cutfunc=None):
145 145 """Like revlog.ancestors(), but supports additional options, includes
146 146 the given revs themselves, and returns a smartset
147 147
148 148 Scan ends at the stopdepth (exclusive) if specified. Revisions found
149 149 earlier than the startdepth are omitted.
150 150
151 151 If cutfunc is provided, it will be used to cut the traversal of the DAG.
152 152 When cutfunc(X) returns True, the DAG traversal stops - revision X and
153 153 X's ancestors in the traversal path will be skipped. This could be an
154 154 optimization sometimes.
155 155
156 156 Note: if Y is an ancestor of X, cutfunc(X) returning True does not
157 157 necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
158 158 return True in this case. For example,
159 159
160 160 D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
161 161 |\ # will include "A", because the path D -> C -> A was not cut.
162 162 B C # If "B" gets cut, "A" might want to be cut too.
163 163 |/
164 164 A
165 165 """
166 166 gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
167 167 cutfunc)
168 168 return generatorset(gen, iterasc=False)
169 169
170 170 def _genrevdescendants(repo, revs, followfirst):
171 171 if followfirst:
172 172 cut = 1
173 173 else:
174 174 cut = None
175 175
176 176 cl = repo.changelog
177 177 first = revs.min()
178 178 nullrev = node.nullrev
179 179 if first == nullrev:
180 180 # Are there nodes with a null first parent and a non-null
181 181 # second one? Maybe. Do we care? Probably not.
182 182 yield first
183 183 for i in cl:
184 184 yield i
185 185 else:
186 186 seen = set(revs)
187 187 for i in cl.revs(first):
188 188 if i in seen:
189 189 yield i
190 190 continue
191 191 for x in cl.parentrevs(i)[:cut]:
192 192 if x != nullrev and x in seen:
193 193 seen.add(i)
194 194 yield i
195 195 break
196 196
197 197 def _builddescendantsmap(repo, startrev, followfirst):
198 198 """Build map of 'rev -> child revs', offset from startrev"""
199 199 cl = repo.changelog
200 200 nullrev = node.nullrev
201 201 descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
202 202 for currev in cl.revs(startrev + 1):
203 203 p1rev, p2rev = cl.parentrevs(currev)
204 204 if p1rev >= startrev:
205 205 descmap[p1rev - startrev].append(currev)
206 206 if not followfirst and p2rev != nullrev and p2rev >= startrev:
207 207 descmap[p2rev - startrev].append(currev)
208 208 return descmap
209 209
210 210 def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
211 211 startrev = revs.min()
212 212 descmap = _builddescendantsmap(repo, startrev, followfirst)
213 213 def pfunc(rev):
214 214 return descmap[rev - startrev]
215 215 return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
216 216
217 217 def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
218 218 """Like revlog.descendants() but supports additional options, includes
219 219 the given revs themselves, and returns a smartset
220 220
221 221 Scan ends at the stopdepth (exclusive) if specified. Revisions found
222 222 earlier than the startdepth are omitted.
223 223 """
224 if startdepth is None and stopdepth is None:
224 if startdepth is None and (stopdepth is None or stopdepth == maxlogdepth):
225 225 gen = _genrevdescendants(repo, revs, followfirst)
226 226 else:
227 227 gen = _genrevdescendantsofdepth(repo, revs, followfirst,
228 228 startdepth, stopdepth)
229 229 return generatorset(gen, iterasc=True)
230 230
231 231 def descendantrevs(revs, revsfn, parentrevsfn):
232 232 """Generate revision number descendants in revision order.
233 233
234 234 Yields revision numbers starting with a child of some rev in
235 235 ``revs``. Results are ordered by revision number and are
236 236 therefore topological. Each revision is not considered a descendant
237 237 of itself.
238 238
239 239 ``revsfn`` is a callable that with no argument iterates over all
240 240 revision numbers and with a ``start`` argument iterates over revision
241 241 numbers beginning with that value.
242 242
243 243 ``parentrevsfn`` is a callable that receives a revision number and
244 244 returns an iterable of parent revision numbers, whose values may include
245 245 nullrev.
246 246 """
247 247 first = min(revs)
248 248
249 249 if first == nullrev:
250 250 for rev in revsfn():
251 251 yield rev
252 252 return
253 253
254 254 seen = set(revs)
255 255 for rev in revsfn(start=first + 1):
256 256 for prev in parentrevsfn(rev):
257 257 if prev != nullrev and prev in seen:
258 258 seen.add(rev)
259 259 yield rev
260 260 break
261 261
262 262 def _reachablerootspure(repo, minroot, roots, heads, includepath):
263 263 """return (heads(::<roots> and ::<heads>))
264 264
265 265 If includepath is True, return (<roots>::<heads>)."""
266 266 if not roots:
267 267 return []
268 268 parentrevs = repo.changelog.parentrevs
269 269 roots = set(roots)
270 270 visit = list(heads)
271 271 reachable = set()
272 272 seen = {}
273 273 # prefetch all the things! (because python is slow)
274 274 reached = reachable.add
275 275 dovisit = visit.append
276 276 nextvisit = visit.pop
277 277 # open-code the post-order traversal due to the tiny size of
278 278 # sys.getrecursionlimit()
279 279 while visit:
280 280 rev = nextvisit()
281 281 if rev in roots:
282 282 reached(rev)
283 283 if not includepath:
284 284 continue
285 285 parents = parentrevs(rev)
286 286 seen[rev] = parents
287 287 for parent in parents:
288 288 if parent >= minroot and parent not in seen:
289 289 dovisit(parent)
290 290 if not reachable:
291 291 return baseset()
292 292 if not includepath:
293 293 return reachable
294 294 for rev in sorted(seen):
295 295 for parent in seen[rev]:
296 296 if parent in reachable:
297 297 reached(rev)
298 298 return reachable
299 299
300 300 def reachableroots(repo, roots, heads, includepath=False):
301 301 """return (heads(::<roots> and ::<heads>))
302 302
303 303 If includepath is True, return (<roots>::<heads>)."""
304 304 if not roots:
305 305 return baseset()
306 306 minroot = roots.min()
307 307 roots = list(roots)
308 308 heads = list(heads)
309 309 try:
310 310 revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
311 311 except AttributeError:
312 312 revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
313 313 revs = baseset(revs)
314 314 revs.sort()
315 315 return revs
316 316
317 317 def _changesrange(fctx1, fctx2, linerange2, diffopts):
318 318 """Return `(diffinrange, linerange1)` where `diffinrange` is True
319 319 if diff from fctx2 to fctx1 has changes in linerange2 and
320 320 `linerange1` is the new line range for fctx1.
321 321 """
322 322 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
323 323 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
324 324 diffinrange = any(stype == '!' for _, stype in filteredblocks)
325 325 return diffinrange, linerange1
326 326
327 327 def blockancestors(fctx, fromline, toline, followfirst=False):
328 328 """Yield ancestors of `fctx` with respect to the block of lines within
329 329 `fromline`-`toline` range.
330 330 """
331 331 diffopts = patch.diffopts(fctx._repo.ui)
332 332 fctx = fctx.introfilectx()
333 333 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
334 334 while visit:
335 335 c, linerange2 = visit.pop(max(visit))
336 336 pl = c.parents()
337 337 if followfirst:
338 338 pl = pl[:1]
339 339 if not pl:
340 340 # The block originates from the initial revision.
341 341 yield c, linerange2
342 342 continue
343 343 inrange = False
344 344 for p in pl:
345 345 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
346 346 inrange = inrange or inrangep
347 347 if linerange1[0] == linerange1[1]:
348 348 # Parent's linerange is empty, meaning that the block got
349 349 # introduced in this revision; no need to go further in this
350 350 # branch.
351 351 continue
352 352 # Set _descendantrev with 'c' (a known descendant) so that, when
353 353 # _adjustlinkrev is called for 'p', it receives this descendant
354 354 # (as srcrev) instead of the possibly topmost introrev.
355 355 p._descendantrev = c.rev()
356 356 visit[p.linkrev(), p.filenode()] = p, linerange1
357 357 if inrange:
358 358 yield c, linerange2
359 359
360 360 def blockdescendants(fctx, fromline, toline):
361 361 """Yield descendants of `fctx` with respect to the block of lines within
362 362 `fromline`-`toline` range.
363 363 """
364 364 # First possibly yield 'fctx' if it has changes in range with respect to
365 365 # its parents.
366 366 try:
367 367 c, linerange1 = next(blockancestors(fctx, fromline, toline))
368 368 except StopIteration:
369 369 pass
370 370 else:
371 371 if c == fctx:
372 372 yield c, linerange1
373 373
374 374 diffopts = patch.diffopts(fctx._repo.ui)
375 375 fl = fctx.filelog()
376 376 seen = {fctx.filerev(): (fctx, (fromline, toline))}
377 377 for i in fl.descendants([fctx.filerev()]):
378 378 c = fctx.filectx(i)
379 379 inrange = False
380 380 for x in fl.parentrevs(i):
381 381 try:
382 382 p, linerange2 = seen[x]
383 383 except KeyError:
384 384 # nullrev or other branch
385 385 continue
386 386 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
387 387 inrange = inrange or inrangep
388 388 # If revision 'i' has been seen (it's a merge) and the line range
389 389 # previously computed differs from the one we just got, we take the
390 390 # surrounding interval. This is conservative but avoids losing
391 391 # information.
392 392 if i in seen and seen[i][1] != linerange1:
393 393 lbs, ubs = zip(linerange1, seen[i][1])
394 394 linerange1 = min(lbs), max(ubs)
395 395 seen[i] = c, linerange1
396 396 if inrange:
397 397 yield c, linerange1
398 398
399 399 @attr.s(slots=True, frozen=True)
400 400 class annotateline(object):
401 401 fctx = attr.ib()
402 402 lineno = attr.ib()
403 403 # Whether this annotation was the result of a skip-annotate.
404 404 skip = attr.ib(default=False)
405 405 text = attr.ib(default=None)
406 406
407 407 @attr.s(slots=True, frozen=True)
408 408 class _annotatedfile(object):
409 409 # list indexed by lineno - 1
410 410 fctxs = attr.ib()
411 411 linenos = attr.ib()
412 412 skips = attr.ib()
413 413 # full file content
414 414 text = attr.ib()
415 415
416 416 def _countlines(text):
417 417 if text.endswith("\n"):
418 418 return text.count("\n")
419 419 return text.count("\n") + int(bool(text))
420 420
421 421 def _decoratelines(text, fctx):
422 422 n = _countlines(text)
423 423 linenos = pycompat.rangelist(1, n + 1)
424 424 return _annotatedfile([fctx] * n, linenos, [False] * n, text)
425 425
426 426 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
427 427 r'''
428 428 Given parent and child fctxes and annotate data for parents, for all lines
429 429 in either parent that match the child, annotate the child with the parent's
430 430 data.
431 431
432 432 Additionally, if `skipchild` is True, replace all other lines with parent
433 433 annotate data as well such that child is never blamed for any lines.
434 434
435 435 See test-annotate.py for unit tests.
436 436 '''
437 437 pblocks = [(parent, mdiff.allblocks(parent.text, child.text, opts=diffopts))
438 438 for parent in parents]
439 439
440 440 if skipchild:
441 441 # Need to iterate over the blocks twice -- make it a list
442 442 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
443 443 # Mercurial currently prefers p2 over p1 for annotate.
444 444 # TODO: change this?
445 445 for parent, blocks in pblocks:
446 446 for (a1, a2, b1, b2), t in blocks:
447 447 # Changed blocks ('!') or blocks made only of blank lines ('~')
448 448 # belong to the child.
449 449 if t == '=':
450 450 child.fctxs[b1:b2] = parent.fctxs[a1:a2]
451 451 child.linenos[b1:b2] = parent.linenos[a1:a2]
452 452 child.skips[b1:b2] = parent.skips[a1:a2]
453 453
454 454 if skipchild:
455 455 # Now try and match up anything that couldn't be matched,
456 456 # Reversing pblocks maintains bias towards p2, matching above
457 457 # behavior.
458 458 pblocks.reverse()
459 459
460 460 # The heuristics are:
461 461 # * Work on blocks of changed lines (effectively diff hunks with -U0).
462 462 # This could potentially be smarter but works well enough.
463 463 # * For a non-matching section, do a best-effort fit. Match lines in
464 464 # diff hunks 1:1, dropping lines as necessary.
465 465 # * Repeat the last line as a last resort.
466 466
467 467 # First, replace as much as possible without repeating the last line.
468 468 remaining = [(parent, []) for parent, _blocks in pblocks]
469 469 for idx, (parent, blocks) in enumerate(pblocks):
470 470 for (a1, a2, b1, b2), _t in blocks:
471 471 if a2 - a1 >= b2 - b1:
472 472 for bk in pycompat.xrange(b1, b2):
473 473 if child.fctxs[bk] == childfctx:
474 474 ak = min(a1 + (bk - b1), a2 - 1)
475 475 child.fctxs[bk] = parent.fctxs[ak]
476 476 child.linenos[bk] = parent.linenos[ak]
477 477 child.skips[bk] = True
478 478 else:
479 479 remaining[idx][1].append((a1, a2, b1, b2))
480 480
481 481 # Then, look at anything left, which might involve repeating the last
482 482 # line.
483 483 for parent, blocks in remaining:
484 484 for a1, a2, b1, b2 in blocks:
485 485 for bk in pycompat.xrange(b1, b2):
486 486 if child.fctxs[bk] == childfctx:
487 487 ak = min(a1 + (bk - b1), a2 - 1)
488 488 child.fctxs[bk] = parent.fctxs[ak]
489 489 child.linenos[bk] = parent.linenos[ak]
490 490 child.skips[bk] = True
491 491 return child
492 492
493 493 def annotate(base, parents, skiprevs=None, diffopts=None):
494 494 """Core algorithm for filectx.annotate()
495 495
496 496 `parents(fctx)` is a function returning a list of parent filectxs.
497 497 """
498 498
499 499 # This algorithm would prefer to be recursive, but Python is a
500 500 # bit recursion-hostile. Instead we do an iterative
501 501 # depth-first search.
502 502
503 503 # 1st DFS pre-calculates pcache and needed
504 504 visit = [base]
505 505 pcache = {}
506 506 needed = {base: 1}
507 507 while visit:
508 508 f = visit.pop()
509 509 if f in pcache:
510 510 continue
511 511 pl = parents(f)
512 512 pcache[f] = pl
513 513 for p in pl:
514 514 needed[p] = needed.get(p, 0) + 1
515 515 if p not in pcache:
516 516 visit.append(p)
517 517
518 518 # 2nd DFS does the actual annotate
519 519 visit[:] = [base]
520 520 hist = {}
521 521 while visit:
522 522 f = visit[-1]
523 523 if f in hist:
524 524 visit.pop()
525 525 continue
526 526
527 527 ready = True
528 528 pl = pcache[f]
529 529 for p in pl:
530 530 if p not in hist:
531 531 ready = False
532 532 visit.append(p)
533 533 if ready:
534 534 visit.pop()
535 535 curr = _decoratelines(f.data(), f)
536 536 skipchild = False
537 537 if skiprevs is not None:
538 538 skipchild = f._changeid in skiprevs
539 539 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
540 540 diffopts)
541 541 for p in pl:
542 542 if needed[p] == 1:
543 543 del hist[p]
544 544 del needed[p]
545 545 else:
546 546 needed[p] -= 1
547 547
548 548 hist[f] = curr
549 549 del pcache[f]
550 550
551 551 a = hist[base]
552 552 return [annotateline(*r) for r in zip(a.fctxs, a.linenos, a.skips,
553 553 mdiff.splitnewlines(a.text))]
554 554
555 555 def toposort(revs, parentsfunc, firstbranch=()):
556 556 """Yield revisions from heads to roots one (topo) branch at a time.
557 557
558 558 This function aims to be used by a graph generator that wishes to minimize
559 559 the number of parallel branches and their interleaving.
560 560
561 561 Example iteration order (numbers show the "true" order in a changelog):
562 562
563 563 o 4
564 564 |
565 565 o 1
566 566 |
567 567 | o 3
568 568 | |
569 569 | o 2
570 570 |/
571 571 o 0
572 572
573 573 Note that the ancestors of merges are understood by the current
574 574 algorithm to be on the same branch. This means no reordering will
575 575 occur behind a merge.
576 576 """
577 577
578 578 ### Quick summary of the algorithm
579 579 #
580 580 # This function is based around a "retention" principle. We keep revisions
581 581 # in memory until we are ready to emit a whole branch that immediately
582 582 # "merges" into an existing one. This reduces the number of parallel
583 583 # branches with interleaved revisions.
584 584 #
585 585 # During iteration revs are split into two groups:
586 586 # A) revision already emitted
587 587 # B) revision in "retention". They are stored as different subgroups.
588 588 #
589 589 # for each REV, we do the following logic:
590 590 #
591 591 # 1) if REV is a parent of (A), we will emit it. If there is a
592 592 # retention group ((B) above) that is blocked on REV being
593 593 # available, we emit all the revisions out of that retention
594 594 # group first.
595 595 #
596 596 # 2) else, we'll search for a subgroup in (B) awaiting REV to be
597 597 # available, if such a subgroup exists, we add REV to it and the subgroup is
598 598 # now awaiting REV.parents() to be available.
599 599 #
600 600 # 3) finally if no such group existed in (B), we create a new subgroup.
601 601 #
602 602 #
603 603 # To bootstrap the algorithm, we emit the tipmost revision (which
604 604 # puts it in group (A) from above).
605 605
606 606 revs.sort(reverse=True)
607 607
608 608 # Set of parents of revision that have been emitted. They can be considered
609 609 # unblocked as the graph generator is already aware of them so there is no
610 610 # need to delay the revisions that reference them.
611 611 #
612 612 # If someone wants to prioritize a branch over the others, pre-filling this
613 613 # set will force all other branches to wait until this branch is ready to be
614 614 # emitted.
615 615 unblocked = set(firstbranch)
616 616
617 617 # list of groups waiting to be displayed, each group is defined by:
618 618 #
619 619 # (revs: lists of revs waiting to be displayed,
620 620 # blocked: set of revs that cannot be displayed before those in 'revs')
621 621 #
622 622 # The second value ('blocked') corresponds to the parents of any revision in the
623 623 # group ('revs') that is not itself contained in the group. The main idea
624 624 # of this algorithm is to delay as much as possible the emission of any
625 625 # revision. This means waiting for the moment we are about to display
626 626 # these parents to display the revs in a group.
627 627 #
628 628 # This first implementation is smart until it encounters a merge: it will
629 629 # emit revs as soon as any parent is about to be emitted and can grow an
630 630 # arbitrary number of revs in 'blocked'. In practice this means we properly
631 631 # retain new branches but give up on any special ordering for ancestors
632 632 # of merges. The implementation can be improved to handle this better.
633 633 #
634 634 # The first subgroup is special. It corresponds to all the revisions that
635 635 # were already emitted. The 'revs' list is expected to be empty and the
636 636 # 'blocked' set contains the parent revisions of already emitted revisions.
637 637 #
638 638 # You could pre-seed the <parents> set of groups[0] with specific
639 639 # changesets to select what the first emitted branch should be.
640 640 groups = [([], unblocked)]
641 641 pendingheap = []
642 642 pendingset = set()
643 643
644 644 heapq.heapify(pendingheap)
645 645 heappop = heapq.heappop
646 646 heappush = heapq.heappush
647 647 for currentrev in revs:
648 648 # Heap works with smallest element, we want highest so we invert
649 649 if currentrev not in pendingset:
650 650 heappush(pendingheap, -currentrev)
651 651 pendingset.add(currentrev)
652 652 # iterate on pending revs until after the current rev has been
653 653 # processed.
654 654 rev = None
655 655 while rev != currentrev:
656 656 rev = -heappop(pendingheap)
657 657 pendingset.remove(rev)
658 658
659 659 # Seek a blocked subgroup waiting for the current revision.
660 660 matching = [i for i, g in enumerate(groups) if rev in g[1]]
661 661
662 662 if matching:
663 663 # The main idea is to gather together all sets that are blocked
664 664 # on the same revision.
665 665 #
666 666 # Groups are merged when a common blocking ancestor is
667 667 # observed. For example, given two groups:
668 668 #
669 669 # revs [5, 4] waiting for 1
670 670 # revs [3, 2] waiting for 1
671 671 #
672 672 # These two groups will be merged when we process
673 673 # 1. In theory, we could have merged the groups when
674 674 # we added 2 to the group it is now in (we could have
675 675 # noticed the groups were both blocked on 1 then), but
676 676 # the way it works now makes the algorithm simpler.
677 677 #
678 678 # We also always keep the oldest subgroup first. We can
679 679 # probably improve the behavior by having the longest set
680 680 # first. That way, graph algorithms could minimise the length
681 681 # of parallel lines in their drawing. This is currently not done.
682 682 targetidx = matching.pop(0)
683 683 trevs, tparents = groups[targetidx]
684 684 for i in matching:
685 685 gr = groups[i]
686 686 trevs.extend(gr[0])
687 687 tparents |= gr[1]
688 688 # delete all merged subgroups (except the one we kept)
689 689 # (starting from the last subgroup for performance and
690 690 # sanity reasons)
691 691 for i in reversed(matching):
692 692 del groups[i]
693 693 else:
694 694 # This is a new head. We create a new subgroup for it.
695 695 targetidx = len(groups)
696 696 groups.append(([], {rev}))
697 697
698 698 gr = groups[targetidx]
699 699
700 700 # We now add the current nodes to this subgroup. This is done
701 701 # after the subgroup merging because all elements from a subgroup
702 702 # that relied on this rev must precede it.
703 703 #
704 704 # we also update the <parents> set to include the parents of the
705 705 # new nodes.
706 706 if rev == currentrev: # only display stuff in rev
707 707 gr[0].append(rev)
708 708 gr[1].remove(rev)
709 709 parents = [p for p in parentsfunc(rev) if p > node.nullrev]
710 710 gr[1].update(parents)
711 711 for p in parents:
712 712 if p not in pendingset:
713 713 pendingset.add(p)
714 714 heappush(pendingheap, -p)
715 715
716 716 # Look for a subgroup to display
717 717 #
718 718 # When unblocked is empty (if clause), we were not waiting for any
719 719 # revisions during the first iteration (if no priority was given) or
720 720 # if we emitted a whole disconnected set of the graph (reached a
721 721 # root). In that case we arbitrarily take the oldest known
722 722 # subgroup. The heuristic could probably be better.
723 723 #
724 724 # Otherwise (elif clause) if the subgroup is blocked on
725 725 # a revision we just emitted, we can safely emit it as
726 726 # well.
727 727 if not unblocked:
728 728 if len(groups) > 1: # display other subset
729 729 targetidx = 1
730 730 gr = groups[1]
731 731 elif not gr[1] & unblocked:
732 732 gr = None
733 733
734 734 if gr is not None:
735 735 # update the set of awaited revisions with the one from the
736 736 # subgroup
737 737 unblocked |= gr[1]
738 738 # output all revisions in the subgroup
739 739 for r in gr[0]:
740 740 yield r
741 741 # delete the subgroup that you just output
742 742 # unless it is groups[0] in which case you just empty it.
743 743 if targetidx:
744 744 del groups[targetidx]
745 745 else:
746 746 gr[0][:] = []
747 747 # Check if we have some subgroup waiting for revisions we are not going to
748 748 # iterate over
749 749 for g in groups:
750 750 for r in g[0]:
751 751 yield r
752 752
753 753 def headrevs(revs, parentsfn):
754 754 """Resolve the set of heads from a set of revisions.
755 755
756 756 Receives an iterable of revision numbers and a callable that receives a
757 757 revision number and returns an iterable of parent revision numbers, possibly
758 758 including nullrev.
759 759
760 760 Returns a set of revision numbers that are DAG heads within the passed
761 761 subset.
762 762
763 763 ``nullrev`` is never included in the returned set, even if it is provided in
764 764 the input set.
765 765 """
766 766 headrevs = set(revs)
767 767 parents = set([node.nullrev])
768 768 up = parents.update
769 769
770 770 for rev in revs:
771 771 up(parentsfn(rev))
772 772 headrevs.difference_update(parents)
773 773 return headrevs
774 774
775 775 def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None):
776 776 """Returns the set of all revs that have no children with control.
777 777
778 778 ``revsfn`` is a callable that with no arguments returns an iterator over
779 779 all revision numbers in topological order. With a ``start`` argument, it
780 780 returns revision numbers starting at that number.
781 781
782 782 ``parentrevsfn`` is a callable receiving a revision number and returns an
783 783 iterable of parent revision numbers, where values can include nullrev.
784 784
785 785 ``startrev`` is a revision number at which to start the search.
786 786
787 787 ``stoprevs`` is an iterable of revision numbers that, when encountered,
788 788 will stop DAG traversal beyond them. Parents of revisions in this
789 789 collection will be heads.
790 790 """
791 791 if startrev is None:
792 792 startrev = nullrev
793 793
794 794 stoprevs = set(stoprevs or [])
795 795
796 796 reachable = {startrev}
797 797 heads = {startrev}
798 798
799 799 for rev in revsfn(start=startrev + 1):
800 800 for prev in parentrevsfn(rev):
801 801 if prev in reachable:
802 802 if rev not in stoprevs:
803 803 reachable.add(rev)
804 804 heads.add(rev)
805 805
806 806 if prev in heads and prev not in stoprevs:
807 807 heads.remove(prev)
808 808
809 809 return heads
810 810
811 811 def linearize(revs, parentsfn):
812 812 """Linearize and topologically sort a list of revisions.
813 813
814 814 The linearization process tries to create long runs of revs where a child
815 815 rev comes immediately after its first parent. This is done by visiting the
816 816 heads of the revs in inverse topological order, and for each visited rev,
817 817 visiting its second parent, then its first parent, then adding the rev
818 818 itself to the output list.
819 819
820 820 Returns a list of revision numbers.
821 821 """
822 822 visit = list(sorted(headrevs(revs, parentsfn), reverse=True))
823 823 finished = set()
824 824 result = []
825 825
826 826 while visit:
827 827 rev = visit.pop()
828 828 if rev < 0:
829 829 rev = -rev - 1
830 830
831 831 if rev not in finished:
832 832 result.append(rev)
833 833 finished.add(rev)
834 834
835 835 else:
836 836 visit.append(-rev - 1)
837 837
838 838 for prev in parentsfn(rev):
839 839 if prev == node.nullrev or prev not in revs or prev in finished:
840 840 continue
841 841
842 842 visit.append(prev)
843 843
844 844 assert len(result) == len(revs)
845 845
846 846 return result
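On the dagop.py side, the formerly private _maxlogdepth constant becomes the public maxlogdepth so revset.py can use it as the default for open-ended bounds, and revdescendants() now takes the fast unbounded path when stopdepth equals maxlogdepth. As a reminder of the depth semantics these functions use (startdepth inclusive, stopdepth exclusive, the given revs sit at depth 0), a minimal sketch follows; repo and some_rev are assumed names, not part of the diff:

    from mercurial import dagop, smartset

    revs = smartset.baseset([some_rev])                          # hypothetical starting revision
    parents      = dagop.revancestors(repo, revs, False, 1, 2)   # depth 1 only
    grandparents = dagop.revancestors(repo, revs, False, 2, 3)   # depth 2 only
    children     = dagop.revdescendants(repo, revs, False, 1, 2) # depth 1 only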
@@ -1,2326 +1,2384
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 diffutil,
17 17 encoding,
18 18 error,
19 19 hbisect,
20 20 match as matchmod,
21 21 node,
22 22 obsolete as obsmod,
23 23 obsutil,
24 24 pathutil,
25 25 phases,
26 26 pycompat,
27 27 registrar,
28 28 repoview,
29 29 revsetlang,
30 30 scmutil,
31 31 smartset,
32 32 stack as stackmod,
33 33 util,
34 34 )
35 35 from .utils import (
36 36 dateutil,
37 37 stringutil,
38 38 )
39 39
40 40 # helpers for processing parsed tree
41 41 getsymbol = revsetlang.getsymbol
42 42 getstring = revsetlang.getstring
43 43 getinteger = revsetlang.getinteger
44 44 getboolean = revsetlang.getboolean
45 45 getlist = revsetlang.getlist
46 46 getrange = revsetlang.getrange
47 47 getargs = revsetlang.getargs
48 48 getargsdict = revsetlang.getargsdict
49 49
50 50 baseset = smartset.baseset
51 51 generatorset = smartset.generatorset
52 52 spanset = smartset.spanset
53 53 fullreposet = smartset.fullreposet
54 54
55 55 # Constants for ordering requirement, used in getset():
56 56 #
57 57 # If 'define', any nested functions and operations MAY change the ordering of
58 58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
59 59 # it). If 'follow', any nested functions and operations MUST take the ordering
60 60 # specified by the first operand to the '&' operator.
61 61 #
62 62 # For instance,
63 63 #
64 64 # X & (Y | Z)
65 65 # ^ ^^^^^^^
66 66 # | follow
67 67 # define
68 68 #
69 69 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
70 70 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
71 71 #
72 72 # 'any' means the order doesn't matter. For instance,
73 73 #
74 74 # (X & !Y) | ancestors(Z)
75 75 # ^ ^
76 76 # any any
77 77 #
78 78 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
79 79 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
80 80 # since 'ancestors' does not care about the order of its argument.
81 81 #
82 82 # Currently, most revsets do not care about the order, so 'define' is
83 83 # equivalent to 'follow' for them, and the resulting order is based on the
84 84 # 'subset' parameter passed down to them:
85 85 #
86 86 # m = revset.match(...)
87 87 # m(repo, subset, order=defineorder)
88 88 # ^^^^^^
89 89 # For most revsets, 'define' means using the order this subset provides
90 90 #
91 91 # There are a few revsets that always redefine the order if 'define' is
92 92 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
93 93 anyorder = 'any' # don't care about the order; could even be random-shuffled
94 94 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
95 95 followorder = 'follow' # MUST follow the current order
96 96
97 97 # helpers
98 98
99 99 def getset(repo, subset, x, order=defineorder):
100 100 if not x:
101 101 raise error.ParseError(_("missing argument"))
102 102 return methods[x[0]](repo, subset, *x[1:], order=order)
103 103
104 104 def _getrevsource(repo, r):
105 105 extra = repo[r].extra()
106 106 for label in ('source', 'transplant_source', 'rebase_source'):
107 107 if label in extra:
108 108 try:
109 109 return repo[extra[label]].rev()
110 110 except error.RepoLookupError:
111 111 pass
112 112 return None
113 113
114 114 def _sortedb(xs):
115 115 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
116 116
117 117 # operator methods
118 118
119 119 def stringset(repo, subset, x, order):
120 120 if not x:
121 121 raise error.ParseError(_("empty string is not a valid revision"))
122 122 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 123 if (x in subset
124 124 or x == node.nullrev and isinstance(subset, fullreposet)):
125 125 return baseset([x])
126 126 return baseset()
127 127
128 128 def rawsmartset(repo, subset, x, order):
129 129 """argument is already a smartset, use that directly"""
130 130 if order == followorder:
131 131 return subset & x
132 132 else:
133 133 return x & subset
134 134
135 135 def rangeset(repo, subset, x, y, order):
136 136 m = getset(repo, fullreposet(repo), x)
137 137 n = getset(repo, fullreposet(repo), y)
138 138
139 139 if not m or not n:
140 140 return baseset()
141 141 return _makerangeset(repo, subset, m.first(), n.last(), order)
142 142
143 143 def rangeall(repo, subset, x, order):
144 144 assert x is None
145 145 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
146 146
147 147 def rangepre(repo, subset, y, order):
148 148 # ':y' can't be rewritten to '0:y' since '0' may be hidden
149 149 n = getset(repo, fullreposet(repo), y)
150 150 if not n:
151 151 return baseset()
152 152 return _makerangeset(repo, subset, 0, n.last(), order)
153 153
154 154 def rangepost(repo, subset, x, order):
155 155 m = getset(repo, fullreposet(repo), x)
156 156 if not m:
157 157 return baseset()
158 158 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
159 159 order)
160 160
161 161 def _makerangeset(repo, subset, m, n, order):
162 162 if m == n:
163 163 r = baseset([m])
164 164 elif n == node.wdirrev:
165 165 r = spanset(repo, m, len(repo)) + baseset([n])
166 166 elif m == node.wdirrev:
167 167 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
168 168 elif m < n:
169 169 r = spanset(repo, m, n + 1)
170 170 else:
171 171 r = spanset(repo, m, n - 1)
172 172
173 173 if order == defineorder:
174 174 return r & subset
175 175 else:
176 176 # carrying the sorting over when possible would be more efficient
177 177 return subset & r
178 178
179 179 def dagrange(repo, subset, x, y, order):
180 180 r = fullreposet(repo)
181 181 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
182 182 includepath=True)
183 183 return subset & xs
184 184
185 185 def andset(repo, subset, x, y, order):
186 186 if order == anyorder:
187 187 yorder = anyorder
188 188 else:
189 189 yorder = followorder
190 190 return getset(repo, getset(repo, subset, x, order), y, yorder)
191 191
192 192 def andsmallyset(repo, subset, x, y, order):
193 193 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
194 194 if order == anyorder:
195 195 yorder = anyorder
196 196 else:
197 197 yorder = followorder
198 198 return getset(repo, getset(repo, subset, y, yorder), x, order)
199 199
200 200 def differenceset(repo, subset, x, y, order):
201 201 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
202 202
203 203 def _orsetlist(repo, subset, xs, order):
204 204 assert xs
205 205 if len(xs) == 1:
206 206 return getset(repo, subset, xs[0], order)
207 207 p = len(xs) // 2
208 208 a = _orsetlist(repo, subset, xs[:p], order)
209 209 b = _orsetlist(repo, subset, xs[p:], order)
210 210 return a + b
211 211
212 212 def orset(repo, subset, x, order):
213 213 xs = getlist(x)
214 214 if not xs:
215 215 return baseset()
216 216 if order == followorder:
217 217 # slow path to take the subset order
218 218 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
219 219 else:
220 220 return _orsetlist(repo, subset, xs, order)
221 221
222 222 def notset(repo, subset, x, order):
223 223 return subset - getset(repo, subset, x, anyorder)
224 224
225 225 def relationset(repo, subset, x, y, order):
226 226 raise error.ParseError(_("can't use a relation in this context"))
227 227
228 def generationsrel(repo, subset, x, rel, n, order):
229 # TODO: support range, rewrite tests, and drop startdepth argument
230 # from ancestors() and descendants() predicates
231 if n <= 0:
232 n = -n
233 return _ancestors(repo, subset, x, startdepth=n, stopdepth=n + 1)
234 else:
235 return _descendants(repo, subset, x, startdepth=n, stopdepth=n + 1)
228 def _splitrange(a, b):
229 """Split range with bounds a and b into two ranges at 0 and return two
230 tuples of numbers for use as startdepth and stopdepth arguments of
231 revancestors and revdescendants.
232
233 >>> _splitrange(-10, -5) # [-10:-5]
234 ((5, 11), (None, None))
235 >>> _splitrange(5, 10) # [5:10]
236 ((None, None), (5, 11))
237 >>> _splitrange(-10, 10) # [-10:10]
238 ((0, 11), (0, 11))
239 >>> _splitrange(-10, 0) # [-10:0]
240 ((0, 11), (None, None))
241 >>> _splitrange(0, 10) # [0:10]
242 ((None, None), (0, 11))
243 >>> _splitrange(0, 0) # [0:0]
244 ((0, 1), (None, None))
245 >>> _splitrange(1, -1) # [1:-1]
246 ((None, None), (None, None))
247 """
248 ancdepths = (None, None)
249 descdepths = (None, None)
250 if a == b == 0:
251 ancdepths = (0, 1)
252 if a < 0:
253 ancdepths = (-min(b, 0), -a + 1)
254 if b > 0:
255 descdepths = (max(a, 0), b + 1)
256 return ancdepths, descdepths
257
258 def generationsrel(repo, subset, x, rel, a, b, order):
259 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
260 # descendants() predicates
261 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
262
263 if ancstart is None and descstart is None:
264 return baseset()
265
266 revs = getset(repo, fullreposet(repo), x)
267 if not revs:
268 return baseset()
269
270 if ancstart is not None and descstart is not None:
271 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
272 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
273 elif ancstart is not None:
274 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
275 elif descstart is not None:
276 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
277
278 return subset & s
236 279
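To make the new generationsrel concrete, here is the call sequence for a range that spans zero, traced by hand from _splitrange above (repo and revs are assumed to be bound as in the function body):

    (ancstart, ancstop), (descstart, descstop) = _splitrange(-1, 1)   # -> (0, 2), (0, 2)
    s = dagop.revancestors(repo, revs, False, 0, 2)       # the revs themselves plus their parents
    s += dagop.revdescendants(repo, revs, False, 0, 2)    # plus their children
    # i.e. 'x#generations[-1:1]' selects x, its parents and its children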
237 280 def relsubscriptset(repo, subset, x, y, z, order):
238 281 # this is a pretty basic implementation of the 'x#y[z]' operator, still
239 282 # experimental so undocumented. see the wiki for further ideas.
240 283 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
241 284 rel = getsymbol(y)
242 n = getinteger(z, _("relation subscript must be an integer"))
285 try:
286 a, b = getrange(z, '')
287 except error.ParseError:
288 a = getinteger(z, _("relation subscript must be an integer"))
289 b = a
290 else:
291 def getbound(i):
292 if i is None:
293 return None
294 msg = _("relation subscript bounds must be integers")
295 return getinteger(i, msg)
296 a, b = [getbound(i) for i in (a, b)]
297 if a is None:
298 a = -(dagop.maxlogdepth - 1)
299 if b is None:
300 b = +(dagop.maxlogdepth - 1)
243 301
244 302 if rel in subscriptrelations:
245 return subscriptrelations[rel](repo, subset, x, rel, n, order)
303 return subscriptrelations[rel](repo, subset, x, rel, a, b, order)
246 304
247 305 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
248 306 raise error.UnknownIdentifier(rel, relnames)
249 307
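In summary, the subscript parsing above first tries getrange() and falls back to getinteger(), with open bounds defaulting to +/-(maxlogdepth - 1); a few traced examples (a sketch using the names from the diff):

    x#generations[2]      ->  a = b = 2                        (plain integer; getrange raises ParseError)
    x#generations[-3:-1]  ->  a = -3, b = -1
    x#generations[:2]     ->  a = -(dagop.maxlogdepth - 1), b = 2
    x#generations[-2:]    ->  a = -2, b = +(dagop.maxlogdepth - 1)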
250 308 def subscriptset(repo, subset, x, y, order):
251 309 raise error.ParseError(_("can't use a subscript in this context"))
252 310
253 311 def listset(repo, subset, *xs, **opts):
254 312 raise error.ParseError(_("can't use a list in this context"),
255 313 hint=_('see \'hg help "revsets.x or y"\''))
256 314
257 315 def keyvaluepair(repo, subset, k, v, order):
258 316 raise error.ParseError(_("can't use a key-value pair in this context"))
259 317
260 318 def func(repo, subset, a, b, order):
261 319 f = getsymbol(a)
262 320 if f in symbols:
263 321 func = symbols[f]
264 322 if getattr(func, '_takeorder', False):
265 323 return func(repo, subset, b, order)
266 324 return func(repo, subset, b)
267 325
268 326 keep = lambda fn: getattr(fn, '__doc__', None) is not None
269 327
270 328 syms = [s for (s, fn) in symbols.items() if keep(fn)]
271 329 raise error.UnknownIdentifier(f, syms)
272 330
273 331 # functions
274 332
275 333 # symbols are callables like:
276 334 # fn(repo, subset, x)
277 335 # with:
278 336 # repo - current repository instance
279 337 # subset - of revisions to be examined
280 338 # x - argument in tree form
281 339 symbols = revsetlang.symbols
282 340
283 341 # symbols which can't be used for a DoS attack for any given input
284 342 # (e.g. those which accept regexes as plain strings shouldn't be included)
285 343 # functions that just return a lot of changesets (like all) don't count here
286 344 safesymbols = set()
287 345
288 346 predicate = registrar.revsetpredicate()
289 347
290 348 @predicate('_destupdate')
291 349 def _destupdate(repo, subset, x):
292 350 # experimental revset for update destination
293 351 args = getargsdict(x, 'limit', 'clean')
294 352 return subset & baseset([destutil.destupdate(repo,
295 353 **pycompat.strkwargs(args))[0]])
296 354
297 355 @predicate('_destmerge')
298 356 def _destmerge(repo, subset, x):
299 357 # experimental revset for merge destination
300 358 sourceset = None
301 359 if x is not None:
302 360 sourceset = getset(repo, fullreposet(repo), x)
303 361 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
304 362
305 363 @predicate('adds(pattern)', safe=True, weight=30)
306 364 def adds(repo, subset, x):
307 365 """Changesets that add a file matching pattern.
308 366
309 367 The pattern without explicit kind like ``glob:`` is expected to be
310 368 relative to the current directory and match against a file or a
311 369 directory.
312 370 """
313 371 # i18n: "adds" is a keyword
314 372 pat = getstring(x, _("adds requires a pattern"))
315 373 return checkstatus(repo, subset, pat, 1)
316 374
317 375 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
318 376 def ancestor(repo, subset, x):
319 377 """A greatest common ancestor of the changesets.
320 378
321 379 Accepts 0 or more changesets.
322 380 Will return an empty list when passed no args.
323 381 Greatest common ancestor of a single changeset is that changeset.
324 382 """
325 383 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
326 384 try:
327 385 anc = repo[next(reviter)]
328 386 except StopIteration:
329 387 return baseset()
330 388 for r in reviter:
331 389 anc = anc.ancestor(repo[r])
332 390
333 391 r = scmutil.intrev(anc)
334 392 if r in subset:
335 393 return baseset([r])
336 394 return baseset()
337 395
338 396 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
339 397 stopdepth=None):
340 398 heads = getset(repo, fullreposet(repo), x)
341 399 if not heads:
342 400 return baseset()
343 401 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
344 402 return subset & s
345 403
346 404 @predicate('ancestors(set[, depth])', safe=True)
347 405 def ancestors(repo, subset, x):
348 406 """Changesets that are ancestors of changesets in set, including the
349 407 given changesets themselves.
350 408
351 409 If depth is specified, the result only includes changesets up to
352 410 the specified generation.
353 411 """
354 412 # startdepth is for internal use only until we can decide the UI
355 413 args = getargsdict(x, 'ancestors', 'set depth startdepth')
356 414 if 'set' not in args:
357 415 # i18n: "ancestors" is a keyword
358 416 raise error.ParseError(_('ancestors takes at least 1 argument'))
359 417 startdepth = stopdepth = None
360 418 if 'startdepth' in args:
361 419 n = getinteger(args['startdepth'],
362 420 "ancestors expects an integer startdepth")
363 421 if n < 0:
364 422 raise error.ParseError("negative startdepth")
365 423 startdepth = n
366 424 if 'depth' in args:
367 425 # i18n: "ancestors" is a keyword
368 426 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
369 427 if n < 0:
370 428 raise error.ParseError(_("negative depth"))
371 429 stopdepth = n + 1
372 430 return _ancestors(repo, subset, args['set'],
373 431 startdepth=startdepth, stopdepth=stopdepth)
374 432
375 433 @predicate('_firstancestors', safe=True)
376 434 def _firstancestors(repo, subset, x):
377 435 # ``_firstancestors(set)``
378 436 # Like ``ancestors(set)`` but follows only the first parents.
379 437 return _ancestors(repo, subset, x, followfirst=True)
380 438
381 439 def _childrenspec(repo, subset, x, n, order):
382 440 """Changesets that are the Nth child of a changeset
383 441 in set.
384 442 """
385 443 cs = set()
386 444 for r in getset(repo, fullreposet(repo), x):
387 445 for i in range(n):
388 446 c = repo[r].children()
389 447 if len(c) == 0:
390 448 break
391 449 if len(c) > 1:
392 450 raise error.RepoLookupError(
393 451 _("revision in set has more than one child"))
394 452 r = c[0].rev()
395 453 else:
396 454 cs.add(r)
397 455 return subset & cs
398 456
399 457 def ancestorspec(repo, subset, x, n, order):
400 458 """``set~n``
401 459 Changesets that are the Nth ancestor (first parents only) of a changeset
402 460 in set.
403 461 """
404 462 n = getinteger(n, _("~ expects a number"))
405 463 if n < 0:
406 464 # children lookup
407 465 return _childrenspec(repo, subset, x, -n, order)
408 466 ps = set()
409 467 cl = repo.changelog
410 468 for r in getset(repo, fullreposet(repo), x):
411 469 for i in range(n):
412 470 try:
413 471 r = cl.parentrevs(r)[0]
414 472 except error.WdirUnsupported:
415 473 r = repo[r].parents()[0].rev()
416 474 ps.add(r)
417 475 return subset & ps
418 476
419 477 @predicate('author(string)', safe=True, weight=10)
420 478 def author(repo, subset, x):
421 479 """Alias for ``user(string)``.
422 480 """
423 481 # i18n: "author" is a keyword
424 482 n = getstring(x, _("author requires a string"))
425 483 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
426 484 return subset.filter(lambda x: matcher(repo[x].user()),
427 485 condrepr=('<user %r>', n))
428 486
429 487 @predicate('bisect(string)', safe=True)
430 488 def bisect(repo, subset, x):
431 489 """Changesets marked in the specified bisect status:
432 490
433 491 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
434 492 - ``goods``, ``bads`` : csets topologically good/bad
435 493 - ``range`` : csets taking part in the bisection
436 494 - ``pruned`` : csets that are goods, bads or skipped
437 495 - ``untested`` : csets whose fate is yet unknown
438 496 - ``ignored`` : csets ignored due to DAG topology
439 497 - ``current`` : the cset currently being bisected
440 498 """
441 499 # i18n: "bisect" is a keyword
442 500 status = getstring(x, _("bisect requires a string")).lower()
443 501 state = set(hbisect.get(repo, status))
444 502 return subset & state
445 503
446 504 # Backward-compatibility
447 505 # - no help entry so that we do not advertise it any more
448 506 @predicate('bisected', safe=True)
449 507 def bisected(repo, subset, x):
450 508 return bisect(repo, subset, x)
451 509
452 510 @predicate('bookmark([name])', safe=True)
453 511 def bookmark(repo, subset, x):
454 512 """The named bookmark or all bookmarks.
455 513
456 514 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
457 515 """
458 516 # i18n: "bookmark" is a keyword
459 517 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
460 518 if args:
461 519 bm = getstring(args[0],
462 520 # i18n: "bookmark" is a keyword
463 521 _('the argument to bookmark must be a string'))
464 522 kind, pattern, matcher = stringutil.stringmatcher(bm)
465 523 bms = set()
466 524 if kind == 'literal':
467 525 if bm == pattern:
468 526 pattern = repo._bookmarks.expandname(pattern)
469 527 bmrev = repo._bookmarks.get(pattern, None)
470 528 if not bmrev:
471 529 raise error.RepoLookupError(_("bookmark '%s' does not exist")
472 530 % pattern)
473 531 bms.add(repo[bmrev].rev())
474 532 else:
475 533 matchrevs = set()
476 534 for name, bmrev in repo._bookmarks.iteritems():
477 535 if matcher(name):
478 536 matchrevs.add(bmrev)
479 537 for bmrev in matchrevs:
480 538 bms.add(repo[bmrev].rev())
481 539 else:
482 540 bms = {repo[r].rev() for r in repo._bookmarks.values()}
483 541 bms -= {node.nullrev}
484 542 return subset & bms
485 543
486 544 @predicate('branch(string or set)', safe=True, weight=10)
487 545 def branch(repo, subset, x):
488 546 """
489 547 All changesets belonging to the given branch or the branches of the given
490 548 changesets.
491 549
492 550 Pattern matching is supported for `string`. See
493 551 :hg:`help revisions.patterns`.
494 552 """
495 553 getbi = repo.revbranchcache().branchinfo
496 554 def getbranch(r):
497 555 try:
498 556 return getbi(r)[0]
499 557 except error.WdirUnsupported:
500 558 return repo[r].branch()
501 559
502 560 try:
503 561 b = getstring(x, '')
504 562 except error.ParseError:
505 563 # not a string, but another revspec, e.g. tip()
506 564 pass
507 565 else:
508 566 kind, pattern, matcher = stringutil.stringmatcher(b)
509 567 if kind == 'literal':
510 568 # note: falls through to the revspec case if no branch with
511 569 # this name exists and pattern kind is not specified explicitly
512 570 if pattern in repo.branchmap():
513 571 return subset.filter(lambda r: matcher(getbranch(r)),
514 572 condrepr=('<branch %r>', b))
515 573 if b.startswith('literal:'):
516 574 raise error.RepoLookupError(_("branch '%s' does not exist")
517 575 % pattern)
518 576 else:
519 577 return subset.filter(lambda r: matcher(getbranch(r)),
520 578 condrepr=('<branch %r>', b))
521 579
522 580 s = getset(repo, fullreposet(repo), x)
523 581 b = set()
524 582 for r in s:
525 583 b.add(getbranch(r))
526 584 c = s.__contains__
527 585 return subset.filter(lambda r: c(r) or getbranch(r) in b,
528 586 condrepr=lambda: '<branch %r>' % _sortedb(b))
529 587
530 588 @predicate('phasedivergent()', safe=True)
531 589 def phasedivergent(repo, subset, x):
532 590 """Mutable changesets marked as successors of public changesets.
533 591
534 592 Only non-public and non-obsolete changesets can be `phasedivergent`.
535 593 (EXPERIMENTAL)
536 594 """
537 595 # i18n: "phasedivergent" is a keyword
538 596 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
539 597 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
540 598 return subset & phasedivergent
541 599
542 600 @predicate('bundle()', safe=True)
543 601 def bundle(repo, subset, x):
544 602 """Changesets in the bundle.
545 603
546 604 Bundle must be specified by the -R option."""
547 605
548 606 try:
549 607 bundlerevs = repo.changelog.bundlerevs
550 608 except AttributeError:
551 609 raise error.Abort(_("no bundle provided - specify with -R"))
552 610 return subset & bundlerevs
553 611
554 612 def checkstatus(repo, subset, pat, field):
555 613 hasset = matchmod.patkind(pat) == 'set'
556 614
557 615 mcache = [None]
558 616 def matches(x):
559 617 c = repo[x]
560 618 if not mcache[0] or hasset:
561 619 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
562 620 m = mcache[0]
563 621 fname = None
564 622 if not m.anypats() and len(m.files()) == 1:
565 623 fname = m.files()[0]
566 624 if fname is not None:
567 625 if fname not in c.files():
568 626 return False
569 627 else:
570 628 for f in c.files():
571 629 if m(f):
572 630 break
573 631 else:
574 632 return False
575 633 files = repo.status(c.p1().node(), c.node())[field]
576 634 if fname is not None:
577 635 if fname in files:
578 636 return True
579 637 else:
580 638 for f in files:
581 639 if m(f):
582 640 return True
583 641
584 642 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
585 643
586 644 def _children(repo, subset, parentset):
587 645 if not parentset:
588 646 return baseset()
589 647 cs = set()
590 648 pr = repo.changelog.parentrevs
591 649 minrev = parentset.min()
592 650 nullrev = node.nullrev
593 651 for r in subset:
594 652 if r <= minrev:
595 653 continue
596 654 p1, p2 = pr(r)
597 655 if p1 in parentset:
598 656 cs.add(r)
599 657 if p2 != nullrev and p2 in parentset:
600 658 cs.add(r)
601 659 return baseset(cs)
602 660
603 661 @predicate('children(set)', safe=True)
604 662 def children(repo, subset, x):
605 663 """Child changesets of changesets in set.
606 664 """
607 665 s = getset(repo, fullreposet(repo), x)
608 666 cs = _children(repo, subset, s)
609 667 return subset & cs
610 668
611 669 @predicate('closed()', safe=True, weight=10)
612 670 def closed(repo, subset, x):
613 671 """Changeset is closed.
614 672 """
615 673 # i18n: "closed" is a keyword
616 674 getargs(x, 0, 0, _("closed takes no arguments"))
617 675 return subset.filter(lambda r: repo[r].closesbranch(),
618 676 condrepr='<branch closed>')
619 677
620 678 # for internal use
621 679 @predicate('_commonancestorheads(set)', safe=True)
622 680 def _commonancestorheads(repo, subset, x):
623 681 # This is an internal method for quickly calculating "heads(::x and
624 682 # ::y)"
625 683
626 684 # These greatest common ancestors are the same ones that the consensus bid
627 685 # merge will find.
628 686 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
629 687
630 688 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
631 689 return subset & baseset(ancs)
632 690
633 691 @predicate('commonancestors(set)', safe=True)
634 692 def commonancestors(repo, subset, x):
635 693 """Changesets that are ancestors of every changeset in set.
636 694 """
637 695 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
638 696 if not startrevs:
639 697 return baseset()
640 698 for r in startrevs:
641 699 subset &= dagop.revancestors(repo, baseset([r]))
642 700 return subset
643 701
644 702 @predicate('contains(pattern)', weight=100)
645 703 def contains(repo, subset, x):
646 704 """The revision's manifest contains a file matching pattern (but might not
647 705 modify it). See :hg:`help patterns` for information about file patterns.
648 706
649 707 The pattern without explicit kind like ``glob:`` is expected to be
650 708 relative to the current directory and match against a file exactly
651 709 for efficiency.
652 710 """
653 711 # i18n: "contains" is a keyword
654 712 pat = getstring(x, _("contains requires a pattern"))
655 713
656 714 def matches(x):
657 715 if not matchmod.patkind(pat):
658 716 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
659 717 if pats in repo[x]:
660 718 return True
661 719 else:
662 720 c = repo[x]
663 721 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
664 722 for f in c.manifest():
665 723 if m(f):
666 724 return True
667 725 return False
668 726
669 727 return subset.filter(matches, condrepr=('<contains %r>', pat))
670 728
671 729 @predicate('converted([id])', safe=True)
672 730 def converted(repo, subset, x):
673 731 """Changesets converted from the given identifier in the old repository if
674 732 present, or all converted changesets if no identifier is specified.
675 733 """
676 734
677 735 # There is exactly no chance of resolving the revision, so do a simple
678 736 # string compare and hope for the best
679 737
680 738 rev = None
681 739 # i18n: "converted" is a keyword
682 740 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
683 741 if l:
684 742 # i18n: "converted" is a keyword
685 743 rev = getstring(l[0], _('converted requires a revision'))
686 744
687 745 def _matchvalue(r):
688 746 source = repo[r].extra().get('convert_revision', None)
689 747 return source is not None and (rev is None or source.startswith(rev))
690 748
691 749 return subset.filter(lambda r: _matchvalue(r),
692 750 condrepr=('<converted %r>', rev))
693 751
694 752 @predicate('date(interval)', safe=True, weight=10)
695 753 def date(repo, subset, x):
696 754 """Changesets within the interval, see :hg:`help dates`.
697 755 """
698 756 # i18n: "date" is a keyword
699 757 ds = getstring(x, _("date requires a string"))
700 758 dm = dateutil.matchdate(ds)
701 759 return subset.filter(lambda x: dm(repo[x].date()[0]),
702 760 condrepr=('<date %r>', ds))
703 761
704 762 @predicate('desc(string)', safe=True, weight=10)
705 763 def desc(repo, subset, x):
706 764 """Search commit message for string. The match is case-insensitive.
707 765
708 766 Pattern matching is supported for `string`. See
709 767 :hg:`help revisions.patterns`.
710 768 """
711 769 # i18n: "desc" is a keyword
712 770 ds = getstring(x, _("desc requires a string"))
713 771
714 772 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
715 773
716 774 return subset.filter(lambda r: matcher(repo[r].description()),
717 775 condrepr=('<desc %r>', ds))
718 776
719 777 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
720 778 stopdepth=None):
721 779 roots = getset(repo, fullreposet(repo), x)
722 780 if not roots:
723 781 return baseset()
724 782 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
725 783 return subset & s
726 784
727 785 @predicate('descendants(set[, depth])', safe=True)
728 786 def descendants(repo, subset, x):
729 787 """Changesets which are descendants of changesets in set, including the
730 788 given changesets themselves.
731 789
732 790 If depth is specified, the result only includes changesets up to
733 791 the specified generation.
734 792 """
735 793 # startdepth is for internal use only until we can decide the UI
736 794 args = getargsdict(x, 'descendants', 'set depth startdepth')
737 795 if 'set' not in args:
738 796 # i18n: "descendants" is a keyword
739 797 raise error.ParseError(_('descendants takes at least 1 argument'))
740 798 startdepth = stopdepth = None
741 799 if 'startdepth' in args:
742 800 n = getinteger(args['startdepth'],
743 801 "descendants expects an integer startdepth")
744 802 if n < 0:
745 803 raise error.ParseError("negative startdepth")
746 804 startdepth = n
747 805 if 'depth' in args:
748 806 # i18n: "descendants" is a keyword
749 807 n = getinteger(args['depth'], _("descendants expects an integer depth"))
750 808 if n < 0:
751 809 raise error.ParseError(_("negative depth"))
752 810 stopdepth = n + 1
753 811 return _descendants(repo, subset, args['set'],
754 812 startdepth=startdepth, stopdepth=stopdepth)
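# For example, ``descendants(.)`` includes '.' itself, and ``descendants(., 1)``
# stops after the direct children, since the depth argument above is mapped to
# stopdepth = depth + 1.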
755 813
756 814 @predicate('_firstdescendants', safe=True)
757 815 def _firstdescendants(repo, subset, x):
758 816 # ``_firstdescendants(set)``
759 817 # Like ``descendants(set)`` but follows only the first parents.
760 818 return _descendants(repo, subset, x, followfirst=True)
761 819
762 820 @predicate('destination([set])', safe=True, weight=10)
763 821 def destination(repo, subset, x):
764 822 """Changesets that were created by a graft, transplant or rebase operation,
765 823 with the given revisions specified as the source. Omitting the optional set
766 824 is the same as passing all().
767 825 """
768 826 if x is not None:
769 827 sources = getset(repo, fullreposet(repo), x)
770 828 else:
771 829 sources = fullreposet(repo)
772 830
773 831 dests = set()
774 832
775 833 # subset contains all of the possible destinations that can be returned, so
776 834 # iterate over them and see if their source(s) were provided in the arg set.
777 835 # Even if the immediate src of r is not in the arg set, src's source (or
778 836 # further back) may be. Scanning back further than the immediate src allows
779 837 # transitive transplants and rebases to yield the same results as transitive
780 838 # grafts.
781 839 for r in subset:
782 840 src = _getrevsource(repo, r)
783 841 lineage = None
784 842
785 843 while src is not None:
786 844 if lineage is None:
787 845 lineage = list()
788 846
789 847 lineage.append(r)
790 848
791 849 # The visited lineage is a match if the current source is in the arg
792 850 # set. Since every candidate dest is visited by way of iterating
793 851 # subset, any dests further back in the lineage will be tested by a
794 852 # different iteration over subset. Likewise, if the src was already
795 853 # selected, the current lineage can be selected without going back
796 854 # further.
797 855 if src in sources or src in dests:
798 856 dests.update(lineage)
799 857 break
800 858
801 859 r = src
802 860 src = _getrevsource(repo, r)
803 861
804 862 return subset.filter(dests.__contains__,
805 863 condrepr=lambda: '<destination %r>' % _sortedb(dests))
806 864
807 865 @predicate('contentdivergent()', safe=True)
808 866 def contentdivergent(repo, subset, x):
809 867 """
810 868 Final successors of changesets with an alternative set of final
811 869 successors. (EXPERIMENTAL)
812 870 """
813 871 # i18n: "contentdivergent" is a keyword
814 872 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
815 873 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
816 874 return subset & contentdivergent
817 875
818 876 @predicate('extdata(source)', safe=False, weight=100)
819 877 def extdata(repo, subset, x):
820 878 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
821 879 # i18n: "extdata" is a keyword
822 880 args = getargsdict(x, 'extdata', 'source')
823 881 source = getstring(args.get('source'),
824 882 # i18n: "extdata" is a keyword
825 883 _('extdata takes at least 1 string argument'))
826 884 data = scmutil.extdatasource(repo, source)
827 885 return subset & baseset(data)
828 886
829 887 @predicate('extinct()', safe=True)
830 888 def extinct(repo, subset, x):
831 889 """Obsolete changesets with obsolete descendants only.
832 890 """
833 891 # i18n: "extinct" is a keyword
834 892 getargs(x, 0, 0, _("extinct takes no arguments"))
835 893 extincts = obsmod.getrevs(repo, 'extinct')
836 894 return subset & extincts
837 895
838 896 @predicate('extra(label, [value])', safe=True)
839 897 def extra(repo, subset, x):
840 898 """Changesets with the given label in the extra metadata, with the given
841 899 optional value.
842 900
843 901 Pattern matching is supported for `value`. See
844 902 :hg:`help revisions.patterns`.
845 903 """
846 904 args = getargsdict(x, 'extra', 'label value')
847 905 if 'label' not in args:
848 906 # i18n: "extra" is a keyword
849 907 raise error.ParseError(_('extra takes at least 1 argument'))
850 908 # i18n: "extra" is a keyword
851 909 label = getstring(args['label'], _('first argument to extra must be '
852 910 'a string'))
853 911 value = None
854 912
855 913 if 'value' in args:
856 914 # i18n: "extra" is a keyword
857 915 value = getstring(args['value'], _('second argument to extra must be '
858 916 'a string'))
859 917 kind, value, matcher = stringutil.stringmatcher(value)
860 918
861 919 def _matchvalue(r):
862 920 extra = repo[r].extra()
863 921 return label in extra and (value is None or matcher(extra[label]))
864 922
865 923 return subset.filter(lambda r: _matchvalue(r),
866 924 condrepr=('<extra[%r] %r>', label, value))
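# For example, ``extra(convert_revision)`` selects changesets that carry that
# extra key, and something like ``extra(branch, re:^default$)`` (an
# illustrative value pattern) also matches the stored value.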
867 925
868 926 @predicate('filelog(pattern)', safe=True)
869 927 def filelog(repo, subset, x):
870 928 """Changesets connected to the specified filelog.
871 929
872 930 For performance reasons, visits only revisions mentioned in the file-level
873 931 filelog, rather than filtering through all changesets (much faster, but
874 932 doesn't include deletes or duplicate changes). For a slower, more accurate
875 933 result, use ``file()``.
876 934
877 935 The pattern without explicit kind like ``glob:`` is expected to be
878 936 relative to the current directory and match against a file exactly
879 937 for efficiency.
880 938
881 939 If some linkrev points to revisions filtered by the current repoview, we'll
882 940 work around it to return a non-filtered value.
883 941 """
884 942
885 943 # i18n: "filelog" is a keyword
886 944 pat = getstring(x, _("filelog requires a pattern"))
887 945 s = set()
888 946 cl = repo.changelog
889 947
890 948 if not matchmod.patkind(pat):
891 949 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
892 950 files = [f]
893 951 else:
894 952 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
895 953 files = (f for f in repo[None] if m(f))
896 954
897 955 for f in files:
898 956 fl = repo.file(f)
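# known caches the changelog revision already found for each file node by the
# slow scan below, so every node only has to be resolved once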
899 957 known = {}
900 958 scanpos = 0
901 959 for fr in list(fl):
902 960 fn = fl.node(fr)
903 961 if fn in known:
904 962 s.add(known[fn])
905 963 continue
906 964
907 965 lr = fl.linkrev(fr)
908 966 if lr in cl:
909 967 s.add(lr)
910 968 elif scanpos is not None:
911 969 # lowest matching changeset is filtered, scan further
912 970 # ahead in changelog
913 971 start = max(lr, scanpos) + 1
914 972 scanpos = None
915 973 for r in cl.revs(start):
916 974 # minimize parsing of non-matching entries
917 975 if f in cl.revision(r) and f in cl.readfiles(r):
918 976 try:
919 977 # try to use manifest delta fastpath
920 978 n = repo[r].filenode(f)
921 979 if n not in known:
922 980 if n == fn:
923 981 s.add(r)
924 982 scanpos = r
925 983 break
926 984 else:
927 985 known[n] = r
928 986 except error.ManifestLookupError:
929 987 # deletion in changelog
930 988 continue
931 989
932 990 return subset & s
933 991
934 992 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
935 993 def first(repo, subset, x, order):
936 994 """An alias for limit().
937 995 """
938 996 return limit(repo, subset, x, order)
939 997
940 998 def _follow(repo, subset, x, name, followfirst=False):
941 999 args = getargsdict(x, name, 'file startrev')
942 1000 revs = None
943 1001 if 'startrev' in args:
944 1002 revs = getset(repo, fullreposet(repo), args['startrev'])
945 1003 if 'file' in args:
946 1004 x = getstring(args['file'], _("%s expected a pattern") % name)
947 1005 if revs is None:
948 1006 revs = [None]
949 1007 fctxs = []
950 1008 for r in revs:
951 1009 ctx = mctx = repo[r]
952 1010 if r is None:
953 1011 ctx = repo['.']
954 1012 m = matchmod.match(repo.root, repo.getcwd(), [x],
955 1013 ctx=mctx, default='path')
956 1014 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
957 1015 s = dagop.filerevancestors(fctxs, followfirst)
958 1016 else:
959 1017 if revs is None:
960 1018 revs = baseset([repo['.'].rev()])
961 1019 s = dagop.revancestors(repo, revs, followfirst)
962 1020
963 1021 return subset & s
964 1022
965 1023 @predicate('follow([file[, startrev]])', safe=True)
966 1024 def follow(repo, subset, x):
967 1025 """
968 1026 An alias for ``::.`` (ancestors of the working directory's first parent).
969 1027 If a file pattern is specified, the histories of files matching the given
970 1028 pattern in the revision given by startrev are followed, including copies.
971 1029 """
972 1030 return _follow(repo, subset, x, 'follow')
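# For example, ``follow()`` walks the history of the working directory's first
# parent, and something like ``follow(README, 42)`` (an illustrative file name
# and revision) follows the history of README starting from revision 42,
# including copies.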
973 1031
974 1032 @predicate('_followfirst', safe=True)
975 1033 def _followfirst(repo, subset, x):
976 1034 # ``followfirst([file[, startrev]])``
977 1035 # Like ``follow([file[, startrev]])`` but follows only the first parent
978 1036 # of every revision or file revision.
979 1037 return _follow(repo, subset, x, '_followfirst', followfirst=True)
980 1038
981 1039 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
982 1040 safe=True)
983 1041 def followlines(repo, subset, x):
984 1042 """Changesets modifying `file` in line range ('fromline', 'toline').
985 1043
986 1044 The line range corresponds to 'file' content at 'startrev' and should hence
987 1045 be consistent with the file size. If startrev is not specified, the working
988 1046 directory's parent is used.
989 1047
990 1048 By default, ancestors of 'startrev' are returned. If 'descend' is True,
991 1049 descendants of 'startrev' are returned, though renames are (currently) not
992 1050 followed in this direction.
993 1051 """
994 1052 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
995 1053 if len(args['lines']) != 1:
996 1054 raise error.ParseError(_("followlines requires a line range"))
997 1055
998 1056 rev = '.'
999 1057 if 'startrev' in args:
1000 1058 revs = getset(repo, fullreposet(repo), args['startrev'])
1001 1059 if len(revs) != 1:
1002 1060 raise error.ParseError(
1003 1061 # i18n: "followlines" is a keyword
1004 1062 _("followlines expects exactly one revision"))
1005 1063 rev = revs.last()
1006 1064
1007 1065 pat = getstring(args['file'], _("followlines requires a pattern"))
1008 1066 # i18n: "followlines" is a keyword
1009 1067 msg = _("followlines expects exactly one file")
1010 1068 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1011 1069 # i18n: "followlines" is a keyword
1012 1070 lr = getrange(args['lines'][0], _("followlines expects a line range"))
1013 1071 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
1014 1072 for a in lr]
1015 1073 fromline, toline = util.processlinerange(fromline, toline)
1016 1074
1017 1075 fctx = repo[rev].filectx(fname)
1018 1076 descend = False
1019 1077 if 'descend' in args:
1020 1078 descend = getboolean(args['descend'],
1021 1079 # i18n: "descend" is a keyword
1022 1080 _("descend argument must be a boolean"))
1023 1081 if descend:
1024 1082 rs = generatorset(
1025 1083 (c.rev() for c, _linerange
1026 1084 in dagop.blockdescendants(fctx, fromline, toline)),
1027 1085 iterasc=True)
1028 1086 else:
1029 1087 rs = generatorset(
1030 1088 (c.rev() for c, _linerange
1031 1089 in dagop.blockancestors(fctx, fromline, toline)),
1032 1090 iterasc=False)
1033 1091 return subset & rs
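# Illustrative example: ``followlines(main.c, 10:20, startrev=42, descend=True)``
# (hypothetical file and revision) selects the descendants of revision 42 that
# touch lines 10 through 20 of main.c.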
1034 1092
1035 1093 @predicate('all()', safe=True)
1036 1094 def getall(repo, subset, x):
1037 1095 """All changesets, the same as ``0:tip``.
1038 1096 """
1039 1097 # i18n: "all" is a keyword
1040 1098 getargs(x, 0, 0, _("all takes no arguments"))
1041 1099 return subset & spanset(repo) # drop "null" if any
1042 1100
1043 1101 @predicate('grep(regex)', weight=10)
1044 1102 def grep(repo, subset, x):
1045 1103 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1046 1104 to ensure special escape characters are handled correctly. Unlike
1047 1105 ``keyword(string)``, the match is case-sensitive.
1048 1106 """
1049 1107 try:
1050 1108 # i18n: "grep" is a keyword
1051 1109 gr = re.compile(getstring(x, _("grep requires a string")))
1052 1110 except re.error as e:
1053 1111 raise error.ParseError(
1054 1112 _('invalid match pattern: %s') % stringutil.forcebytestr(e))
1055 1113
1056 1114 def matches(x):
1057 1115 c = repo[x]
1058 1116 for e in c.files() + [c.user(), c.description()]:
1059 1117 if gr.search(e):
1060 1118 return True
1061 1119 return False
1062 1120
1063 1121 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
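# For example, ``grep(r'issue\d+')`` (an illustrative pattern) matches
# changesets whose file names, user, or description contain "issue" followed
# by digits, case-sensitively.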
1064 1122
1065 1123 @predicate('_matchfiles', safe=True)
1066 1124 def _matchfiles(repo, subset, x):
1067 1125 # _matchfiles takes a revset list of prefixed arguments:
1068 1126 #
1069 1127 # [p:foo, i:bar, x:baz]
1070 1128 #
1071 1129 # builds a match object from them and filters subset. Allowed
1072 1130 # prefixes are 'p:' for regular patterns, 'i:' for include
1073 1131 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1074 1132 # a revision identifier, or the empty string to reference the
1075 1133 # working directory, from which the match object is
1076 1134 # initialized. Use 'd:' to set the default matching mode, default
1077 1135 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
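# For example (an illustrative call, not taken from real callers):
#
#   _matchfiles("p:*.py", "i:mercurial/", "x:tests/", "r:", "d:glob")
#
# matches *.py files under mercurial/ but not under tests/, evaluated against
# the working directory, with 'glob' as the default pattern kind.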
1078 1136
1079 1137 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1080 1138 pats, inc, exc = [], [], []
1081 1139 rev, default = None, None
1082 1140 for arg in l:
1083 1141 s = getstring(arg, "_matchfiles requires string arguments")
1084 1142 prefix, value = s[:2], s[2:]
1085 1143 if prefix == 'p:':
1086 1144 pats.append(value)
1087 1145 elif prefix == 'i:':
1088 1146 inc.append(value)
1089 1147 elif prefix == 'x:':
1090 1148 exc.append(value)
1091 1149 elif prefix == 'r:':
1092 1150 if rev is not None:
1093 1151 raise error.ParseError('_matchfiles expected at most one '
1094 1152 'revision')
1095 1153 if value == '': # empty means working directory
1096 1154 rev = node.wdirrev
1097 1155 else:
1098 1156 rev = value
1099 1157 elif prefix == 'd:':
1100 1158 if default is not None:
1101 1159 raise error.ParseError('_matchfiles expected at most one '
1102 1160 'default mode')
1103 1161 default = value
1104 1162 else:
1105 1163 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1106 1164 if not default:
1107 1165 default = 'glob'
1108 1166 hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)
1109 1167
1110 1168 mcache = [None]
1111 1169
1112 1170 # This directly reads the changelog data, as creating a changectx for
1113 1171 # every revision is quite expensive.
1114 1172 getfiles = repo.changelog.readfiles
1115 1173 wdirrev = node.wdirrev
1116 1174 def matches(x):
1117 1175 if x == wdirrev:
1118 1176 files = repo[x].files()
1119 1177 else:
1120 1178 files = getfiles(x)
1121 1179
1122 1180 if not mcache[0] or (hasset and rev is None):
1123 1181 r = x if rev is None else rev
1124 1182 mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
1125 1183 include=inc, exclude=exc, ctx=repo[r],
1126 1184 default=default)
1127 1185 m = mcache[0]
1128 1186
1129 1187 for f in files:
1130 1188 if m(f):
1131 1189 return True
1132 1190 return False
1133 1191
1134 1192 return subset.filter(matches,
1135 1193 condrepr=('<matchfiles patterns=%r, include=%r '
1136 1194 'exclude=%r, default=%r, rev=%r>',
1137 1195 pats, inc, exc, default, rev))
1138 1196
1139 1197 @predicate('file(pattern)', safe=True, weight=10)
1140 1198 def hasfile(repo, subset, x):
1141 1199 """Changesets affecting files matched by pattern.
1142 1200
1143 1201 For a faster but less accurate result, consider using ``filelog()``
1144 1202 instead.
1145 1203
1146 1204 This predicate uses ``glob:`` as the default kind of pattern.
1147 1205 """
1148 1206 # i18n: "file" is a keyword
1149 1207 pat = getstring(x, _("file requires a pattern"))
1150 1208 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1151 1209
1152 1210 @predicate('head()', safe=True)
1153 1211 def head(repo, subset, x):
1154 1212 """Changeset is a named branch head.
1155 1213 """
1156 1214 # i18n: "head" is a keyword
1157 1215 getargs(x, 0, 0, _("head takes no arguments"))
1158 1216 hs = set()
1159 1217 cl = repo.changelog
1160 1218 for ls in repo.branchmap().itervalues():
1161 1219 hs.update(cl.rev(h) for h in ls)
1162 1220 return subset & baseset(hs)
1163 1221
1164 1222 @predicate('heads(set)', safe=True, takeorder=True)
1165 1223 def heads(repo, subset, x, order):
1166 1224 """Members of set with no children in set.
1167 1225 """
1168 1226 # argument set should never define order
1169 1227 if order == defineorder:
1170 1228 order = followorder
1171 1229 inputset = getset(repo, fullreposet(repo), x, order=order)
1172 1230 wdirparents = None
1173 1231 if node.wdirrev in inputset:
1174 1232 # a bit slower, but not common so good enough for now
1175 1233 wdirparents = [p.rev() for p in repo[None].parents()]
1176 1234 inputset = set(inputset)
1177 1235 inputset.discard(node.wdirrev)
1178 1236 heads = repo.changelog.headrevs(inputset)
1179 1237 if wdirparents is not None:
1180 1238 heads.difference_update(wdirparents)
1181 1239 heads.add(node.wdirrev)
1182 1240 heads = baseset(heads)
1183 1241 return subset & heads
1184 1242
1185 1243 @predicate('hidden()', safe=True)
1186 1244 def hidden(repo, subset, x):
1187 1245 """Hidden changesets.
1188 1246 """
1189 1247 # i18n: "hidden" is a keyword
1190 1248 getargs(x, 0, 0, _("hidden takes no arguments"))
1191 1249 hiddenrevs = repoview.filterrevs(repo, 'visible')
1192 1250 return subset & hiddenrevs
1193 1251
1194 1252 @predicate('keyword(string)', safe=True, weight=10)
1195 1253 def keyword(repo, subset, x):
1196 1254 """Search commit message, user name, and names of changed files for
1197 1255 string. The match is case-insensitive.
1198 1256
1199 1257 For a regular expression or case sensitive search of these fields, use
1200 1258 ``grep(regex)``.
1201 1259 """
1202 1260 # i18n: "keyword" is a keyword
1203 1261 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1204 1262
1205 1263 def matches(r):
1206 1264 c = repo[r]
1207 1265 return any(kw in encoding.lower(t)
1208 1266 for t in c.files() + [c.user(), c.description()])
1209 1267
1210 1268 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1211 1269
1212 1270 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1213 1271 def limit(repo, subset, x, order):
1214 1272 """First n members of set, defaulting to 1, starting from offset.
1215 1273 """
1216 1274 args = getargsdict(x, 'limit', 'set n offset')
1217 1275 if 'set' not in args:
1218 1276 # i18n: "limit" is a keyword
1219 1277 raise error.ParseError(_("limit requires one to three arguments"))
1220 1278 # i18n: "limit" is a keyword
1221 1279 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1222 1280 if lim < 0:
1223 1281 raise error.ParseError(_("negative number to select"))
1224 1282 # i18n: "limit" is a keyword
1225 1283 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1226 1284 if ofs < 0:
1227 1285 raise error.ParseError(_("negative offset"))
1228 1286 os = getset(repo, fullreposet(repo), args['set'])
1229 1287 ls = os.slice(ofs, ofs + lim)
1230 1288 if order == followorder and lim > 1:
1231 1289 return subset & ls
1232 1290 return ls & subset
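# For example, ``limit(all(), 3, 5)`` takes at most three revisions starting at
# offset 5, i.e. the slice [5:8] of the ordered input set computed above.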
1233 1291
1234 1292 @predicate('last(set, [n])', safe=True, takeorder=True)
1235 1293 def last(repo, subset, x, order):
1236 1294 """Last n members of set, defaulting to 1.
1237 1295 """
1238 1296 # i18n: "last" is a keyword
1239 1297 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1240 1298 lim = 1
1241 1299 if len(l) == 2:
1242 1300 # i18n: "last" is a keyword
1243 1301 lim = getinteger(l[1], _("last expects a number"))
1244 1302 if lim < 0:
1245 1303 raise error.ParseError(_("negative number to select"))
1246 1304 os = getset(repo, fullreposet(repo), l[0])
1247 1305 os.reverse()
1248 1306 ls = os.slice(0, lim)
1249 1307 if order == followorder and lim > 1:
1250 1308 return subset & ls
1251 1309 ls.reverse()
1252 1310 return ls & subset
1253 1311
1254 1312 @predicate('max(set)', safe=True)
1255 1313 def maxrev(repo, subset, x):
1256 1314 """Changeset with highest revision number in set.
1257 1315 """
1258 1316 os = getset(repo, fullreposet(repo), x)
1259 1317 try:
1260 1318 m = os.max()
1261 1319 if m in subset:
1262 1320 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1263 1321 except ValueError:
1264 1322 # os.max() throws a ValueError when the collection is empty.
1265 1323 # Same as python's max().
1266 1324 pass
1267 1325 return baseset(datarepr=('<max %r, %r>', subset, os))
1268 1326
1269 1327 @predicate('merge()', safe=True)
1270 1328 def merge(repo, subset, x):
1271 1329 """Changeset is a merge changeset.
1272 1330 """
1273 1331 # i18n: "merge" is a keyword
1274 1332 getargs(x, 0, 0, _("merge takes no arguments"))
1275 1333 cl = repo.changelog
1276 1334 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1277 1335 condrepr='<merge>')
1278 1336
1279 1337 @predicate('branchpoint()', safe=True)
1280 1338 def branchpoint(repo, subset, x):
1281 1339 """Changesets with more than one child.
1282 1340 """
1283 1341 # i18n: "branchpoint" is a keyword
1284 1342 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1285 1343 cl = repo.changelog
1286 1344 if not subset:
1287 1345 return baseset()
1288 1346 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1289 1347 # (and if it is not, it should be.)
1290 1348 baserev = min(subset)
1291 1349 parentscount = [0]*(len(repo) - baserev)
1292 1350 for r in cl.revs(start=baserev + 1):
1293 1351 for p in cl.parentrevs(r):
1294 1352 if p >= baserev:
1295 1353 parentscount[p - baserev] += 1
1296 1354 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1297 1355 condrepr='<branchpoint>')
1298 1356
1299 1357 @predicate('min(set)', safe=True)
1300 1358 def minrev(repo, subset, x):
1301 1359 """Changeset with lowest revision number in set.
1302 1360 """
1303 1361 os = getset(repo, fullreposet(repo), x)
1304 1362 try:
1305 1363 m = os.min()
1306 1364 if m in subset:
1307 1365 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1308 1366 except ValueError:
1309 1367 # os.min() throws a ValueError when the collection is empty.
1310 1368 # Same as python's min().
1311 1369 pass
1312 1370 return baseset(datarepr=('<min %r, %r>', subset, os))
1313 1371
1314 1372 @predicate('modifies(pattern)', safe=True, weight=30)
1315 1373 def modifies(repo, subset, x):
1316 1374 """Changesets modifying files matched by pattern.
1317 1375
1318 1376 The pattern without explicit kind like ``glob:`` is expected to be
1319 1377 relative to the current directory and match against a file or a
1320 1378 directory.
1321 1379 """
1322 1380 # i18n: "modifies" is a keyword
1323 1381 pat = getstring(x, _("modifies requires a pattern"))
1324 1382 return checkstatus(repo, subset, pat, 0)
1325 1383
1326 1384 @predicate('named(namespace)')
1327 1385 def named(repo, subset, x):
1328 1386 """The changesets in a given namespace.
1329 1387
1330 1388 Pattern matching is supported for `namespace`. See
1331 1389 :hg:`help revisions.patterns`.
1332 1390 """
1333 1391 # i18n: "named" is a keyword
1334 1392 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1335 1393
1336 1394 ns = getstring(args[0],
1337 1395 # i18n: "named" is a keyword
1338 1396 _('the argument to named must be a string'))
1339 1397 kind, pattern, matcher = stringutil.stringmatcher(ns)
1340 1398 namespaces = set()
1341 1399 if kind == 'literal':
1342 1400 if pattern not in repo.names:
1343 1401 raise error.RepoLookupError(_("namespace '%s' does not exist")
1344 1402 % ns)
1345 1403 namespaces.add(repo.names[pattern])
1346 1404 else:
1347 1405 for name, ns in repo.names.iteritems():
1348 1406 if matcher(name):
1349 1407 namespaces.add(ns)
1350 1408
1351 1409 names = set()
1352 1410 for ns in namespaces:
1353 1411 for name in ns.listnames(repo):
1354 1412 if name not in ns.deprecated:
1355 1413 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1356 1414
1357 1415 names -= {node.nullrev}
1358 1416 return subset & names
1359 1417
1360 1418 @predicate('id(string)', safe=True)
1361 1419 def node_(repo, subset, x):
1362 1420 """Revision non-ambiguously specified by the given hex string prefix.
1363 1421 """
1364 1422 # i18n: "id" is a keyword
1365 1423 l = getargs(x, 1, 1, _("id requires one argument"))
1366 1424 # i18n: "id" is a keyword
1367 1425 n = getstring(l[0], _("id requires a string"))
1368 1426 if len(n) == 40:
1369 1427 try:
1370 1428 rn = repo.changelog.rev(node.bin(n))
1371 1429 except error.WdirUnsupported:
1372 1430 rn = node.wdirrev
1373 1431 except (LookupError, TypeError):
1374 1432 rn = None
1375 1433 else:
1376 1434 rn = None
1377 1435 try:
1378 1436 pm = scmutil.resolvehexnodeidprefix(repo, n)
1379 1437 if pm is not None:
1380 1438 rn = repo.changelog.rev(pm)
1381 1439 except LookupError:
1382 1440 pass
1383 1441 except error.WdirUnsupported:
1384 1442 rn = node.wdirrev
1385 1443
1386 1444 if rn is None:
1387 1445 return baseset()
1388 1446 result = baseset([rn])
1389 1447 return result & subset
1390 1448
1391 1449 @predicate('none()', safe=True)
1392 1450 def none(repo, subset, x):
1393 1451 """No changesets.
1394 1452 """
1395 1453 # i18n: "none" is a keyword
1396 1454 getargs(x, 0, 0, _("none takes no arguments"))
1397 1455 return baseset()
1398 1456
1399 1457 @predicate('obsolete()', safe=True)
1400 1458 def obsolete(repo, subset, x):
1401 1459 """Mutable changeset with a newer version."""
1402 1460 # i18n: "obsolete" is a keyword
1403 1461 getargs(x, 0, 0, _("obsolete takes no arguments"))
1404 1462 obsoletes = obsmod.getrevs(repo, 'obsolete')
1405 1463 return subset & obsoletes
1406 1464
1407 1465 @predicate('only(set, [set])', safe=True)
1408 1466 def only(repo, subset, x):
1409 1467 """Changesets that are ancestors of the first set that are not ancestors
1410 1468 of any other head in the repo. If a second set is specified, the result
1411 1469 is ancestors of the first set that are not ancestors of the second set
1412 1470 (i.e. ::<set1> - ::<set2>).
1413 1471 """
1414 1472 cl = repo.changelog
1415 1473 # i18n: "only" is a keyword
1416 1474 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1417 1475 include = getset(repo, fullreposet(repo), args[0])
1418 1476 if len(args) == 1:
1419 1477 if not include:
1420 1478 return baseset()
1421 1479
1422 1480 descendants = set(dagop.revdescendants(repo, include, False))
1423 1481 exclude = [rev for rev in cl.headrevs()
1424 1482 if rev not in descendants and rev not in include]
1425 1483 else:
1426 1484 exclude = getset(repo, fullreposet(repo), args[1])
1427 1485
1428 1486 results = set(cl.findmissingrevs(common=exclude, heads=include))
1429 1487 # XXX we should turn this into a baseset instead of a set, smartset may do
1430 1488 # some optimizations from the fact this is a baseset.
1431 1489 return subset & results
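# For example, ``only(feature, default)`` (illustrative branch names) is the
# same as ``::feature - ::default``, per the docstring above.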
1432 1490
1433 1491 @predicate('origin([set])', safe=True)
1434 1492 def origin(repo, subset, x):
1435 1493 """
1436 1494 Changesets that were specified as a source for the grafts, transplants or
1437 1495 rebases that created the given revisions. Omitting the optional set is the
1438 1496 same as passing all(). If a changeset created by these operations is itself
1439 1497 specified as a source for one of these operations, only the source changeset
1440 1498 for the first operation is selected.
1441 1499 """
1442 1500 if x is not None:
1443 1501 dests = getset(repo, fullreposet(repo), x)
1444 1502 else:
1445 1503 dests = fullreposet(repo)
1446 1504
1447 1505 def _firstsrc(rev):
1448 1506 src = _getrevsource(repo, rev)
1449 1507 if src is None:
1450 1508 return None
1451 1509
1452 1510 while True:
1453 1511 prev = _getrevsource(repo, src)
1454 1512
1455 1513 if prev is None:
1456 1514 return src
1457 1515 src = prev
1458 1516
1459 1517 o = {_firstsrc(r) for r in dests}
1460 1518 o -= {None}
1461 1519 # XXX we should turn this into a baseset instead of a set, smartset may do
1462 1520 # some optimizations from the fact this is a baseset.
1463 1521 return subset & o
1464 1522
1465 1523 @predicate('outgoing([path])', safe=False, weight=10)
1466 1524 def outgoing(repo, subset, x):
1467 1525 """Changesets not found in the specified destination repository, or the
1468 1526 default push location.
1469 1527 """
1470 1528 # Avoid cycles.
1471 1529 from . import (
1472 1530 discovery,
1473 1531 hg,
1474 1532 )
1475 1533 # i18n: "outgoing" is a keyword
1476 1534 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1477 1535 # i18n: "outgoing" is a keyword
1478 1536 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1479 1537 if not dest:
1480 1538 # ui.paths.getpath() explicitly tests for None, not just a boolean
1481 1539 dest = None
1482 1540 path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
1483 1541 if not path:
1484 1542 raise error.Abort(_('default repository not configured!'),
1485 1543 hint=_("see 'hg help config.paths'"))
1486 1544 dest = path.pushloc or path.loc
1487 1545 branches = path.branch, []
1488 1546
1489 1547 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1490 1548 if revs:
1491 1549 revs = [repo.lookup(rev) for rev in revs]
1492 1550 other = hg.peer(repo, {}, dest)
1493 1551 repo.ui.pushbuffer()
1494 1552 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1495 1553 repo.ui.popbuffer()
1496 1554 cl = repo.changelog
1497 1555 o = {cl.rev(r) for r in outgoing.missing}
1498 1556 return subset & o
1499 1557
1500 1558 @predicate('p1([set])', safe=True)
1501 1559 def p1(repo, subset, x):
1502 1560 """First parent of changesets in set, or the working directory.
1503 1561 """
1504 1562 if x is None:
1505 1563 p = repo[x].p1().rev()
1506 1564 if p >= 0:
1507 1565 return subset & baseset([p])
1508 1566 return baseset()
1509 1567
1510 1568 ps = set()
1511 1569 cl = repo.changelog
1512 1570 for r in getset(repo, fullreposet(repo), x):
1513 1571 try:
1514 1572 ps.add(cl.parentrevs(r)[0])
1515 1573 except error.WdirUnsupported:
1516 1574 ps.add(repo[r].parents()[0].rev())
1517 1575 ps -= {node.nullrev}
1518 1576 # XXX we should turn this into a baseset instead of a set, smartset may do
1519 1577 # some optimizations from the fact this is a baseset.
1520 1578 return subset & ps
1521 1579
1522 1580 @predicate('p2([set])', safe=True)
1523 1581 def p2(repo, subset, x):
1524 1582 """Second parent of changesets in set, or the working directory.
1525 1583 """
1526 1584 if x is None:
1527 1585 ps = repo[x].parents()
1528 1586 try:
1529 1587 p = ps[1].rev()
1530 1588 if p >= 0:
1531 1589 return subset & baseset([p])
1532 1590 return baseset()
1533 1591 except IndexError:
1534 1592 return baseset()
1535 1593
1536 1594 ps = set()
1537 1595 cl = repo.changelog
1538 1596 for r in getset(repo, fullreposet(repo), x):
1539 1597 try:
1540 1598 ps.add(cl.parentrevs(r)[1])
1541 1599 except error.WdirUnsupported:
1542 1600 parents = repo[r].parents()
1543 1601 if len(parents) == 2:
1544 1602 ps.add(parents[1].rev())
1545 1603 ps -= {node.nullrev}
1546 1604 # XXX we should turn this into a baseset instead of a set, smartset may do
1547 1605 # some optimizations from the fact this is a baseset.
1548 1606 return subset & ps
1549 1607
1550 1608 def parentpost(repo, subset, x, order):
1551 1609 return p1(repo, subset, x)
1552 1610
1553 1611 @predicate('parents([set])', safe=True)
1554 1612 def parents(repo, subset, x):
1555 1613 """
1556 1614 The set of all parents for all changesets in set, or the working directory.
1557 1615 """
1558 1616 if x is None:
1559 1617 ps = set(p.rev() for p in repo[x].parents())
1560 1618 else:
1561 1619 ps = set()
1562 1620 cl = repo.changelog
1563 1621 up = ps.update
1564 1622 parentrevs = cl.parentrevs
1565 1623 for r in getset(repo, fullreposet(repo), x):
1566 1624 try:
1567 1625 up(parentrevs(r))
1568 1626 except error.WdirUnsupported:
1569 1627 up(p.rev() for p in repo[r].parents())
1570 1628 ps -= {node.nullrev}
1571 1629 return subset & ps
1572 1630
1573 1631 def _phase(repo, subset, *targets):
1574 1632 """helper to select all rev in <targets> phases"""
1575 1633 return repo._phasecache.getrevset(repo, targets, subset)
1576 1634
1577 1635 @predicate('_phase(idx)', safe=True)
1578 1636 def phase(repo, subset, x):
1579 1637 l = getargs(x, 1, 1, ("_phase requires one argument"))
1580 1638 target = getinteger(l[0], ("_phase expects a number"))
1581 1639 return _phase(repo, subset, target)
1582 1640
1583 1641 @predicate('draft()', safe=True)
1584 1642 def draft(repo, subset, x):
1585 1643 """Changeset in draft phase."""
1586 1644 # i18n: "draft" is a keyword
1587 1645 getargs(x, 0, 0, _("draft takes no arguments"))
1588 1646 target = phases.draft
1589 1647 return _phase(repo, subset, target)
1590 1648
1591 1649 @predicate('secret()', safe=True)
1592 1650 def secret(repo, subset, x):
1593 1651 """Changeset in secret phase."""
1594 1652 # i18n: "secret" is a keyword
1595 1653 getargs(x, 0, 0, _("secret takes no arguments"))
1596 1654 target = phases.secret
1597 1655 return _phase(repo, subset, target)
1598 1656
1599 1657 @predicate('stack([revs])', safe=True)
1600 1658 def stack(repo, subset, x):
1601 1659 """Experimental revset for the stack of changesets or working directory
1602 1660 parent. (EXPERIMENTAL)
1603 1661 """
1604 1662 if x is None:
1605 1663 stacks = stackmod.getstack(repo, x)
1606 1664 else:
1607 1665 stacks = smartset.baseset([])
1608 1666 for revision in getset(repo, fullreposet(repo), x):
1609 1667 currentstack = stackmod.getstack(repo, revision)
1610 1668 stacks = stacks + currentstack
1611 1669
1612 1670 return subset & stacks
1613 1671
1614 1672 def parentspec(repo, subset, x, n, order):
1615 1673 """``set^0``
1616 1674 The set.
1617 1675 ``set^1`` (or ``set^``), ``set^2``
1618 1676 First or second parent, respectively, of all changesets in set.
1619 1677 """
1620 1678 try:
1621 1679 n = int(n[1])
1622 1680 if n not in (0, 1, 2):
1623 1681 raise ValueError
1624 1682 except (TypeError, ValueError):
1625 1683 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1626 1684 ps = set()
1627 1685 cl = repo.changelog
1628 1686 for r in getset(repo, fullreposet(repo), x):
1629 1687 if n == 0:
1630 1688 ps.add(r)
1631 1689 elif n == 1:
1632 1690 try:
1633 1691 ps.add(cl.parentrevs(r)[0])
1634 1692 except error.WdirUnsupported:
1635 1693 ps.add(repo[r].parents()[0].rev())
1636 1694 else:
1637 1695 try:
1638 1696 parents = cl.parentrevs(r)
1639 1697 if parents[1] != node.nullrev:
1640 1698 ps.add(parents[1])
1641 1699 except error.WdirUnsupported:
1642 1700 parents = repo[r].parents()
1643 1701 if len(parents) == 2:
1644 1702 ps.add(parents[1].rev())
1645 1703 return subset & ps
1646 1704
1647 1705 @predicate('present(set)', safe=True, takeorder=True)
1648 1706 def present(repo, subset, x, order):
1649 1707 """An empty set, if any revision in set isn't found; otherwise,
1650 1708 all revisions in set.
1651 1709
1652 1710 If any of specified revisions is not present in the local repository,
1653 1711 the query is normally aborted. But this predicate allows the query
1654 1712 to continue even in such cases.
1655 1713 """
1656 1714 try:
1657 1715 return getset(repo, subset, x, order)
1658 1716 except error.RepoLookupError:
1659 1717 return baseset()
1660 1718
1661 1719 # for internal use
1662 1720 @predicate('_notpublic', safe=True)
1663 1721 def _notpublic(repo, subset, x):
1664 1722 getargs(x, 0, 0, "_notpublic takes no arguments")
1665 1723 return _phase(repo, subset, phases.draft, phases.secret)
1666 1724
1667 1725 # for internal use
1668 1726 @predicate('_phaseandancestors(phasename, set)', safe=True)
1669 1727 def _phaseandancestors(repo, subset, x):
1670 1728 # equivalent to (phasename() & ancestors(set)) but more efficient
1671 1729 # phasename could be one of 'draft', 'secret', or '_notpublic'
1672 1730 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1673 1731 phasename = getsymbol(args[0])
1674 1732 s = getset(repo, fullreposet(repo), args[1])
1675 1733
1676 1734 draft = phases.draft
1677 1735 secret = phases.secret
1678 1736 phasenamemap = {
1679 1737 '_notpublic': draft,
1680 1738 'draft': draft, # follow secret's ancestors
1681 1739 'secret': secret,
1682 1740 }
1683 1741 if phasename not in phasenamemap:
1684 1742 raise error.ParseError('%r is not a valid phasename' % phasename)
1685 1743
1686 1744 minimalphase = phasenamemap[phasename]
1687 1745 getphase = repo._phasecache.phase
1688 1746
1689 1747 def cutfunc(rev):
1690 1748 return getphase(repo, rev) < minimalphase
1691 1749
1692 1750 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1693 1751
1694 1752 if phasename == 'draft': # need to remove secret changesets
1695 1753 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1696 1754 return subset & revs
1697 1755
1698 1756 @predicate('public()', safe=True)
1699 1757 def public(repo, subset, x):
1700 1758 """Changeset in public phase."""
1701 1759 # i18n: "public" is a keyword
1702 1760 getargs(x, 0, 0, _("public takes no arguments"))
1703 1761 return _phase(repo, subset, phases.public)
1704 1762
1705 1763 @predicate('remote([id [,path]])', safe=False)
1706 1764 def remote(repo, subset, x):
1707 1765 """Local revision that corresponds to the given identifier in a
1708 1766 remote repository, if present. Here, the '.' identifier is a
1709 1767 synonym for the current local branch.
1710 1768 """
1711 1769
1712 1770 from . import hg # avoid start-up nasties
1713 1771 # i18n: "remote" is a keyword
1714 1772 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1715 1773
1716 1774 q = '.'
1717 1775 if len(l) > 0:
1718 1776 # i18n: "remote" is a keyword
1719 1777 q = getstring(l[0], _("remote requires a string id"))
1720 1778 if q == '.':
1721 1779 q = repo['.'].branch()
1722 1780
1723 1781 dest = ''
1724 1782 if len(l) > 1:
1725 1783 # i18n: "remote" is a keyword
1726 1784 dest = getstring(l[1], _("remote requires a repository path"))
1727 1785 dest = repo.ui.expandpath(dest or 'default')
1728 1786 dest, branches = hg.parseurl(dest)
1729 1787 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1730 1788 if revs:
1731 1789 revs = [repo.lookup(rev) for rev in revs]
1732 1790 other = hg.peer(repo, {}, dest)
1733 1791 n = other.lookup(q)
1734 1792 if n in repo:
1735 1793 r = repo[n].rev()
1736 1794 if r in subset:
1737 1795 return baseset([r])
1738 1796 return baseset()
1739 1797
1740 1798 @predicate('removes(pattern)', safe=True, weight=30)
1741 1799 def removes(repo, subset, x):
1742 1800 """Changesets which remove files matching pattern.
1743 1801
1744 1802 The pattern without explicit kind like ``glob:`` is expected to be
1745 1803 relative to the current directory and match against a file or a
1746 1804 directory.
1747 1805 """
1748 1806 # i18n: "removes" is a keyword
1749 1807 pat = getstring(x, _("removes requires a pattern"))
1750 1808 return checkstatus(repo, subset, pat, 2)
1751 1809
1752 1810 @predicate('rev(number)', safe=True)
1753 1811 def rev(repo, subset, x):
1754 1812 """Revision with the given numeric identifier.
1755 1813 """
1756 1814 # i18n: "rev" is a keyword
1757 1815 l = getargs(x, 1, 1, _("rev requires one argument"))
1758 1816 try:
1759 1817 # i18n: "rev" is a keyword
1760 1818 l = int(getstring(l[0], _("rev requires a number")))
1761 1819 except (TypeError, ValueError):
1762 1820 # i18n: "rev" is a keyword
1763 1821 raise error.ParseError(_("rev expects a number"))
1764 1822 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1765 1823 return baseset()
1766 1824 return subset & baseset([l])
1767 1825
1768 1826 @predicate('_rev(number)', safe=True)
1769 1827 def _rev(repo, subset, x):
1770 1828 # internal version of "rev(x)" that raises an error if "x" is invalid
1771 1829 # i18n: "rev" is a keyword
1772 1830 l = getargs(x, 1, 1, _("rev requires one argument"))
1773 1831 try:
1774 1832 # i18n: "rev" is a keyword
1775 1833 l = int(getstring(l[0], _("rev requires a number")))
1776 1834 except (TypeError, ValueError):
1777 1835 # i18n: "rev" is a keyword
1778 1836 raise error.ParseError(_("rev expects a number"))
1779 1837 repo.changelog.node(l) # check that the rev exists
1780 1838 return subset & baseset([l])
1781 1839
1782 1840 @predicate('revset(set)', safe=True, takeorder=True)
1783 1841 def revsetpredicate(repo, subset, x, order):
1784 1842 """Strictly interpret the content as a revset.
1785 1843
1786 1844 The content of this special predicate will be strictly interpreted as a
1787 1845 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
1788 1846 without possible ambiguity with an "id(0)" bookmark or tag.
1789 1847 """
1790 1848 return getset(repo, subset, x, order)
1791 1849
1792 1850 @predicate('matching(revision [, field])', safe=True)
1793 1851 def matching(repo, subset, x):
1794 1852 """Changesets in which a given set of fields match the set of fields in the
1795 1853 selected revision or set.
1796 1854
1797 1855 To match more than one field pass the list of fields to match separated
1798 1856 by spaces (e.g. ``author description``).
1799 1857
1800 1858 Valid fields are most regular revision fields and some special fields.
1801 1859
1802 1860 Regular revision fields are ``description``, ``author``, ``branch``,
1803 1861 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1804 1862 and ``diff``.
1805 1863 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1806 1864 contents of the revision. Two revisions matching their ``diff`` will
1807 1865 also match their ``files``.
1808 1866
1809 1867 Special fields are ``summary`` and ``metadata``:
1810 1868 ``summary`` matches the first line of the description.
1811 1869 ``metadata`` is equivalent to matching ``description user date``
1812 1870 (i.e. it matches the main metadata fields).
1813 1871
1814 1872 ``metadata`` is the default field which is used when no fields are
1815 1873 specified. You can match more than one field at a time.
1816 1874 """
1817 1875 # i18n: "matching" is a keyword
1818 1876 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1819 1877
1820 1878 revs = getset(repo, fullreposet(repo), l[0])
1821 1879
1822 1880 fieldlist = ['metadata']
1823 1881 if len(l) > 1:
1824 1882 fieldlist = getstring(l[1],
1825 1883 # i18n: "matching" is a keyword
1826 1884 _("matching requires a string "
1827 1885 "as its second argument")).split()
1828 1886
1829 1887 # Make sure that there are no repeated fields,
1830 1888 # expand the 'special' 'metadata' field type
1831 1889 # and check the 'files' whenever we check the 'diff'
1832 1890 fields = []
1833 1891 for field in fieldlist:
1834 1892 if field == 'metadata':
1835 1893 fields += ['user', 'description', 'date']
1836 1894 elif field == 'diff':
1837 1895 # a revision matching the diff must also match the files
1838 1896 # since matching the diff is very costly, make sure to
1839 1897 # also match the files first
1840 1898 fields += ['files', 'diff']
1841 1899 else:
1842 1900 if field == 'author':
1843 1901 field = 'user'
1844 1902 fields.append(field)
1845 1903 fields = set(fields)
1846 1904 if 'summary' in fields and 'description' in fields:
1847 1905 # If a revision matches its description it also matches its summary
1848 1906 fields.discard('summary')
1849 1907
1850 1908 # We may want to match more than one field
1851 1909 # Not all fields take the same amount of time to be matched
1852 1910 # Sort the selected fields in order of increasing matching cost
1853 1911 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1854 1912 'files', 'description', 'substate', 'diff']
1855 1913 def fieldkeyfunc(f):
1856 1914 try:
1857 1915 return fieldorder.index(f)
1858 1916 except ValueError:
1859 1917 # assume an unknown field is very costly
1860 1918 return len(fieldorder)
1861 1919 fields = list(fields)
1862 1920 fields.sort(key=fieldkeyfunc)
1863 1921
1864 1922 # Each field will be matched with its own "getfield" function
1865 1923 # which will be added to the getfieldfuncs array of functions
1866 1924 getfieldfuncs = []
1867 1925 _funcs = {
1868 1926 'user': lambda r: repo[r].user(),
1869 1927 'branch': lambda r: repo[r].branch(),
1870 1928 'date': lambda r: repo[r].date(),
1871 1929 'description': lambda r: repo[r].description(),
1872 1930 'files': lambda r: repo[r].files(),
1873 1931 'parents': lambda r: repo[r].parents(),
1874 1932 'phase': lambda r: repo[r].phase(),
1875 1933 'substate': lambda r: repo[r].substate,
1876 1934 'summary': lambda r: repo[r].description().splitlines()[0],
1877 1935 'diff': lambda r: list(repo[r].diff(
1878 1936 opts=diffutil.diffallopts(repo.ui, {'git': True}))),
1879 1937 }
1880 1938 for info in fields:
1881 1939 getfield = _funcs.get(info, None)
1882 1940 if getfield is None:
1883 1941 raise error.ParseError(
1884 1942 # i18n: "matching" is a keyword
1885 1943 _("unexpected field name passed to matching: %s") % info)
1886 1944 getfieldfuncs.append(getfield)
1887 1945 # convert the getfield array of functions into a "getinfo" function
1888 1946 # which returns an array of field values (or a single value if there
1889 1947 # is only one field to match)
1890 1948 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1891 1949
1892 1950 def matches(x):
1893 1951 for rev in revs:
1894 1952 target = getinfo(rev)
1895 1953 match = True
1896 1954 for n, f in enumerate(getfieldfuncs):
1897 1955 if target[n] != f(x):
1898 1956 match = False
1899 1957 if match:
1900 1958 return True
1901 1959 return False
1902 1960
1903 1961 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1904 1962
1905 1963 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1906 1964 def reverse(repo, subset, x, order):
1907 1965 """Reverse order of set.
1908 1966 """
1909 1967 l = getset(repo, subset, x, order)
1910 1968 if order == defineorder:
1911 1969 l.reverse()
1912 1970 return l
1913 1971
1914 1972 @predicate('roots(set)', safe=True)
1915 1973 def roots(repo, subset, x):
1916 1974 """Changesets in set with no parent changeset in set.
1917 1975 """
1918 1976 s = getset(repo, fullreposet(repo), x)
1919 1977 parents = repo.changelog.parentrevs
1920 1978 def filter(r):
1921 1979 for p in parents(r):
1922 1980 if 0 <= p and p in s:
1923 1981 return False
1924 1982 return True
1925 1983 return subset & s.filter(filter, condrepr='<roots>')
1926 1984
1927 1985 _sortkeyfuncs = {
1928 1986 'rev': lambda c: c.rev(),
1929 1987 'branch': lambda c: c.branch(),
1930 1988 'desc': lambda c: c.description(),
1931 1989 'user': lambda c: c.user(),
1932 1990 'author': lambda c: c.user(),
1933 1991 'date': lambda c: c.date()[0],
1934 1992 }
1935 1993
1936 1994 def _getsortargs(x):
1937 1995 """Parse sort options into (set, [(key, reverse)], opts)"""
1938 1996 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1939 1997 if 'set' not in args:
1940 1998 # i18n: "sort" is a keyword
1941 1999 raise error.ParseError(_('sort requires one or two arguments'))
1942 2000 keys = "rev"
1943 2001 if 'keys' in args:
1944 2002 # i18n: "sort" is a keyword
1945 2003 keys = getstring(args['keys'], _("sort spec must be a string"))
1946 2004
1947 2005 keyflags = []
1948 2006 for k in keys.split():
1949 2007 fk = k
1950 2008 reverse = (k.startswith('-'))
1951 2009 if reverse:
1952 2010 k = k[1:]
1953 2011 if k not in _sortkeyfuncs and k != 'topo':
1954 2012 raise error.ParseError(
1955 2013 _("unknown sort key %r") % pycompat.bytestr(fk))
1956 2014 keyflags.append((k, reverse))
1957 2015
1958 2016 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
1959 2017 # i18n: "topo" is a keyword
1960 2018 raise error.ParseError(_('topo sort order cannot be combined '
1961 2019 'with other sort keys'))
1962 2020
1963 2021 opts = {}
1964 2022 if 'topo.firstbranch' in args:
1965 2023 if any(k == 'topo' for k, reverse in keyflags):
1966 2024 opts['topo.firstbranch'] = args['topo.firstbranch']
1967 2025 else:
1968 2026 # i18n: "topo" and "topo.firstbranch" are keywords
1969 2027 raise error.ParseError(_('topo.firstbranch can only be used '
1970 2028 'when using the topo sort key'))
1971 2029
1972 2030 return args['set'], keyflags, opts
1973 2031
1974 2032 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
1975 2033 weight=10)
1976 2034 def sort(repo, subset, x, order):
1977 2035 """Sort set by keys. The default sort order is ascending; specify a key
1978 2036 as ``-key`` to sort in descending order.
1979 2037
1980 2038 The keys can be:
1981 2039
1982 2040 - ``rev`` for the revision number,
1983 2041 - ``branch`` for the branch name,
1984 2042 - ``desc`` for the commit message (description),
1985 2043 - ``user`` for user name (``author`` can be used as an alias),
1986 2044 - ``date`` for the commit date,
1987 2045 - ``topo`` for a reverse topological sort
1988 2046
1989 2047 The ``topo`` sort order cannot be combined with other sort keys. This sort
1990 2048 takes one optional argument, ``topo.firstbranch``, which takes a revset that
1991 2049 specifies which branches to prioritize in the sort.
1992 2050
1993 2051 """
1994 2052 s, keyflags, opts = _getsortargs(x)
1995 2053 revs = getset(repo, subset, s, order)
1996 2054
1997 2055 if not keyflags or order != defineorder:
1998 2056 return revs
1999 2057 if len(keyflags) == 1 and keyflags[0][0] == "rev":
2000 2058 revs.sort(reverse=keyflags[0][1])
2001 2059 return revs
2002 2060 elif keyflags[0][0] == "topo":
2003 2061 firstbranch = ()
2004 2062 if 'topo.firstbranch' in opts:
2005 2063 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
2006 2064 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
2007 2065 firstbranch),
2008 2066 istopo=True)
2009 2067 if keyflags[0][1]:
2010 2068 revs.reverse()
2011 2069 return revs
2012 2070
2013 2071 # sort() is guaranteed to be stable
2014 2072 ctxs = [repo[r] for r in revs]
2015 2073 for k, reverse in reversed(keyflags):
2016 2074 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2017 2075 return baseset([c.rev() for c in ctxs])
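# For example, ``sort(all(), '-date user')`` sorts primarily by date, newest
# first, breaking ties by user name: the stable sorts above run in reverse key
# order, so the first listed key ends up most significant.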
2018 2076
2019 2077 @predicate('subrepo([pattern])')
2020 2078 def subrepo(repo, subset, x):
2021 2079 """Changesets that add, modify or remove the given subrepo. If no subrepo
2022 2080 pattern is named, any subrepo changes are returned.
2023 2081 """
2024 2082 # i18n: "subrepo" is a keyword
2025 2083 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2026 2084 pat = None
2027 2085 if len(args) != 0:
2028 2086 pat = getstring(args[0], _("subrepo requires a pattern"))
2029 2087
2030 2088 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2031 2089
2032 2090 def submatches(names):
2033 2091 k, p, m = stringutil.stringmatcher(pat)
2034 2092 for name in names:
2035 2093 if m(name):
2036 2094 yield name
2037 2095
2038 2096 def matches(x):
2039 2097 c = repo[x]
2040 2098 s = repo.status(c.p1().node(), c.node(), match=m)
2041 2099
2042 2100 if pat is None:
2043 2101 return s.added or s.modified or s.removed
2044 2102
2045 2103 if s.added:
2046 2104 return any(submatches(c.substate.keys()))
2047 2105
2048 2106 if s.modified:
2049 2107 subs = set(c.p1().substate.keys())
2050 2108 subs.update(c.substate.keys())
2051 2109
2052 2110 for path in submatches(subs):
2053 2111 if c.p1().substate.get(path) != c.substate.get(path):
2054 2112 return True
2055 2113
2056 2114 if s.removed:
2057 2115 return any(submatches(c.p1().substate.keys()))
2058 2116
2059 2117 return False
2060 2118
2061 2119 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2062 2120
2063 2121 def _mapbynodefunc(repo, s, f):
2064 2122 """(repo, smartset, [node] -> [node]) -> smartset
2065 2123
2066 2124 Helper method to map a smartset to another smartset given a function only
2067 2125 talking about nodes. Handles converting between rev numbers and nodes, and
2068 2126 filtering.
2069 2127 """
2070 2128 cl = repo.unfiltered().changelog
2071 2129 torev = cl.rev
2072 2130 tonode = cl.node
2073 2131 nodemap = cl.nodemap
2074 2132 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
2075 2133 return smartset.baseset(result - repo.changelog.filteredrevs)
2076 2134
2077 2135 @predicate('successors(set)', safe=True)
2078 2136 def successors(repo, subset, x):
2079 2137 """All successors for set, including the given set themselves."""
2080 2138 s = getset(repo, fullreposet(repo), x)
2081 2139 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
2082 2140 d = _mapbynodefunc(repo, s, f)
2083 2141 return subset & d
2084 2142
2085 2143 def _substringmatcher(pattern, casesensitive=True):
2086 2144 kind, pattern, matcher = stringutil.stringmatcher(
2087 2145 pattern, casesensitive=casesensitive)
2088 2146 if kind == 'literal':
2089 2147 if not casesensitive:
2090 2148 pattern = encoding.lower(pattern)
2091 2149 matcher = lambda s: pattern in encoding.lower(s)
2092 2150 else:
2093 2151 matcher = lambda s: pattern in s
2094 2152 return kind, pattern, matcher
2095 2153
2096 2154 @predicate('tag([name])', safe=True)
2097 2155 def tag(repo, subset, x):
2098 2156 """The specified tag by name, or all tagged revisions if no name is given.
2099 2157
2100 2158 Pattern matching is supported for `name`. See
2101 2159 :hg:`help revisions.patterns`.
2102 2160 """
2103 2161 # i18n: "tag" is a keyword
2104 2162 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2105 2163 cl = repo.changelog
2106 2164 if args:
2107 2165 pattern = getstring(args[0],
2108 2166 # i18n: "tag" is a keyword
2109 2167 _('the argument to tag must be a string'))
2110 2168 kind, pattern, matcher = stringutil.stringmatcher(pattern)
2111 2169 if kind == 'literal':
2112 2170 # avoid resolving all tags
2113 2171 tn = repo._tagscache.tags.get(pattern, None)
2114 2172 if tn is None:
2115 2173 raise error.RepoLookupError(_("tag '%s' does not exist")
2116 2174 % pattern)
2117 2175 s = {repo[tn].rev()}
2118 2176 else:
2119 2177 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2120 2178 else:
2121 2179 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
2122 2180 return subset & s
2123 2181
2124 2182 @predicate('tagged', safe=True)
2125 2183 def tagged(repo, subset, x):
2126 2184 return tag(repo, subset, x)
2127 2185
2128 2186 @predicate('orphan()', safe=True)
2129 2187 def orphan(repo, subset, x):
2130 2188 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2131 2189 """
2132 2190 # i18n: "orphan" is a keyword
2133 2191 getargs(x, 0, 0, _("orphan takes no arguments"))
2134 2192 orphan = obsmod.getrevs(repo, 'orphan')
2135 2193 return subset & orphan
2136 2194
2137 2195
2138 2196 @predicate('user(string)', safe=True, weight=10)
2139 2197 def user(repo, subset, x):
2140 2198 """User name contains string. The match is case-insensitive.
2141 2199
2142 2200 Pattern matching is supported for `string`. See
2143 2201 :hg:`help revisions.patterns`.
2144 2202 """
2145 2203 return author(repo, subset, x)
2146 2204
2147 2205 @predicate('wdir()', safe=True, weight=0)
2148 2206 def wdir(repo, subset, x):
2149 2207 """Working directory. (EXPERIMENTAL)"""
2150 2208 # i18n: "wdir" is a keyword
2151 2209 getargs(x, 0, 0, _("wdir takes no arguments"))
2152 2210 if node.wdirrev in subset or isinstance(subset, fullreposet):
2153 2211 return baseset([node.wdirrev])
2154 2212 return baseset()
2155 2213
2156 2214 def _orderedlist(repo, subset, x):
2157 2215 s = getstring(x, "internal error")
2158 2216 if not s:
2159 2217 return baseset()
2160 2218 # remove duplicates here. it's difficult for caller to deduplicate sets
2161 2219 # because different symbols can point to the same rev.
2162 2220 cl = repo.changelog
2163 2221 ls = []
2164 2222 seen = set()
2165 2223 for t in s.split('\0'):
2166 2224 try:
2167 2225 # fast path for integer revision
2168 2226 r = int(t)
2169 2227 if ('%d' % r) != t or r not in cl:
2170 2228 raise ValueError
2171 2229 revs = [r]
2172 2230 except ValueError:
2173 2231 revs = stringset(repo, subset, t, defineorder)
2174 2232
2175 2233 for r in revs:
2176 2234 if r in seen:
2177 2235 continue
2178 2236 if (r in subset
2179 2237 or r == node.nullrev and isinstance(subset, fullreposet)):
2180 2238 ls.append(r)
2181 2239 seen.add(r)
2182 2240 return baseset(ls)
2183 2241
2184 2242 # for internal use
2185 2243 @predicate('_list', safe=True, takeorder=True)
2186 2244 def _list(repo, subset, x, order):
2187 2245 if order == followorder:
2188 2246 # slow path to take the subset order
2189 2247 return subset & _orderedlist(repo, fullreposet(repo), x)
2190 2248 else:
2191 2249 return _orderedlist(repo, subset, x)
2192 2250
2193 2251 def _orderedintlist(repo, subset, x):
2194 2252 s = getstring(x, "internal error")
2195 2253 if not s:
2196 2254 return baseset()
2197 2255 ls = [int(r) for r in s.split('\0')]
2198 2256 s = subset
2199 2257 return baseset([r for r in ls if r in s])
2200 2258
2201 2259 # for internal use
2202 2260 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2203 2261 def _intlist(repo, subset, x, order):
2204 2262 if order == followorder:
2205 2263 # slow path to take the subset order
2206 2264 return subset & _orderedintlist(repo, fullreposet(repo), x)
2207 2265 else:
2208 2266 return _orderedintlist(repo, subset, x)
2209 2267
2210 2268 def _orderedhexlist(repo, subset, x):
2211 2269 s = getstring(x, "internal error")
2212 2270 if not s:
2213 2271 return baseset()
2214 2272 cl = repo.changelog
2215 2273 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2216 2274 s = subset
2217 2275 return baseset([r for r in ls if r in s])
2218 2276
2219 2277 # for internal use
2220 2278 @predicate('_hexlist', safe=True, takeorder=True)
2221 2279 def _hexlist(repo, subset, x, order):
2222 2280 if order == followorder:
2223 2281 # slow path to take the subset order
2224 2282 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2225 2283 else:
2226 2284 return _orderedhexlist(repo, subset, x)
2227 2285
2228 2286 methods = {
2229 2287 "range": rangeset,
2230 2288 "rangeall": rangeall,
2231 2289 "rangepre": rangepre,
2232 2290 "rangepost": rangepost,
2233 2291 "dagrange": dagrange,
2234 2292 "string": stringset,
2235 2293 "symbol": stringset,
2236 2294 "and": andset,
2237 2295 "andsmally": andsmallyset,
2238 2296 "or": orset,
2239 2297 "not": notset,
2240 2298 "difference": differenceset,
2241 2299 "relation": relationset,
2242 2300 "relsubscript": relsubscriptset,
2243 2301 "subscript": subscriptset,
2244 2302 "list": listset,
2245 2303 "keyvalue": keyvaluepair,
2246 2304 "func": func,
2247 2305 "ancestor": ancestorspec,
2248 2306 "parent": parentspec,
2249 2307 "parentpost": parentpost,
2250 2308 "smartset": rawsmartset,
2251 2309 }
2252 2310
2253 2311 subscriptrelations = {
2254 2312 "g": generationsrel,
2255 2313 "generations": generationsrel,
2256 2314 }
2257 2315
2258 2316 def lookupfn(repo):
2259 2317 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2260 2318
2261 2319 def match(ui, spec, lookup=None):
2262 2320 """Create a matcher for a single revision spec"""
2263 2321 return matchany(ui, [spec], lookup=lookup)
2264 2322
2265 2323 def matchany(ui, specs, lookup=None, localalias=None):
2266 2324 """Create a matcher that will include any revisions matching one of the
2267 2325 given specs
2268 2326
2269 2327 If lookup function is not None, the parser will first attempt to handle
2270 2328 old-style ranges, which may contain operator characters.
2271 2329
2272 2330 If localalias is not None, it is a dict {name: definitionstring}. It takes
2273 2331 precedence over [revsetalias] config section.
2274 2332 """
2275 2333 if not specs:
2276 2334 def mfunc(repo, subset=None):
2277 2335 return baseset()
2278 2336 return mfunc
2279 2337 if not all(specs):
2280 2338 raise error.ParseError(_("empty query"))
2281 2339 if len(specs) == 1:
2282 2340 tree = revsetlang.parse(specs[0], lookup)
2283 2341 else:
2284 2342 tree = ('or',
2285 2343 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2286 2344
2287 2345 aliases = []
2288 2346 warn = None
2289 2347 if ui:
2290 2348 aliases.extend(ui.configitems('revsetalias'))
2291 2349 warn = ui.warn
2292 2350 if localalias:
2293 2351 aliases.extend(localalias.items())
2294 2352 if aliases:
2295 2353 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2296 2354 tree = revsetlang.foldconcat(tree)
2297 2355 tree = revsetlang.analyze(tree)
2298 2356 tree = revsetlang.optimize(tree)
2299 2357 return makematcher(tree)
2300 2358
2301 2359 def makematcher(tree):
2302 2360 """Create a matcher from an evaluatable tree"""
2303 2361 def mfunc(repo, subset=None, order=None):
2304 2362 if order is None:
2305 2363 if subset is None:
2306 2364 order = defineorder # 'x'
2307 2365 else:
2308 2366 order = followorder # 'subset & x'
2309 2367 if subset is None:
2310 2368 subset = fullreposet(repo)
2311 2369 return getset(repo, subset, tree, order)
2312 2370 return mfunc
2313 2371
2314 2372 def loadpredicate(ui, extname, registrarobj):
2315 2373 """Load revset predicates from specified registrarobj
2316 2374 """
2317 2375 for name, func in registrarobj._table.iteritems():
2318 2376 symbols[name] = func
2319 2377 if func._safe:
2320 2378 safesymbols.add(name)
2321 2379
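loadpredicate() is what the extension loader feeds with an extension's registrar table. A rough sketch of that extension side, assuming the usual registrar.revsetpredicate() pattern (the drafty() predicate is hypothetical, not part of this change):

from mercurial import registrar, revset

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate(b'drafty()', safe=True)
def drafty(repo, subset, x):
    """Hypothetical example: draft revisions within the subset"""
    revset.getargs(x, 0, 0, b'drafty takes no arguments')
    return subset.filter(lambda r: repo[r].draft())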
2322 2380 # load built-in predicates explicitly to setup safesymbols
2323 2381 loadpredicate(None, None, predicate)
2324 2382
2325 2383 # tell hggettext to extract docstrings from these functions:
2326 2384 i18nfunctions = symbols.values()
@@ -1,84 +1,85
1 1 # this is a hack to make sure no escape characters are inserted into the output
2 2
3 3 from __future__ import absolute_import
4 4
5 5 import doctest
6 6 import os
7 7 import re
8 8 import sys
9 9
10 10 ispy3 = (sys.version_info[0] >= 3)
11 11
12 12 if 'TERM' in os.environ:
13 13 del os.environ['TERM']
14 14
15 15 class py3docchecker(doctest.OutputChecker):
16 16 def check_output(self, want, got, optionflags):
17 17 want2 = re.sub(r'''\bu(['"])(.*?)\1''', r'\1\2\1', want) # py2: u''
18 18 got2 = re.sub(r'''\bb(['"])(.*?)\1''', r'\1\2\1', got) # py3: b''
19 19 # py3: <exc.name>: b'<msg>' -> <name>: <msg>
20 20 # <exc.name>: <others> -> <name>: <others>
21 21 got2 = re.sub(r'''^mercurial\.\w+\.(\w+): (['"])(.*?)\2''', r'\1: \3',
22 22 got2, flags=re.MULTILINE)
23 23 got2 = re.sub(r'^mercurial\.\w+\.(\w+): ', r'\1: ', got2, flags=re.MULTILINE)
24 24 return any(doctest.OutputChecker.check_output(self, w, g, optionflags)
25 25 for w, g in [(want, got), (want2, got2)])
26 26
27 27 def testmod(name, optionflags=0, testtarget=None):
28 28 __import__(name)
29 29 mod = sys.modules[name]
30 30 if testtarget is not None:
31 31 mod = getattr(mod, testtarget)
32 32
33 33 # minimal copy of doctest.testmod()
34 34 finder = doctest.DocTestFinder()
35 35 checker = None
36 36 if ispy3:
37 37 checker = py3docchecker()
38 38 runner = doctest.DocTestRunner(checker=checker, optionflags=optionflags)
39 39 for test in finder.find(mod, name):
40 40 runner.run(test)
41 41 runner.summarize()
42 42
43 43 testmod('mercurial.changegroup')
44 44 testmod('mercurial.changelog')
45 45 testmod('mercurial.cmdutil')
46 46 testmod('mercurial.color')
47 47 testmod('mercurial.config')
48 48 testmod('mercurial.context')
49 49 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
50 50 testmod('mercurial.dispatch')
51 51 testmod('mercurial.encoding')
52 52 testmod('mercurial.fancyopts')
53 53 testmod('mercurial.formatter')
54 54 testmod('mercurial.hg')
55 55 testmod('mercurial.hgweb.hgwebdir_mod')
56 56 testmod('mercurial.match')
57 57 testmod('mercurial.mdiff')
58 58 testmod('mercurial.minirst')
59 59 testmod('mercurial.patch')
60 60 testmod('mercurial.pathutil')
61 61 testmod('mercurial.parser')
62 62 testmod('mercurial.pycompat')
63 63 testmod('mercurial.revlog')
64 64 testmod('mercurial.revlogutils.deltas')
65 testmod('mercurial.revset')
65 66 testmod('mercurial.revsetlang')
66 67 testmod('mercurial.smartset')
67 68 testmod('mercurial.store')
68 69 testmod('mercurial.subrepo')
69 70 testmod('mercurial.templatefilters')
70 71 testmod('mercurial.templater')
71 72 testmod('mercurial.ui')
72 73 testmod('mercurial.url')
73 74 testmod('mercurial.util')
74 75 testmod('mercurial.util', testtarget='platform')
75 76 testmod('mercurial.utils.stringutil')
76 77 testmod('hgext.convert.convcmd')
77 78 testmod('hgext.convert.cvsps')
78 79 testmod('hgext.convert.filemap')
79 80 testmod('hgext.convert.p4')
80 81 testmod('hgext.convert.subversion')
81 82 testmod('hgext.fix')
82 83 testmod('hgext.mq')
83 84 # Helper scripts in tests/ that have doctests:
84 85 testmod('drawdag')
@@ -1,2952 +1,2979
1 1 $ HGENCODING=utf-8
2 2 $ export HGENCODING
3 3 $ cat > testrevset.py << EOF
4 4 > import mercurial.revset
5 5 >
6 6 > baseset = mercurial.revset.baseset
7 7 >
8 8 > def r3232(repo, subset, x):
9 9 > """"simple revset that return [3,2,3,2]
10 10 >
11 11 > revisions duplicated on purpose.
12 12 > """
13 13 > if 3 not in subset:
14 14 > if 2 in subset:
15 15 > return baseset([2,2])
16 16 > return baseset()
17 17 > return baseset([3,3,2,2])
18 18 >
19 19 > mercurial.revset.symbols[b'r3232'] = r3232
20 20 > EOF
21 21 $ cat >> $HGRCPATH << EOF
22 22 > [extensions]
23 23 > drawdag=$TESTDIR/drawdag.py
24 24 > testrevset=$TESTTMP/testrevset.py
25 25 > EOF
26 26
27 27 $ try() {
28 28 > hg debugrevspec --debug "$@"
29 29 > }
30 30
31 31 $ log() {
32 32 > hg log --template '{rev}\n' -r "$1"
33 33 > }
34 34
35 35 extension to build '_intlist()' and '_hexlist()', which is necessary because
36 36 these predicates use '\0' as a separator:
37 37
38 38 $ cat <<EOF > debugrevlistspec.py
39 39 > from __future__ import absolute_import
40 40 > from mercurial import (
41 41 > node as nodemod,
42 42 > registrar,
43 43 > revset,
44 44 > revsetlang,
45 45 > )
46 46 > from mercurial.utils import stringutil
47 47 > cmdtable = {}
48 48 > command = registrar.command(cmdtable)
49 49 > @command(b'debugrevlistspec',
50 50 > [(b'', b'optimize', None, b'print parsed tree after optimizing'),
51 51 > (b'', b'bin', None, b'unhexlify arguments')])
52 52 > def debugrevlistspec(ui, repo, fmt, *args, **opts):
53 53 > if opts['bin']:
54 54 > args = map(nodemod.bin, args)
55 55 > expr = revsetlang.formatspec(fmt, list(args))
56 56 > if ui.verbose:
57 57 > tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
58 58 > ui.note(revsetlang.prettyformat(tree), b"\n")
59 59 > if opts["optimize"]:
60 60 > opttree = revsetlang.optimize(revsetlang.analyze(tree))
61 61 > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree),
62 62 > b"\n")
63 63 > func = revset.match(ui, expr, lookup=revset.lookupfn(repo))
64 64 > revs = func(repo)
65 65 > if ui.verbose:
66 66 > ui.note(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
67 67 > for c in revs:
68 68 > ui.write(b"%d\n" % c)
69 69 > EOF
70 70 $ cat <<EOF >> $HGRCPATH
71 71 > [extensions]
72 72 > debugrevlistspec = $TESTTMP/debugrevlistspec.py
73 73 > EOF
74 74 $ trylist() {
75 75 > hg debugrevlistspec --debug "$@"
76 76 > }
77 77
78 78 $ hg init repo
79 79 $ cd repo
80 80
81 81 $ echo a > a
82 82 $ hg branch a
83 83 marked working directory as branch a
84 84 (branches are permanent and global, did you want a bookmark?)
85 85 $ hg ci -Aqm0
86 86
87 87 $ echo b > b
88 88 $ hg branch b
89 89 marked working directory as branch b
90 90 $ hg ci -Aqm1
91 91
92 92 $ rm a
93 93 $ hg branch a-b-c-
94 94 marked working directory as branch a-b-c-
95 95 $ hg ci -Aqm2 -u Bob
96 96
97 97 $ hg log -r "extra('branch', 'a-b-c-')" --template '{rev}\n'
98 98 2
99 99 $ hg log -r "extra('branch')" --template '{rev}\n'
100 100 0
101 101 1
102 102 2
103 103 $ hg log -r "extra('branch', 're:a')" --template '{rev} {branch}\n'
104 104 0 a
105 105 2 a-b-c-
106 106
107 107 $ hg co 1
108 108 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 109 $ hg branch +a+b+c+
110 110 marked working directory as branch +a+b+c+
111 111 $ hg ci -Aqm3
112 112
113 113 $ hg co 2 # interleave
114 114 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
115 115 $ echo bb > b
116 116 $ hg branch -- -a-b-c-
117 117 marked working directory as branch -a-b-c-
118 118 $ hg ci -Aqm4 -d "May 12 2005"
119 119
120 120 $ hg co 3
121 121 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
122 122 $ hg branch !a/b/c/
123 123 marked working directory as branch !a/b/c/
124 124 $ hg ci -Aqm"5 bug"
125 125
126 126 $ hg merge 4
127 127 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
128 128 (branch merge, don't forget to commit)
129 129 $ hg branch _a_b_c_
130 130 marked working directory as branch _a_b_c_
131 131 $ hg ci -Aqm"6 issue619"
132 132
133 133 $ hg branch .a.b.c.
134 134 marked working directory as branch .a.b.c.
135 135 $ hg ci -Aqm7
136 136
137 137 $ hg branch all
138 138 marked working directory as branch all
139 139
140 140 $ hg co 4
141 141 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 142 $ hg branch é
143 143 marked working directory as branch \xc3\xa9 (esc)
144 144 $ hg ci -Aqm9
145 145
146 146 $ hg tag -r6 1.0
147 147 $ hg bookmark -r6 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
148 148
149 149 $ hg clone --quiet -U -r 7 . ../remote1
150 150 $ hg clone --quiet -U -r 8 . ../remote2
151 151 $ echo "[paths]" >> .hg/hgrc
152 152 $ echo "default = ../remote1" >> .hg/hgrc
153 153
154 154 trivial
155 155
156 156 $ try 0:1
157 157 (range
158 158 (symbol '0')
159 159 (symbol '1'))
160 160 * set:
161 161 <spanset+ 0:2>
162 162 0
163 163 1
164 164 $ try --optimize :
165 165 (rangeall
166 166 None)
167 167 * optimized:
168 168 (rangeall
169 169 None)
170 170 * set:
171 171 <spanset+ 0:10>
172 172 0
173 173 1
174 174 2
175 175 3
176 176 4
177 177 5
178 178 6
179 179 7
180 180 8
181 181 9
182 182 $ try 3::6
183 183 (dagrange
184 184 (symbol '3')
185 185 (symbol '6'))
186 186 * set:
187 187 <baseset+ [3, 5, 6]>
188 188 3
189 189 5
190 190 6
191 191 $ try '0|1|2'
192 192 (or
193 193 (list
194 194 (symbol '0')
195 195 (symbol '1')
196 196 (symbol '2')))
197 197 * set:
198 198 <baseset [0, 1, 2]>
199 199 0
200 200 1
201 201 2
202 202
203 203 names that should work without quoting
204 204
205 205 $ try a
206 206 (symbol 'a')
207 207 * set:
208 208 <baseset [0]>
209 209 0
210 210 $ try b-a
211 211 (minus
212 212 (symbol 'b')
213 213 (symbol 'a'))
214 214 * set:
215 215 <filteredset
216 216 <baseset [1]>,
217 217 <not
218 218 <baseset [0]>>>
219 219 1
220 220 $ try _a_b_c_
221 221 (symbol '_a_b_c_')
222 222 * set:
223 223 <baseset [6]>
224 224 6
225 225 $ try _a_b_c_-a
226 226 (minus
227 227 (symbol '_a_b_c_')
228 228 (symbol 'a'))
229 229 * set:
230 230 <filteredset
231 231 <baseset [6]>,
232 232 <not
233 233 <baseset [0]>>>
234 234 6
235 235 $ try .a.b.c.
236 236 (symbol '.a.b.c.')
237 237 * set:
238 238 <baseset [7]>
239 239 7
240 240 $ try .a.b.c.-a
241 241 (minus
242 242 (symbol '.a.b.c.')
243 243 (symbol 'a'))
244 244 * set:
245 245 <filteredset
246 246 <baseset [7]>,
247 247 <not
248 248 <baseset [0]>>>
249 249 7
250 250
251 251 names that should be caught by fallback mechanism
252 252
253 253 $ try -- '-a-b-c-'
254 254 (symbol '-a-b-c-')
255 255 * set:
256 256 <baseset [4]>
257 257 4
258 258 $ log -a-b-c-
259 259 4
260 260 $ try '+a+b+c+'
261 261 (symbol '+a+b+c+')
262 262 * set:
263 263 <baseset [3]>
264 264 3
265 265 $ try '+a+b+c+:'
266 266 (rangepost
267 267 (symbol '+a+b+c+'))
268 268 * set:
269 269 <spanset+ 3:10>
270 270 3
271 271 4
272 272 5
273 273 6
274 274 7
275 275 8
276 276 9
277 277 $ try ':+a+b+c+'
278 278 (rangepre
279 279 (symbol '+a+b+c+'))
280 280 * set:
281 281 <spanset+ 0:4>
282 282 0
283 283 1
284 284 2
285 285 3
286 286 $ try -- '-a-b-c-:+a+b+c+'
287 287 (range
288 288 (symbol '-a-b-c-')
289 289 (symbol '+a+b+c+'))
290 290 * set:
291 291 <spanset- 3:5>
292 292 4
293 293 3
294 294 $ log '-a-b-c-:+a+b+c+'
295 295 4
296 296 3
297 297
298 298 $ try -- -a-b-c--a # complains
299 299 (minus
300 300 (minus
301 301 (minus
302 302 (negate
303 303 (symbol 'a'))
304 304 (symbol 'b'))
305 305 (symbol 'c'))
306 306 (negate
307 307 (symbol 'a')))
308 308 abort: unknown revision '-a'!
309 309 [255]
310 310 $ try Γ©
311 311 (symbol '\xc3\xa9')
312 312 * set:
313 313 <baseset [9]>
314 314 9
315 315
316 316 no quoting needed
317 317
318 318 $ log ::a-b-c-
319 319 0
320 320 1
321 321 2
322 322
323 323 quoting needed
324 324
325 325 $ try '"-a-b-c-"-a'
326 326 (minus
327 327 (string '-a-b-c-')
328 328 (symbol 'a'))
329 329 * set:
330 330 <filteredset
331 331 <baseset [4]>,
332 332 <not
333 333 <baseset [0]>>>
334 334 4
335 335
336 336 $ log '1 or 2'
337 337 1
338 338 2
339 339 $ log '1|2'
340 340 1
341 341 2
342 342 $ log '1 and 2'
343 343 $ log '1&2'
344 344 $ try '1&2|3' # precedence - and is higher
345 345 (or
346 346 (list
347 347 (and
348 348 (symbol '1')
349 349 (symbol '2'))
350 350 (symbol '3')))
351 351 * set:
352 352 <addset
353 353 <baseset []>,
354 354 <baseset [3]>>
355 355 3
356 356 $ try '1|2&3'
357 357 (or
358 358 (list
359 359 (symbol '1')
360 360 (and
361 361 (symbol '2')
362 362 (symbol '3'))))
363 363 * set:
364 364 <addset
365 365 <baseset [1]>,
366 366 <baseset []>>
367 367 1
368 368 $ try '1&2&3' # associativity
369 369 (and
370 370 (and
371 371 (symbol '1')
372 372 (symbol '2'))
373 373 (symbol '3'))
374 374 * set:
375 375 <baseset []>
376 376 $ try '1|(2|3)'
377 377 (or
378 378 (list
379 379 (symbol '1')
380 380 (group
381 381 (or
382 382 (list
383 383 (symbol '2')
384 384 (symbol '3'))))))
385 385 * set:
386 386 <addset
387 387 <baseset [1]>,
388 388 <baseset [2, 3]>>
389 389 1
390 390 2
391 391 3
392 392 $ log '1.0' # tag
393 393 6
394 394 $ log 'a' # branch
395 395 0
396 396 $ log '2785f51ee'
397 397 0
398 398 $ log 'date(2005)'
399 399 4
400 400 $ log 'date(this is a test)'
401 401 hg: parse error at 10: unexpected token: symbol
402 402 (date(this is a test)
403 403 ^ here)
404 404 [255]
405 405 $ log 'date()'
406 406 hg: parse error: date requires a string
407 407 [255]
408 408 $ log 'date'
409 409 abort: unknown revision 'date'!
410 410 [255]
411 411 $ log 'date('
412 412 hg: parse error at 5: not a prefix: end
413 413 (date(
414 414 ^ here)
415 415 [255]
416 416 $ log 'date("\xy")'
417 417 hg: parse error: invalid \x escape* (glob)
418 418 [255]
419 419 $ log 'date(tip)'
420 420 hg: parse error: invalid date: 'tip'
421 421 [255]
422 422 $ log '0:date'
423 423 abort: unknown revision 'date'!
424 424 [255]
425 425 $ log '::"date"'
426 426 abort: unknown revision 'date'!
427 427 [255]
428 428 $ hg book date -r 4
429 429 $ log '0:date'
430 430 0
431 431 1
432 432 2
433 433 3
434 434 4
435 435 $ log '::date'
436 436 0
437 437 1
438 438 2
439 439 4
440 440 $ log '::"date"'
441 441 0
442 442 1
443 443 2
444 444 4
445 445 $ log 'date(2005) and 1::'
446 446 4
447 447 $ hg book -d date
448 448
449 449 function name should be a symbol
450 450
451 451 $ log '"date"(2005)'
452 452 hg: parse error: not a symbol
453 453 [255]
454 454
455 455 keyword arguments
456 456
457 457 $ log 'extra(branch, value=a)'
458 458 0
459 459
460 460 $ log 'extra(branch, a, b)'
461 461 hg: parse error: extra takes at most 2 positional arguments
462 462 [255]
463 463 $ log 'extra(a, label=b)'
464 464 hg: parse error: extra got multiple values for keyword argument 'label'
465 465 [255]
466 466 $ log 'extra(label=branch, default)'
467 467 hg: parse error: extra got an invalid argument
468 468 [255]
469 469 $ log 'extra(branch, foo+bar=baz)'
470 470 hg: parse error: extra got an invalid argument
471 471 [255]
472 472 $ log 'extra(unknown=branch)'
473 473 hg: parse error: extra got an unexpected keyword argument 'unknown'
474 474 [255]
475 475
476 476 $ try 'foo=bar|baz'
477 477 (keyvalue
478 478 (symbol 'foo')
479 479 (or
480 480 (list
481 481 (symbol 'bar')
482 482 (symbol 'baz'))))
483 483 hg: parse error: can't use a key-value pair in this context
484 484 [255]
485 485
486 486 right-hand side should be optimized recursively
487 487
488 488 $ try --optimize 'foo=(not public())'
489 489 (keyvalue
490 490 (symbol 'foo')
491 491 (group
492 492 (not
493 493 (func
494 494 (symbol 'public')
495 495 None))))
496 496 * optimized:
497 497 (keyvalue
498 498 (symbol 'foo')
499 499 (func
500 500 (symbol '_notpublic')
501 501 None))
502 502 hg: parse error: can't use a key-value pair in this context
503 503 [255]
504 504
505 505 relation-subscript operator has the highest binding strength (as function call):
506 506
507 507 $ hg debugrevspec -p parsed 'tip:tip^#generations[-1]'
508 508 * parsed:
509 509 (range
510 510 (symbol 'tip')
511 511 (relsubscript
512 512 (parentpost
513 513 (symbol 'tip'))
514 514 (symbol 'generations')
515 515 (negate
516 516 (symbol '1'))))
517 517 9
518 518 8
519 519 7
520 520 6
521 521 5
522 522 4
523 523
524 524 $ hg debugrevspec -p parsed --no-show-revs 'not public()#generations[0]'
525 525 * parsed:
526 526 (not
527 527 (relsubscript
528 528 (func
529 529 (symbol 'public')
530 530 None)
531 531 (symbol 'generations')
532 532 (symbol '0')))
533 533
534 534 left-hand side of relation-subscript operator should be optimized recursively:
535 535
536 536 $ hg debugrevspec -p analyzed -p optimized --no-show-revs \
537 537 > '(not public())#generations[0]'
538 538 * analyzed:
539 539 (relsubscript
540 540 (not
541 541 (func
542 542 (symbol 'public')
543 543 None))
544 544 (symbol 'generations')
545 545 (symbol '0'))
546 546 * optimized:
547 547 (relsubscript
548 548 (func
549 549 (symbol '_notpublic')
550 550 None)
551 551 (symbol 'generations')
552 552 (symbol '0'))
553 553
554 554 resolution of subscript and relation-subscript ternary operators:
555 555
556 556 $ hg debugrevspec -p analyzed 'tip[0]'
557 557 * analyzed:
558 558 (subscript
559 559 (symbol 'tip')
560 560 (symbol '0'))
561 561 hg: parse error: can't use a subscript in this context
562 562 [255]
563 563
564 564 $ hg debugrevspec -p analyzed 'tip#rel[0]'
565 565 * analyzed:
566 566 (relsubscript
567 567 (symbol 'tip')
568 568 (symbol 'rel')
569 569 (symbol '0'))
570 570 hg: parse error: unknown identifier: rel
571 571 [255]
572 572
573 573 $ hg debugrevspec -p analyzed '(tip#rel)[0]'
574 574 * analyzed:
575 575 (subscript
576 576 (relation
577 577 (symbol 'tip')
578 578 (symbol 'rel'))
579 579 (symbol '0'))
580 580 hg: parse error: can't use a subscript in this context
581 581 [255]
582 582
583 583 $ hg debugrevspec -p analyzed 'tip#rel[0][1]'
584 584 * analyzed:
585 585 (subscript
586 586 (relsubscript
587 587 (symbol 'tip')
588 588 (symbol 'rel')
589 589 (symbol '0'))
590 590 (symbol '1'))
591 591 hg: parse error: can't use a subscript in this context
592 592 [255]
593 593
594 594 $ hg debugrevspec -p analyzed 'tip#rel0#rel1[1]'
595 595 * analyzed:
596 596 (relsubscript
597 597 (relation
598 598 (symbol 'tip')
599 599 (symbol 'rel0'))
600 600 (symbol 'rel1')
601 601 (symbol '1'))
602 602 hg: parse error: unknown identifier: rel1
603 603 [255]
604 604
605 605 $ hg debugrevspec -p analyzed 'tip#rel0[0]#rel1[1]'
606 606 * analyzed:
607 607 (relsubscript
608 608 (relsubscript
609 609 (symbol 'tip')
610 610 (symbol 'rel0')
611 611 (symbol '0'))
612 612 (symbol 'rel1')
613 613 (symbol '1'))
614 614 hg: parse error: unknown identifier: rel1
615 615 [255]
616 616
617 617 parse errors of relation, subscript and relation-subscript operators:
618 618
619 619 $ hg debugrevspec '[0]'
620 620 hg: parse error at 0: not a prefix: [
621 621 ([0]
622 622 ^ here)
623 623 [255]
624 624 $ hg debugrevspec '.#'
625 625 hg: parse error at 2: not a prefix: end
626 626 (.#
627 627 ^ here)
628 628 [255]
629 629 $ hg debugrevspec '#rel'
630 630 hg: parse error at 0: not a prefix: #
631 631 (#rel
632 632 ^ here)
633 633 [255]
634 634 $ hg debugrevspec '.#rel[0'
635 635 hg: parse error at 7: unexpected token: end
636 636 (.#rel[0
637 637 ^ here)
638 638 [255]
639 639 $ hg debugrevspec '.]'
640 640 hg: parse error at 1: invalid token
641 641 (.]
642 642 ^ here)
643 643 [255]
644 644
645 645 $ hg debugrevspec '.#generations[a]'
646 646 hg: parse error: relation subscript must be an integer
647 647 [255]
648 648 $ hg debugrevspec '.#generations[1-2]'
649 649 hg: parse error: relation subscript must be an integer
650 650 [255]
651 $ hg debugrevspec '.#generations[foo:bar]'
652 hg: parse error: relation subscript bounds must be integers
653 [255]
651 654
652 655 suggested relations
653 656
654 657 $ hg debugrevspec '.#generafions[0]'
655 658 hg: parse error: unknown identifier: generafions
656 659 (did you mean generations?)
657 660 [255]
658 661
659 662 $ hg debugrevspec '.#f[0]'
660 663 hg: parse error: unknown identifier: f
661 664 [255]
662 665
663 666 parsed tree at stages:
664 667
665 668 $ hg debugrevspec -p all '()'
666 669 * parsed:
667 670 (group
668 671 None)
669 672 * expanded:
670 673 (group
671 674 None)
672 675 * concatenated:
673 676 (group
674 677 None)
675 678 * analyzed:
676 679 None
677 680 * optimized:
678 681 None
679 682 hg: parse error: missing argument
680 683 [255]
681 684
682 685 $ hg debugrevspec --no-optimized -p all '()'
683 686 * parsed:
684 687 (group
685 688 None)
686 689 * expanded:
687 690 (group
688 691 None)
689 692 * concatenated:
690 693 (group
691 694 None)
692 695 * analyzed:
693 696 None
694 697 hg: parse error: missing argument
695 698 [255]
696 699
697 700 $ hg debugrevspec -p parsed -p analyzed -p optimized '(0|1)-1'
698 701 * parsed:
699 702 (minus
700 703 (group
701 704 (or
702 705 (list
703 706 (symbol '0')
704 707 (symbol '1'))))
705 708 (symbol '1'))
706 709 * analyzed:
707 710 (and
708 711 (or
709 712 (list
710 713 (symbol '0')
711 714 (symbol '1')))
712 715 (not
713 716 (symbol '1')))
714 717 * optimized:
715 718 (difference
716 719 (func
717 720 (symbol '_list')
718 721 (string '0\x001'))
719 722 (symbol '1'))
720 723 0
721 724
722 725 $ hg debugrevspec -p unknown '0'
723 726 abort: invalid stage name: unknown
724 727 [255]
725 728
726 729 $ hg debugrevspec -p all --optimize '0'
727 730 abort: cannot use --optimize with --show-stage
728 731 [255]
729 732
730 733 verify optimized tree:
731 734
732 735 $ hg debugrevspec --verify '0|1'
733 736
734 737 $ hg debugrevspec --verify -v -p analyzed -p optimized 'r3232() & 2'
735 738 * analyzed:
736 739 (and
737 740 (func
738 741 (symbol 'r3232')
739 742 None)
740 743 (symbol '2'))
741 744 * optimized:
742 745 (andsmally
743 746 (func
744 747 (symbol 'r3232')
745 748 None)
746 749 (symbol '2'))
747 750 * analyzed set:
748 751 <baseset [2]>
749 752 * optimized set:
750 753 <baseset [2, 2]>
751 754 --- analyzed
752 755 +++ optimized
753 756 2
754 757 +2
755 758 [1]
756 759
757 760 $ hg debugrevspec --no-optimized --verify-optimized '0'
758 761 abort: cannot use --verify-optimized with --no-optimized
759 762 [255]
760 763
761 764 Test that symbols only get parsed as functions if there's an opening
762 765 parenthesis.
763 766
764 767 $ hg book only -r 9
765 768 $ log 'only(only)' # Outer "only" is a function, inner "only" is the bookmark
766 769 8
767 770 9
768 771
769 772 ':y' behaves like '0:y', but can't be rewritten as such since the revision '0'
770 773 may be hidden (issue5385)
771 774
772 775 $ try -p parsed -p analyzed ':'
773 776 * parsed:
774 777 (rangeall
775 778 None)
776 779 * analyzed:
777 780 (rangeall
778 781 None)
779 782 * set:
780 783 <spanset+ 0:10>
781 784 0
782 785 1
783 786 2
784 787 3
785 788 4
786 789 5
787 790 6
788 791 7
789 792 8
790 793 9
791 794 $ try -p analyzed ':1'
792 795 * analyzed:
793 796 (rangepre
794 797 (symbol '1'))
795 798 * set:
796 799 <spanset+ 0:2>
797 800 0
798 801 1
799 802 $ try -p analyzed ':(1|2)'
800 803 * analyzed:
801 804 (rangepre
802 805 (or
803 806 (list
804 807 (symbol '1')
805 808 (symbol '2'))))
806 809 * set:
807 810 <spanset+ 0:3>
808 811 0
809 812 1
810 813 2
811 814 $ try -p analyzed ':(1&2)'
812 815 * analyzed:
813 816 (rangepre
814 817 (and
815 818 (symbol '1')
816 819 (symbol '2')))
817 820 * set:
818 821 <baseset []>
819 822
820 823 infix/suffix resolution of ^ operator (issue2884, issue5764):
821 824
822 825 x^:y means (x^):y
823 826
824 827 $ try '1^:2'
825 828 (range
826 829 (parentpost
827 830 (symbol '1'))
828 831 (symbol '2'))
829 832 * set:
830 833 <spanset+ 0:3>
831 834 0
832 835 1
833 836 2
834 837
835 838 $ try '1^::2'
836 839 (dagrange
837 840 (parentpost
838 841 (symbol '1'))
839 842 (symbol '2'))
840 843 * set:
841 844 <baseset+ [0, 1, 2]>
842 845 0
843 846 1
844 847 2
845 848
846 849 $ try '1^..2'
847 850 (dagrange
848 851 (parentpost
849 852 (symbol '1'))
850 853 (symbol '2'))
851 854 * set:
852 855 <baseset+ [0, 1, 2]>
853 856 0
854 857 1
855 858 2
856 859
857 860 $ try '9^:'
858 861 (rangepost
859 862 (parentpost
860 863 (symbol '9')))
861 864 * set:
862 865 <spanset+ 8:10>
863 866 8
864 867 9
865 868
866 869 $ try '9^::'
867 870 (dagrangepost
868 871 (parentpost
869 872 (symbol '9')))
870 873 * set:
871 874 <generatorsetasc+>
872 875 8
873 876 9
874 877
875 878 $ try '9^..'
876 879 (dagrangepost
877 880 (parentpost
878 881 (symbol '9')))
879 882 * set:
880 883 <generatorsetasc+>
881 884 8
882 885 9
883 886
884 887 x^:y should be resolved before omitting group operators
885 888
886 889 $ try '1^(:2)'
887 890 (parent
888 891 (symbol '1')
889 892 (group
890 893 (rangepre
891 894 (symbol '2'))))
892 895 hg: parse error: ^ expects a number 0, 1, or 2
893 896 [255]
894 897
895 898 x^:y should be resolved recursively
896 899
897 900 $ try 'sort(1^:2)'
898 901 (func
899 902 (symbol 'sort')
900 903 (range
901 904 (parentpost
902 905 (symbol '1'))
903 906 (symbol '2')))
904 907 * set:
905 908 <spanset+ 0:3>
906 909 0
907 910 1
908 911 2
909 912
910 913 $ try '(3^:4)^:2'
911 914 (range
912 915 (parentpost
913 916 (group
914 917 (range
915 918 (parentpost
916 919 (symbol '3'))
917 920 (symbol '4'))))
918 921 (symbol '2'))
919 922 * set:
920 923 <spanset+ 0:3>
921 924 0
922 925 1
923 926 2
924 927
925 928 $ try '(3^::4)^::2'
926 929 (dagrange
927 930 (parentpost
928 931 (group
929 932 (dagrange
930 933 (parentpost
931 934 (symbol '3'))
932 935 (symbol '4'))))
933 936 (symbol '2'))
934 937 * set:
935 938 <baseset+ [0, 1, 2]>
936 939 0
937 940 1
938 941 2
939 942
940 943 $ try '(9^:)^:'
941 944 (rangepost
942 945 (parentpost
943 946 (group
944 947 (rangepost
945 948 (parentpost
946 949 (symbol '9'))))))
947 950 * set:
948 951 <spanset+ 4:10>
949 952 4
950 953 5
951 954 6
952 955 7
953 956 8
954 957 9
955 958
956 959 x^ in alias should also be resolved
957 960
958 961 $ try 'A' --config 'revsetalias.A=1^:2'
959 962 (symbol 'A')
960 963 * expanded:
961 964 (range
962 965 (parentpost
963 966 (symbol '1'))
964 967 (symbol '2'))
965 968 * set:
966 969 <spanset+ 0:3>
967 970 0
968 971 1
969 972 2
970 973
971 974 $ try 'A:2' --config 'revsetalias.A=1^'
972 975 (range
973 976 (symbol 'A')
974 977 (symbol '2'))
975 978 * expanded:
976 979 (range
977 980 (parentpost
978 981 (symbol '1'))
979 982 (symbol '2'))
980 983 * set:
981 984 <spanset+ 0:3>
982 985 0
983 986 1
984 987 2
985 988
986 989 but not beyond the boundary of alias expansion, because the resolution should
987 990 be made at the parsing stage
988 991
989 992 $ try '1^A' --config 'revsetalias.A=:2'
990 993 (parent
991 994 (symbol '1')
992 995 (symbol 'A'))
993 996 * expanded:
994 997 (parent
995 998 (symbol '1')
996 999 (rangepre
997 1000 (symbol '2')))
998 1001 hg: parse error: ^ expects a number 0, 1, or 2
999 1002 [255]
1000 1003
1001 1004 '::' itself isn't a valid expression
1002 1005
1003 1006 $ try '::'
1004 1007 (dagrangeall
1005 1008 None)
1006 1009 hg: parse error: can't use '::' in this context
1007 1010 [255]
1008 1011
1009 1012 ancestor can accept 0 or more arguments
1010 1013
1011 1014 $ log 'ancestor()'
1012 1015 $ log 'ancestor(1)'
1013 1016 1
1014 1017 $ log 'ancestor(4,5)'
1015 1018 1
1016 1019 $ log 'ancestor(4,5) and 4'
1017 1020 $ log 'ancestor(0,0,1,3)'
1018 1021 0
1019 1022 $ log 'ancestor(3,1,5,3,5,1)'
1020 1023 1
1021 1024 $ log 'ancestor(0,1,3,5)'
1022 1025 0
1023 1026 $ log 'ancestor(1,2,3,4,5)'
1024 1027 1
1025 1028
1026 1029 test ancestors
1027 1030
1028 1031 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1029 1032 @ 9
1030 1033 o 8
1031 1034 | o 7
1032 1035 | o 6
1033 1036 |/|
1034 1037 | o 5
1035 1038 o | 4
1036 1039 | o 3
1037 1040 o | 2
1038 1041 |/
1039 1042 o 1
1040 1043 o 0
1041 1044
1042 1045 $ log 'ancestors(5)'
1043 1046 0
1044 1047 1
1045 1048 3
1046 1049 5
1047 1050 $ log 'ancestor(ancestors(5))'
1048 1051 0
1049 1052 $ log '::r3232()'
1050 1053 0
1051 1054 1
1052 1055 2
1053 1056 3
1054 1057
1055 1058 test common ancestors
1056 1059
1057 1060 $ hg log -T '{rev}\n' -r 'commonancestors(7 + 9)'
1058 1061 0
1059 1062 1
1060 1063 2
1061 1064 4
1062 1065
1063 1066 $ hg log -T '{rev}\n' -r 'commonancestors(heads(all()))'
1064 1067 0
1065 1068 1
1066 1069 2
1067 1070 4
1068 1071
1069 1072 $ hg log -T '{rev}\n' -r 'commonancestors(9)'
1070 1073 0
1071 1074 1
1072 1075 2
1073 1076 4
1074 1077 8
1075 1078 9
1076 1079
1077 1080 $ hg log -T '{rev}\n' -r 'commonancestors(8 + 9)'
1078 1081 0
1079 1082 1
1080 1083 2
1081 1084 4
1082 1085 8
1083 1086
1084 1087 test the specialized implementation of heads(commonancestors(..))
1085 1088 (2 gcas is tested in test-merge-criss-cross.t)
1086 1089
1087 1090 $ hg log -T '{rev}\n' -r 'heads(commonancestors(7 + 9))'
1088 1091 4
1089 1092 $ hg log -T '{rev}\n' -r 'heads(commonancestors(heads(all())))'
1090 1093 4
1091 1094 $ hg log -T '{rev}\n' -r 'heads(commonancestors(9))'
1092 1095 9
1093 1096 $ hg log -T '{rev}\n' -r 'heads(commonancestors(8 + 9))'
1094 1097 8
1095 1098
1096 1099 test ancestor variants of empty revision
1097 1100
1098 1101 $ log 'ancestor(none())'
1099 1102 $ log 'ancestors(none())'
1100 1103 $ log 'commonancestors(none())'
1101 1104 $ log 'heads(commonancestors(none()))'
1102 1105
1103 1106 test ancestors with depth limit
1104 1107
1105 1108 (depth=0 selects the node itself)
1106 1109
1107 1110 $ log 'reverse(ancestors(9, depth=0))'
1108 1111 9
1109 1112
1110 1113 (interleaved: '4' would be missing if heap queue were higher depth first)
1111 1114
1112 1115 $ log 'reverse(ancestors(8:9, depth=1))'
1113 1116 9
1114 1117 8
1115 1118 4
1116 1119
1117 1120 (interleaved: '2' would be missing if heap queue were higher depth first)
1118 1121
1119 1122 $ log 'reverse(ancestors(7+8, depth=2))'
1120 1123 8
1121 1124 7
1122 1125 6
1123 1126 5
1124 1127 4
1125 1128 2
1126 1129
1127 1130 (walk example above by separate queries)
1128 1131
1129 1132 $ log 'reverse(ancestors(8, depth=2)) + reverse(ancestors(7, depth=2))'
1130 1133 8
1131 1134 4
1132 1135 2
1133 1136 7
1134 1137 6
1135 1138 5
1136 1139
1137 1140 (walk 2nd and 3rd ancestors)
1138 1141
1139 1142 $ log 'reverse(ancestors(7, depth=3, startdepth=2))'
1140 1143 5
1141 1144 4
1142 1145 3
1143 1146 2
1144 1147
1145 1148 (interleaved: '4' would be missing if higher-depth ancestors weren't scanned)
1146 1149
1147 1150 $ log 'reverse(ancestors(7+8, depth=2, startdepth=2))'
1148 1151 5
1149 1152 4
1150 1153 2
1151 1154
1152 1155 (note that 'ancestors(x, depth=y, startdepth=z)' is not identical to
1153 1156 'ancestors(x, depth=y) - ancestors(x, depth=z-1)' because a node may have
1154 1157 multiple depths)
1155 1158
1156 1159 $ log 'reverse(ancestors(7+8, depth=2) - ancestors(7+8, depth=1))'
1157 1160 5
1158 1161 2
1159 1162
1160 1163 test bad arguments passed to ancestors()
1161 1164
1162 1165 $ log 'ancestors(., depth=-1)'
1163 1166 hg: parse error: negative depth
1164 1167 [255]
1165 1168 $ log 'ancestors(., depth=foo)'
1166 1169 hg: parse error: ancestors expects an integer depth
1167 1170 [255]
1168 1171
1169 1172 test descendants
1170 1173
1171 1174 $ hg log -G -T '{rev}\n' --config experimental.graphshorten=True
1172 1175 @ 9
1173 1176 o 8
1174 1177 | o 7
1175 1178 | o 6
1176 1179 |/|
1177 1180 | o 5
1178 1181 o | 4
1179 1182 | o 3
1180 1183 o | 2
1181 1184 |/
1182 1185 o 1
1183 1186 o 0
1184 1187
1185 1188 (null is ultimate root and has optimized path)
1186 1189
1187 1190 $ log 'null:4 & descendants(null)'
1188 1191 -1
1189 1192 0
1190 1193 1
1191 1194 2
1192 1195 3
1193 1196 4
1194 1197
1195 1198 (including merge)
1196 1199
1197 1200 $ log ':8 & descendants(2)'
1198 1201 2
1199 1202 4
1200 1203 6
1201 1204 7
1202 1205 8
1203 1206
1204 1207 (multiple roots)
1205 1208
1206 1209 $ log ':8 & descendants(2+5)'
1207 1210 2
1208 1211 4
1209 1212 5
1210 1213 6
1211 1214 7
1212 1215 8
1213 1216
1214 1217 test descendants with depth limit
1215 1218
1216 1219 (depth=0 selects the node itself)
1217 1220
1218 1221 $ log 'descendants(0, depth=0)'
1219 1222 0
1220 1223 $ log 'null: & descendants(null, depth=0)'
1221 1224 -1
1222 1225
1223 1226 (p2 = null should be ignored)
1224 1227
1225 1228 $ log 'null: & descendants(null, depth=2)'
1226 1229 -1
1227 1230 0
1228 1231 1
1229 1232
1230 1233 (multiple paths: depth(6) = (2, 3))
1231 1234
1232 1235 $ log 'descendants(1+3, depth=2)'
1233 1236 1
1234 1237 2
1235 1238 3
1236 1239 4
1237 1240 5
1238 1241 6
1239 1242
1240 1243 (multiple paths: depth(5) = (1, 2), depth(6) = (2, 3))
1241 1244
1242 1245 $ log 'descendants(3+1, depth=2, startdepth=2)'
1243 1246 4
1244 1247 5
1245 1248 6
1246 1249
1247 1250 (multiple depths: depth(6) = (0, 2, 4), search for depth=2)
1248 1251
1249 1252 $ log 'descendants(0+3+6, depth=3, startdepth=1)'
1250 1253 1
1251 1254 2
1252 1255 3
1253 1256 4
1254 1257 5
1255 1258 6
1256 1259 7
1257 1260
1258 1261 (multiple depths: depth(6) = (0, 4), no match)
1259 1262
1260 1263 $ log 'descendants(0+6, depth=3, startdepth=1)'
1261 1264 1
1262 1265 2
1263 1266 3
1264 1267 4
1265 1268 5
1266 1269 7
1267 1270
1268 1271 test ancestors/descendants relation subscript:
1269 1272
1270 1273 $ log 'tip#generations[0]'
1271 1274 9
1272 1275 $ log '.#generations[-1]'
1273 1276 8
1274 1277 $ log '.#g[(-1)]'
1275 1278 8
1276 1279
1280 $ log '6#generations[0:1]'
1281 6
1282 7
1283 $ log '6#generations[-1:1]'
1284 4
1285 5
1286 6
1287 7
1288 $ log '6#generations[0:]'
1289 6
1290 7
1291 $ log '5#generations[:0]'
1292 0
1293 1
1294 3
1295 5
1296 $ log '3#generations[:]'
1297 0
1298 1
1299 3
1300 5
1301 6
1302 7
1303
1277 1304 $ hg debugrevspec -p parsed 'roots(:)#g[2]'
1278 1305 * parsed:
1279 1306 (relsubscript
1280 1307 (func
1281 1308 (symbol 'roots')
1282 1309 (rangeall
1283 1310 None))
1284 1311 (symbol 'g')
1285 1312 (symbol '2'))
1286 1313 2
1287 1314 3
1288 1315
1289 1316 test author
1290 1317
1291 1318 $ log 'author(bob)'
1292 1319 2
1293 1320 $ log 'author("re:bob|test")'
1294 1321 0
1295 1322 1
1296 1323 2
1297 1324 3
1298 1325 4
1299 1326 5
1300 1327 6
1301 1328 7
1302 1329 8
1303 1330 9
1304 1331 $ log 'author(r"re:\S")'
1305 1332 0
1306 1333 1
1307 1334 2
1308 1335 3
1309 1336 4
1310 1337 5
1311 1338 6
1312 1339 7
1313 1340 8
1314 1341 9
1315 1342 $ log 'branch(é)'
1316 1343 8
1317 1344 9
1318 1345 $ log 'branch(a)'
1319 1346 0
1320 1347 $ hg log -r 'branch("re:a")' --template '{rev} {branch}\n'
1321 1348 0 a
1322 1349 2 a-b-c-
1323 1350 3 +a+b+c+
1324 1351 4 -a-b-c-
1325 1352 5 !a/b/c/
1326 1353 6 _a_b_c_
1327 1354 7 .a.b.c.
1328 1355 $ log 'children(ancestor(4,5))'
1329 1356 2
1330 1357 3
1331 1358
1332 1359 $ log 'children(4)'
1333 1360 6
1334 1361 8
1335 1362 $ log 'children(null)'
1336 1363 0
1337 1364
1338 1365 $ log 'closed()'
1339 1366 $ log 'contains(a)'
1340 1367 0
1341 1368 1
1342 1369 3
1343 1370 5
1344 1371 $ log 'contains("../repo/a")'
1345 1372 0
1346 1373 1
1347 1374 3
1348 1375 5
1349 1376 $ log 'desc(B)'
1350 1377 5
1351 1378 $ hg log -r 'desc(r"re:S?u")' --template "{rev} {desc|firstline}\n"
1352 1379 5 5 bug
1353 1380 6 6 issue619
1354 1381 $ log 'descendants(2 or 3)'
1355 1382 2
1356 1383 3
1357 1384 4
1358 1385 5
1359 1386 6
1360 1387 7
1361 1388 8
1362 1389 9
1363 1390 $ log 'file("b*")'
1364 1391 1
1365 1392 4
1366 1393 $ log 'filelog("b")'
1367 1394 1
1368 1395 4
1369 1396 $ log 'filelog("../repo/b")'
1370 1397 1
1371 1398 4
1372 1399 $ log 'follow()'
1373 1400 0
1374 1401 1
1375 1402 2
1376 1403 4
1377 1404 8
1378 1405 9
1379 1406 $ log 'grep("issue\d+")'
1380 1407 6
1381 1408 $ try 'grep("(")' # invalid regular expression
1382 1409 (func
1383 1410 (symbol 'grep')
1384 1411 (string '('))
1385 1412 hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re)
1386 1413 [255]
1387 1414 $ try 'grep("\bissue\d+")'
1388 1415 (func
1389 1416 (symbol 'grep')
1390 1417 (string '\x08issue\\d+'))
1391 1418 * set:
1392 1419 <filteredset
1393 1420 <fullreposet+ 0:10>,
1394 1421 <grep '\x08issue\\d+'>>
1395 1422 $ try 'grep(r"\bissue\d+")'
1396 1423 (func
1397 1424 (symbol 'grep')
1398 1425 (string '\\bissue\\d+'))
1399 1426 * set:
1400 1427 <filteredset
1401 1428 <fullreposet+ 0:10>,
1402 1429 <grep '\\bissue\\d+'>>
1403 1430 6
1404 1431 $ try 'grep(r"\")'
1405 1432 hg: parse error at 7: unterminated string
1406 1433 (grep(r"\")
1407 1434 ^ here)
1408 1435 [255]
1409 1436 $ log 'head()'
1410 1437 0
1411 1438 1
1412 1439 2
1413 1440 3
1414 1441 4
1415 1442 5
1416 1443 6
1417 1444 7
1418 1445 9
1419 1446
1420 1447 Test heads
1421 1448
1422 1449 $ log 'heads(6::)'
1423 1450 7
1424 1451
1425 1452 heads() can be computed in subset '9:'
1426 1453
1427 1454 $ hg debugrevspec -s '9: & heads(all())'
1428 1455 * set:
1429 1456 <filteredset
1430 1457 <baseset [9]>,
1431 1458 <baseset+ [7, 9]>>
1432 1459 9
1433 1460
1434 1461 but should follow the order of the subset
1435 1462
1436 1463 $ log 'heads(all())'
1437 1464 7
1438 1465 9
1439 1466 $ log 'heads(tip:0)'
1440 1467 7
1441 1468 9
1442 1469 $ log 'tip:0 & heads(all())'
1443 1470 9
1444 1471 7
1445 1472 $ log 'tip:0 & heads(0:tip)'
1446 1473 9
1447 1474 7
1448 1475
1449 1476 $ log 'keyword(issue)'
1450 1477 6
1451 1478 $ log 'keyword("test a")'
1452 1479
1453 1480 Test first (=limit) and last
1454 1481
1455 1482 $ log 'limit(head(), 1)'
1456 1483 0
1457 1484 $ log 'limit(author("re:bob|test"), 3, 5)'
1458 1485 5
1459 1486 6
1460 1487 7
1461 1488 $ log 'limit(author("re:bob|test"), offset=6)'
1462 1489 6
1463 1490 $ log 'limit(author("re:bob|test"), offset=10)'
1464 1491 $ log 'limit(all(), 1, -1)'
1465 1492 hg: parse error: negative offset
1466 1493 [255]
1467 1494 $ log 'limit(all(), -1)'
1468 1495 hg: parse error: negative number to select
1469 1496 [255]
1470 1497 $ log 'limit(all(), 0)'
1471 1498
1472 1499 $ log 'last(all(), -1)'
1473 1500 hg: parse error: negative number to select
1474 1501 [255]
1475 1502 $ log 'last(all(), 0)'
1476 1503 $ log 'last(all(), 1)'
1477 1504 9
1478 1505 $ log 'last(all(), 2)'
1479 1506 8
1480 1507 9
1481 1508
1482 1509 Test smartset.slice() by first/last()
1483 1510
1484 1511 (using unoptimized set, filteredset as example)
1485 1512
1486 1513 $ hg debugrevspec --no-show-revs -s '0:7 & branch("re:")'
1487 1514 * set:
1488 1515 <filteredset
1489 1516 <spanset+ 0:8>,
1490 1517 <branch 're:'>>
1491 1518 $ log 'limit(0:7 & branch("re:"), 3, 4)'
1492 1519 4
1493 1520 5
1494 1521 6
1495 1522 $ log 'limit(7:0 & branch("re:"), 3, 4)'
1496 1523 3
1497 1524 2
1498 1525 1
1499 1526 $ log 'last(0:7 & branch("re:"), 2)'
1500 1527 6
1501 1528 7
1502 1529
1503 1530 (using baseset)
1504 1531
1505 1532 $ hg debugrevspec --no-show-revs -s 0+1+2+3+4+5+6+7
1506 1533 * set:
1507 1534 <baseset [0, 1, 2, 3, 4, 5, 6, 7]>
1508 1535 $ hg debugrevspec --no-show-revs -s 0::7
1509 1536 * set:
1510 1537 <baseset+ [0, 1, 2, 3, 4, 5, 6, 7]>
1511 1538 $ log 'limit(0+1+2+3+4+5+6+7, 3, 4)'
1512 1539 4
1513 1540 5
1514 1541 6
1515 1542 $ log 'limit(sort(0::7, rev), 3, 4)'
1516 1543 4
1517 1544 5
1518 1545 6
1519 1546 $ log 'limit(sort(0::7, -rev), 3, 4)'
1520 1547 3
1521 1548 2
1522 1549 1
1523 1550 $ log 'last(sort(0::7, rev), 2)'
1524 1551 6
1525 1552 7
1526 1553 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 6)'
1527 1554 * set:
1528 1555 <baseset+ [6, 7]>
1529 1556 6
1530 1557 7
1531 1558 $ hg debugrevspec -s 'limit(sort(0::7, rev), 3, 9)'
1532 1559 * set:
1533 1560 <baseset+ []>
1534 1561 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 6)'
1535 1562 * set:
1536 1563 <baseset- [0, 1]>
1537 1564 1
1538 1565 0
1539 1566 $ hg debugrevspec -s 'limit(sort(0::7, -rev), 3, 9)'
1540 1567 * set:
1541 1568 <baseset- []>
1542 1569 $ hg debugrevspec -s 'limit(0::7, 0)'
1543 1570 * set:
1544 1571 <baseset+ []>
1545 1572
1546 1573 (using spanset)
1547 1574
1548 1575 $ hg debugrevspec --no-show-revs -s 0:7
1549 1576 * set:
1550 1577 <spanset+ 0:8>
1551 1578 $ log 'limit(0:7, 3, 4)'
1552 1579 4
1553 1580 5
1554 1581 6
1555 1582 $ log 'limit(7:0, 3, 4)'
1556 1583 3
1557 1584 2
1558 1585 1
1559 1586 $ log 'limit(0:7, 3, 6)'
1560 1587 6
1561 1588 7
1562 1589 $ log 'limit(7:0, 3, 6)'
1563 1590 1
1564 1591 0
1565 1592 $ log 'last(0:7, 2)'
1566 1593 6
1567 1594 7
1568 1595 $ hg debugrevspec -s 'limit(0:7, 3, 6)'
1569 1596 * set:
1570 1597 <spanset+ 6:8>
1571 1598 6
1572 1599 7
1573 1600 $ hg debugrevspec -s 'limit(0:7, 3, 9)'
1574 1601 * set:
1575 1602 <spanset+ 8:8>
1576 1603 $ hg debugrevspec -s 'limit(7:0, 3, 6)'
1577 1604 * set:
1578 1605 <spanset- 0:2>
1579 1606 1
1580 1607 0
1581 1608 $ hg debugrevspec -s 'limit(7:0, 3, 9)'
1582 1609 * set:
1583 1610 <spanset- 0:0>
1584 1611 $ hg debugrevspec -s 'limit(0:7, 0)'
1585 1612 * set:
1586 1613 <spanset+ 0:0>
1587 1614
1588 1615 Test order of first/last revisions
1589 1616
1590 1617 $ hg debugrevspec -s 'first(4:0, 3) & 3:'
1591 1618 * set:
1592 1619 <filteredset
1593 1620 <spanset- 2:5>,
1594 1621 <spanset+ 3:10>>
1595 1622 4
1596 1623 3
1597 1624
1598 1625 $ hg debugrevspec -s '3: & first(4:0, 3)'
1599 1626 * set:
1600 1627 <filteredset
1601 1628 <spanset+ 3:10>,
1602 1629 <spanset- 2:5>>
1603 1630 3
1604 1631 4
1605 1632
1606 1633 $ hg debugrevspec -s 'last(4:0, 3) & :1'
1607 1634 * set:
1608 1635 <filteredset
1609 1636 <spanset- 0:3>,
1610 1637 <spanset+ 0:2>>
1611 1638 1
1612 1639 0
1613 1640
1614 1641 $ hg debugrevspec -s ':1 & last(4:0, 3)'
1615 1642 * set:
1616 1643 <filteredset
1617 1644 <spanset+ 0:2>,
1618 1645 <spanset+ 0:3>>
1619 1646 0
1620 1647 1
1621 1648
1622 1649 Test that scmutil.revsingle() returns the last revision
1623 1650
1624 1651 $ hg debugrevspec -s 'last(0::)'
1625 1652 * set:
1626 1653 <baseset slice=0:1
1627 1654 <generatorsetasc->>
1628 1655 9
1629 1656 $ hg identify -r '0::' --num
1630 1657 9
1631 1658
1632 1659 Test matching
1633 1660
1634 1661 $ log 'matching(6)'
1635 1662 6
1636 1663 $ log 'matching(6:7, "phase parents user date branch summary files description substate")'
1637 1664 6
1638 1665 7
1639 1666
1640 1667 Testing min and max
1641 1668
1642 1669 max: simple
1643 1670
1644 1671 $ log 'max(contains(a))'
1645 1672 5
1646 1673
1647 1674 max: simple on unordered set
1648 1675
1649 1676 $ log 'max((4+0+2+5+7) and contains(a))'
1650 1677 5
1651 1678
1652 1679 max: no result
1653 1680
1654 1681 $ log 'max(contains(stringthatdoesnotappearanywhere))'
1655 1682
1656 1683 max: no result on unordered set
1657 1684
1658 1685 $ log 'max((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1659 1686
1660 1687 min: simple
1661 1688
1662 1689 $ log 'min(contains(a))'
1663 1690 0
1664 1691
1665 1692 min: simple on unordered set
1666 1693
1667 1694 $ log 'min((4+0+2+5+7) and contains(a))'
1668 1695 0
1669 1696
1670 1697 min: empty
1671 1698
1672 1699 $ log 'min(contains(stringthatdoesnotappearanywhere))'
1673 1700
1674 1701 min: empty on unordered set
1675 1702
1676 1703 $ log 'min((4+0+2+5+7) and contains(stringthatdoesnotappearanywhere))'
1677 1704
1678 1705
1679 1706 $ log 'merge()'
1680 1707 6
1681 1708 $ log 'branchpoint()'
1682 1709 1
1683 1710 4
1684 1711 $ log 'modifies(b)'
1685 1712 4
1686 1713 $ log 'modifies("path:b")'
1687 1714 4
1688 1715 $ log 'modifies("*")'
1689 1716 4
1690 1717 6
1691 1718 $ log 'modifies("set:modified()")'
1692 1719 4
1693 1720 $ log 'id(5)'
1694 1721 2
1695 1722 $ log 'only(9)'
1696 1723 8
1697 1724 9
1698 1725 $ log 'only(8)'
1699 1726 8
1700 1727 $ log 'only(9, 5)'
1701 1728 2
1702 1729 4
1703 1730 8
1704 1731 9
1705 1732 $ log 'only(7 + 9, 5 + 2)'
1706 1733 4
1707 1734 6
1708 1735 7
1709 1736 8
1710 1737 9
1711 1738
1712 1739 Test empty set input
1713 1740 $ log 'only(p2())'
1714 1741 $ log 'only(p1(), p2())'
1715 1742 0
1716 1743 1
1717 1744 2
1718 1745 4
1719 1746 8
1720 1747 9
1721 1748
1722 1749 Test '%' operator
1723 1750
1724 1751 $ log '9%'
1725 1752 8
1726 1753 9
1727 1754 $ log '9%5'
1728 1755 2
1729 1756 4
1730 1757 8
1731 1758 9
1732 1759 $ log '(7 + 9)%(5 + 2)'
1733 1760 4
1734 1761 6
1735 1762 7
1736 1763 8
1737 1764 9
1738 1765
1739 1766 Test operand of '%' is optimized recursively (issue4670)
1740 1767
1741 1768 $ try --optimize '8:9-8%'
1742 1769 (onlypost
1743 1770 (minus
1744 1771 (range
1745 1772 (symbol '8')
1746 1773 (symbol '9'))
1747 1774 (symbol '8')))
1748 1775 * optimized:
1749 1776 (func
1750 1777 (symbol 'only')
1751 1778 (difference
1752 1779 (range
1753 1780 (symbol '8')
1754 1781 (symbol '9'))
1755 1782 (symbol '8')))
1756 1783 * set:
1757 1784 <baseset+ [8, 9]>
1758 1785 8
1759 1786 9
1760 1787 $ try --optimize '(9)%(5)'
1761 1788 (only
1762 1789 (group
1763 1790 (symbol '9'))
1764 1791 (group
1765 1792 (symbol '5')))
1766 1793 * optimized:
1767 1794 (func
1768 1795 (symbol 'only')
1769 1796 (list
1770 1797 (symbol '9')
1771 1798 (symbol '5')))
1772 1799 * set:
1773 1800 <baseset+ [2, 4, 8, 9]>
1774 1801 2
1775 1802 4
1776 1803 8
1777 1804 9
1778 1805
1779 1806 Test the order of operations
1780 1807
1781 1808 $ log '7 + 9%5 + 2'
1782 1809 7
1783 1810 2
1784 1811 4
1785 1812 8
1786 1813 9
1787 1814
1788 1815 Test explicit numeric revision
1789 1816 $ log 'rev(-2)'
1790 1817 $ log 'rev(-1)'
1791 1818 -1
1792 1819 $ log 'rev(0)'
1793 1820 0
1794 1821 $ log 'rev(9)'
1795 1822 9
1796 1823 $ log 'rev(10)'
1797 1824 $ log 'rev(tip)'
1798 1825 hg: parse error: rev expects a number
1799 1826 [255]
1800 1827
1801 1828 Test hexadecimal revision
1802 1829 $ log 'id(2)'
1803 1830 $ log 'id(5)'
1804 1831 2
1805 1832 $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x5)'
1806 1833 2
1807 1834 $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x5'
1808 1835 2
1809 1836 $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'id(x)'
1810 1837 $ hg --config experimental.revisions.prefixhexnode=yes log --template '{rev}\n' -r 'x'
1811 1838 abort: 00changelog.i@: ambiguous identifier!
1812 1839 [255]
1813 1840 $ log 'id(23268)'
1814 1841 4
1815 1842 $ log 'id(2785f51eece)'
1816 1843 0
1817 1844 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532c)'
1818 1845 8
1819 1846 $ log 'id(d5d0dcbdc4a)'
1820 1847 $ log 'id(d5d0dcbdc4w)'
1821 1848 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532d)'
1822 1849 $ log 'id(d5d0dcbdc4d9ff5dbb2d336f32f0bb561c1a532q)'
1823 1850 $ log 'id(1.0)'
1824 1851 $ log 'id(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx)'
1825 1852
1826 1853 Test null revision
1827 1854 $ log '(null)'
1828 1855 -1
1829 1856 $ log '(null:0)'
1830 1857 -1
1831 1858 0
1832 1859 $ log '(0:null)'
1833 1860 0
1834 1861 -1
1835 1862 $ log 'null::0'
1836 1863 -1
1837 1864 0
1838 1865 $ log 'null:tip - 0:'
1839 1866 -1
1840 1867 $ log 'null: and null::' | head -1
1841 1868 -1
1842 1869 $ log 'null: or 0:' | head -2
1843 1870 -1
1844 1871 0
1845 1872 $ log 'ancestors(null)'
1846 1873 -1
1847 1874 $ log 'reverse(null:)' | tail -2
1848 1875 0
1849 1876 -1
1850 1877 $ log 'first(null:)'
1851 1878 -1
1852 1879 $ log 'min(null:)'
1853 1880 BROKEN: should be '-1'
1854 1881 $ log 'tip:null and all()' | tail -2
1855 1882 1
1856 1883 0
1857 1884
1858 1885 Test working-directory revision
1859 1886 $ hg debugrevspec 'wdir()'
1860 1887 2147483647
1861 1888 $ hg debugrevspec 'wdir()^'
1862 1889 9
1863 1890 $ hg up 7
1864 1891 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
1865 1892 $ hg debugrevspec 'wdir()^'
1866 1893 7
1867 1894 $ hg debugrevspec 'wdir()^0'
1868 1895 2147483647
1869 1896 $ hg debugrevspec 'wdir()~3'
1870 1897 5
1871 1898 $ hg debugrevspec 'ancestors(wdir())'
1872 1899 0
1873 1900 1
1874 1901 2
1875 1902 3
1876 1903 4
1877 1904 5
1878 1905 6
1879 1906 7
1880 1907 2147483647
1881 1908 $ hg debugrevspec '0:wdir() & ancestor(wdir())'
1882 1909 2147483647
1883 1910 $ hg debugrevspec '0:wdir() & ancestor(.:wdir())'
1884 1911 4
1885 1912 $ hg debugrevspec '0:wdir() & ancestor(wdir(), wdir())'
1886 1913 2147483647
1887 1914 $ hg debugrevspec '0:wdir() & ancestor(wdir(), tip)'
1888 1915 4
1889 1916 $ hg debugrevspec 'null:wdir() & ancestor(wdir(), null)'
1890 1917 -1
1891 1918 $ hg debugrevspec 'wdir()~0'
1892 1919 2147483647
1893 1920 $ hg debugrevspec 'p1(wdir())'
1894 1921 7
1895 1922 $ hg debugrevspec 'p2(wdir())'
1896 1923 $ hg debugrevspec 'parents(wdir())'
1897 1924 7
1898 1925 $ hg debugrevspec 'wdir()^1'
1899 1926 7
1900 1927 $ hg debugrevspec 'wdir()^2'
1901 1928 $ hg debugrevspec 'wdir()^3'
1902 1929 hg: parse error: ^ expects a number 0, 1, or 2
1903 1930 [255]
1904 1931 For test consistency
1905 1932 $ hg up 9
1906 1933 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1907 1934 $ hg debugrevspec 'tip or wdir()'
1908 1935 9
1909 1936 2147483647
1910 1937 $ hg debugrevspec '0:tip and wdir()'
1911 1938 $ log '0:wdir()' | tail -3
1912 1939 8
1913 1940 9
1914 1941 2147483647
1915 1942 $ log 'wdir():0' | head -3
1916 1943 2147483647
1917 1944 9
1918 1945 8
1919 1946 $ log 'wdir():wdir()'
1920 1947 2147483647
1921 1948 $ log '(all() + wdir()) & min(. + wdir())'
1922 1949 9
1923 1950 $ log '(all() + wdir()) & max(. + wdir())'
1924 1951 2147483647
1925 1952 $ log 'first(wdir() + .)'
1926 1953 2147483647
1927 1954 $ log 'last(. + wdir())'
1928 1955 2147483647
1929 1956
1930 1957 Test working-directory integer revision and node id
1931 1958 (BUG: '0:wdir()' is still needed to populate wdir revision)
1932 1959
1933 1960 $ hg debugrevspec '0:wdir() & 2147483647'
1934 1961 2147483647
1935 1962 $ hg debugrevspec '0:wdir() & rev(2147483647)'
1936 1963 2147483647
1937 1964 $ hg debugrevspec '0:wdir() & ffffffffffffffffffffffffffffffffffffffff'
1938 1965 2147483647
1939 1966 $ hg debugrevspec '0:wdir() & ffffffffffff'
1940 1967 2147483647
1941 1968 $ hg debugrevspec '0:wdir() & id(ffffffffffffffffffffffffffffffffffffffff)'
1942 1969 2147483647
1943 1970 $ hg debugrevspec '0:wdir() & id(ffffffffffff)'
1944 1971 2147483647
1945 1972
1946 1973 $ cd ..
1947 1974
1948 1975 Test short 'ff...' hash collision
1949 1976 (BUG: '0:wdir()' is still needed to populate wdir revision)
1950 1977
1951 1978 $ hg init wdir-hashcollision
1952 1979 $ cd wdir-hashcollision
1953 1980 $ cat <<EOF >> .hg/hgrc
1954 1981 > [experimental]
1955 1982 > evolution.createmarkers=True
1956 1983 > EOF
1957 1984 $ echo 0 > a
1958 1985 $ hg ci -qAm 0
1959 1986 $ for i in 2463 2961 6726 78127; do
1960 1987 > hg up -q 0
1961 1988 > echo $i > a
1962 1989 > hg ci -qm $i
1963 1990 > done
1964 1991 $ hg up -q null
1965 1992 $ hg log -r '0:wdir()' -T '{rev}:{node} {shortest(node, 3)}\n'
1966 1993 0:b4e73ffab476aa0ee32ed81ca51e07169844bc6a b4e
1967 1994 1:fffbae3886c8fbb2114296380d276fd37715d571 fffba
1968 1995 2:fffb6093b00943f91034b9bdad069402c834e572 fffb6
1969 1996 3:fff48a9b9de34a4d64120c29548214c67980ade3 fff4
1970 1997 4:ffff85cff0ff78504fcdc3c0bc10de0c65379249 ffff8
1971 1998 2147483647:ffffffffffffffffffffffffffffffffffffffff fffff
1972 1999 $ hg debugobsolete fffbae3886c8fbb2114296380d276fd37715d571
1973 2000 obsoleted 1 changesets
1974 2001
1975 2002 $ hg debugrevspec '0:wdir() & fff'
1976 2003 abort: 00changelog.i@fff: ambiguous identifier!
1977 2004 [255]
1978 2005 $ hg debugrevspec '0:wdir() & ffff'
1979 2006 abort: 00changelog.i@ffff: ambiguous identifier!
1980 2007 [255]
1981 2008 $ hg debugrevspec '0:wdir() & fffb'
1982 2009 abort: 00changelog.i@fffb: ambiguous identifier!
1983 2010 [255]
1984 2011 BROKEN should be '2' (node lookup uses unfiltered repo)
1985 2012 $ hg debugrevspec '0:wdir() & id(fffb)'
1986 2013 BROKEN should be '2' (node lookup uses unfiltered repo)
1987 2014 $ hg debugrevspec '0:wdir() & ffff8'
1988 2015 4
1989 2016 $ hg debugrevspec '0:wdir() & fffff'
1990 2017 2147483647
1991 2018
1992 2019 $ cd ..
1993 2020
1994 2021 Test branch() with wdir()
1995 2022
1996 2023 $ cd repo
1997 2024
1998 2025 $ log '0:wdir() & branch("literal:Γ©")'
1999 2026 8
2000 2027 9
2001 2028 2147483647
2002 2029 $ log '0:wdir() & branch("re:Γ©")'
2003 2030 8
2004 2031 9
2005 2032 2147483647
2006 2033 $ log '0:wdir() & branch("re:^a")'
2007 2034 0
2008 2035 2
2009 2036 $ log '0:wdir() & branch(8)'
2010 2037 8
2011 2038 9
2012 2039 2147483647
2013 2040
2014 2041 branch(wdir()) returns all revisions belonging to the working branch. The wdir
2015 2042 itself isn't returned unless it is explicitly populated.
2016 2043
2017 2044 $ log 'branch(wdir())'
2018 2045 8
2019 2046 9
2020 2047 $ log '0:wdir() & branch(wdir())'
2021 2048 8
2022 2049 9
2023 2050 2147483647
2024 2051
2025 2052 $ log 'outgoing()'
2026 2053 8
2027 2054 9
2028 2055 $ log 'outgoing("../remote1")'
2029 2056 8
2030 2057 9
2031 2058 $ log 'outgoing("../remote2")'
2032 2059 3
2033 2060 5
2034 2061 6
2035 2062 7
2036 2063 9
2037 2064 $ log 'p1(merge())'
2038 2065 5
2039 2066 $ log 'p2(merge())'
2040 2067 4
2041 2068 $ log 'parents(merge())'
2042 2069 4
2043 2070 5
2044 2071 $ log 'p1(branchpoint())'
2045 2072 0
2046 2073 2
2047 2074 $ log 'p2(branchpoint())'
2048 2075 $ log 'parents(branchpoint())'
2049 2076 0
2050 2077 2
2051 2078 $ log 'removes(a)'
2052 2079 2
2053 2080 6
2054 2081 $ log 'roots(all())'
2055 2082 0
2056 2083 $ log 'reverse(2 or 3 or 4 or 5)'
2057 2084 5
2058 2085 4
2059 2086 3
2060 2087 2
2061 2088 $ log 'reverse(all())'
2062 2089 9
2063 2090 8
2064 2091 7
2065 2092 6
2066 2093 5
2067 2094 4
2068 2095 3
2069 2096 2
2070 2097 1
2071 2098 0
2072 2099 $ log 'reverse(all()) & filelog(b)'
2073 2100 4
2074 2101 1
2075 2102 $ log 'rev(5)'
2076 2103 5
2077 2104 $ log 'sort(limit(reverse(all()), 3))'
2078 2105 7
2079 2106 8
2080 2107 9
2081 2108 $ log 'sort(2 or 3 or 4 or 5, date)'
2082 2109 2
2083 2110 3
2084 2111 5
2085 2112 4
2086 2113 $ log 'tagged()'
2087 2114 6
2088 2115 $ log 'tag()'
2089 2116 6
2090 2117 $ log 'tag(1.0)'
2091 2118 6
2092 2119 $ log 'tag(tip)'
2093 2120 9
2094 2121
2095 2122 Test order of revisions in compound expression
2096 2123 ----------------------------------------------
2097 2124
2098 2125 The general rule is that only the outermost (= leftmost) predicate can
2099 2126 enforce its ordering requirement. The other predicates should take the
2100 2127 ordering defined by it.
2101 2128
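A minimal plain-Python sketch of that rule (an illustration, not the revset implementation itself): intersecting keeps the order of the left, outermost operand.

  # 'A & B': iterate A in its own order, keep only revisions that are also in B
  def ordered_and(a, b):
      bset = set(b)
      return [r for r in a if r in bset]

  assert ordered_and([2, 1, 0], [0, 1, 2]) == [2, 1, 0]   # like '2:0 & 0::2'
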
2102 2129 'A & B' should follow the order of 'A':
2103 2130
2104 2131 $ log '2:0 & 0::2'
2105 2132 2
2106 2133 1
2107 2134 0
2108 2135
2109 2136 'head()' combines sets in right order:
2110 2137
2111 2138 $ log '2:0 & head()'
2112 2139 2
2113 2140 1
2114 2141 0
2115 2142
2116 2143 'x:y' takes ordering parameter into account:
2117 2144
2118 2145 $ try -p optimized '3:0 & 0:3 & not 2:1'
2119 2146 * optimized:
2120 2147 (difference
2121 2148 (and
2122 2149 (range
2123 2150 (symbol '3')
2124 2151 (symbol '0'))
2125 2152 (range
2126 2153 (symbol '0')
2127 2154 (symbol '3')))
2128 2155 (range
2129 2156 (symbol '2')
2130 2157 (symbol '1')))
2131 2158 * set:
2132 2159 <filteredset
2133 2160 <filteredset
2134 2161 <spanset- 0:4>,
2135 2162 <spanset+ 0:4>>,
2136 2163 <not
2137 2164 <spanset+ 1:3>>>
2138 2165 3
2139 2166 0
2140 2167
2141 2168 'a + b', which is optimized to '_list(a b)', should take the ordering of
2142 2169 the left expression:
2143 2170
2144 2171 $ try --optimize '2:0 & (0 + 1 + 2)'
2145 2172 (and
2146 2173 (range
2147 2174 (symbol '2')
2148 2175 (symbol '0'))
2149 2176 (group
2150 2177 (or
2151 2178 (list
2152 2179 (symbol '0')
2153 2180 (symbol '1')
2154 2181 (symbol '2')))))
2155 2182 * optimized:
2156 2183 (and
2157 2184 (range
2158 2185 (symbol '2')
2159 2186 (symbol '0'))
2160 2187 (func
2161 2188 (symbol '_list')
2162 2189 (string '0\x001\x002')))
2163 2190 * set:
2164 2191 <filteredset
2165 2192 <spanset- 0:3>,
2166 2193 <baseset [0, 1, 2]>>
2167 2194 2
2168 2195 1
2169 2196 0
2170 2197
2171 2198 'A + B' should take the ordering of the left expression:
2172 2199
2173 2200 $ try --optimize '2:0 & (0:1 + 2)'
2174 2201 (and
2175 2202 (range
2176 2203 (symbol '2')
2177 2204 (symbol '0'))
2178 2205 (group
2179 2206 (or
2180 2207 (list
2181 2208 (range
2182 2209 (symbol '0')
2183 2210 (symbol '1'))
2184 2211 (symbol '2')))))
2185 2212 * optimized:
2186 2213 (and
2187 2214 (range
2188 2215 (symbol '2')
2189 2216 (symbol '0'))
2190 2217 (or
2191 2218 (list
2192 2219 (range
2193 2220 (symbol '0')
2194 2221 (symbol '1'))
2195 2222 (symbol '2'))))
2196 2223 * set:
2197 2224 <filteredset
2198 2225 <spanset- 0:3>,
2199 2226 <addset
2200 2227 <spanset+ 0:2>,
2201 2228 <baseset [2]>>>
2202 2229 2
2203 2230 1
2204 2231 0
2205 2232
2206 2233 '_intlist(a b)' should behave like 'a + b':
2207 2234
2208 2235 $ trylist --optimize '2:0 & %ld' 0 1 2
2209 2236 (and
2210 2237 (range
2211 2238 (symbol '2')
2212 2239 (symbol '0'))
2213 2240 (func
2214 2241 (symbol '_intlist')
2215 2242 (string '0\x001\x002')))
2216 2243 * optimized:
2217 2244 (andsmally
2218 2245 (range
2219 2246 (symbol '2')
2220 2247 (symbol '0'))
2221 2248 (func
2222 2249 (symbol '_intlist')
2223 2250 (string '0\x001\x002')))
2224 2251 * set:
2225 2252 <filteredset
2226 2253 <spanset- 0:3>,
2227 2254 <baseset+ [0, 1, 2]>>
2228 2255 2
2229 2256 1
2230 2257 0
2231 2258
2232 2259 $ trylist --optimize '%ld & 2:0' 0 2 1
2233 2260 (and
2234 2261 (func
2235 2262 (symbol '_intlist')
2236 2263 (string '0\x002\x001'))
2237 2264 (range
2238 2265 (symbol '2')
2239 2266 (symbol '0')))
2240 2267 * optimized:
2241 2268 (and
2242 2269 (func
2243 2270 (symbol '_intlist')
2244 2271 (string '0\x002\x001'))
2245 2272 (range
2246 2273 (symbol '2')
2247 2274 (symbol '0')))
2248 2275 * set:
2249 2276 <filteredset
2250 2277 <baseset [0, 2, 1]>,
2251 2278 <spanset- 0:3>>
2252 2279 0
2253 2280 2
2254 2281 1
2255 2282
2256 2283 '_hexlist(a b)' should behave like 'a + b':
2257 2284
2258 2285 $ trylist --optimize --bin '2:0 & %ln' `hg log -T '{node} ' -r0:2`
2259 2286 (and
2260 2287 (range
2261 2288 (symbol '2')
2262 2289 (symbol '0'))
2263 2290 (func
2264 2291 (symbol '_hexlist')
2265 2292 (string '*'))) (glob)
2266 2293 * optimized:
2267 2294 (and
2268 2295 (range
2269 2296 (symbol '2')
2270 2297 (symbol '0'))
2271 2298 (func
2272 2299 (symbol '_hexlist')
2273 2300 (string '*'))) (glob)
2274 2301 * set:
2275 2302 <filteredset
2276 2303 <spanset- 0:3>,
2277 2304 <baseset [0, 1, 2]>>
2278 2305 2
2279 2306 1
2280 2307 0
2281 2308
2282 2309 $ trylist --optimize --bin '%ln & 2:0' `hg log -T '{node} ' -r0+2+1`
2283 2310 (and
2284 2311 (func
2285 2312 (symbol '_hexlist')
2286 2313 (string '*')) (glob)
2287 2314 (range
2288 2315 (symbol '2')
2289 2316 (symbol '0')))
2290 2317 * optimized:
2291 2318 (andsmally
2292 2319 (func
2293 2320 (symbol '_hexlist')
2294 2321 (string '*')) (glob)
2295 2322 (range
2296 2323 (symbol '2')
2297 2324 (symbol '0')))
2298 2325 * set:
2299 2326 <baseset [0, 2, 1]>
2300 2327 0
2301 2328 2
2302 2329 1
2303 2330
2304 2331 '_list' should not go through the slow follow-order path if order doesn't
2305 2332 matter:
2306 2333
2307 2334 $ try -p optimized '2:0 & not (0 + 1)'
2308 2335 * optimized:
2309 2336 (difference
2310 2337 (range
2311 2338 (symbol '2')
2312 2339 (symbol '0'))
2313 2340 (func
2314 2341 (symbol '_list')
2315 2342 (string '0\x001')))
2316 2343 * set:
2317 2344 <filteredset
2318 2345 <spanset- 0:3>,
2319 2346 <not
2320 2347 <baseset [0, 1]>>>
2321 2348 2
2322 2349
2323 2350 $ try -p optimized '2:0 & not (0:2 & (0 + 1))'
2324 2351 * optimized:
2325 2352 (difference
2326 2353 (range
2327 2354 (symbol '2')
2328 2355 (symbol '0'))
2329 2356 (and
2330 2357 (range
2331 2358 (symbol '0')
2332 2359 (symbol '2'))
2333 2360 (func
2334 2361 (symbol '_list')
2335 2362 (string '0\x001'))))
2336 2363 * set:
2337 2364 <filteredset
2338 2365 <spanset- 0:3>,
2339 2366 <not
2340 2367 <baseset [0, 1]>>>
2341 2368 2
2342 2369
2343 2370 because 'present()' does nothing other than suppressing an error, the
2344 2371 ordering requirement should be forwarded to the nested expression
2345 2372
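Roughly what present() amounts to, sketched in plain Python (assuming a hypothetical evaluate(subexpr, order) callable; the real predicate works on smartsets, so this is only an approximation):

  # present(): evaluate the inner expression, but turn a failed lookup into an
  # empty result instead of aborting; the ordering request is forwarded as-is
  def present(evaluate, subexpr, order):
      try:
          return evaluate(subexpr, order)
      except LookupError:
          return []
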
2346 2373 $ try -p optimized 'present(2 + 0 + 1)'
2347 2374 * optimized:
2348 2375 (func
2349 2376 (symbol 'present')
2350 2377 (func
2351 2378 (symbol '_list')
2352 2379 (string '2\x000\x001')))
2353 2380 * set:
2354 2381 <baseset [2, 0, 1]>
2355 2382 2
2356 2383 0
2357 2384 1
2358 2385
2359 2386 $ try --optimize '2:0 & present(0 + 1 + 2)'
2360 2387 (and
2361 2388 (range
2362 2389 (symbol '2')
2363 2390 (symbol '0'))
2364 2391 (func
2365 2392 (symbol 'present')
2366 2393 (or
2367 2394 (list
2368 2395 (symbol '0')
2369 2396 (symbol '1')
2370 2397 (symbol '2')))))
2371 2398 * optimized:
2372 2399 (and
2373 2400 (range
2374 2401 (symbol '2')
2375 2402 (symbol '0'))
2376 2403 (func
2377 2404 (symbol 'present')
2378 2405 (func
2379 2406 (symbol '_list')
2380 2407 (string '0\x001\x002'))))
2381 2408 * set:
2382 2409 <filteredset
2383 2410 <spanset- 0:3>,
2384 2411 <baseset [0, 1, 2]>>
2385 2412 2
2386 2413 1
2387 2414 0
2388 2415
2389 2416 'reverse()' should take effect only if it is the outermost expression:
2390 2417
2391 2418 $ try --optimize '0:2 & reverse(all())'
2392 2419 (and
2393 2420 (range
2394 2421 (symbol '0')
2395 2422 (symbol '2'))
2396 2423 (func
2397 2424 (symbol 'reverse')
2398 2425 (func
2399 2426 (symbol 'all')
2400 2427 None)))
2401 2428 * optimized:
2402 2429 (and
2403 2430 (range
2404 2431 (symbol '0')
2405 2432 (symbol '2'))
2406 2433 (func
2407 2434 (symbol 'reverse')
2408 2435 (func
2409 2436 (symbol 'all')
2410 2437 None)))
2411 2438 * set:
2412 2439 <filteredset
2413 2440 <spanset+ 0:3>,
2414 2441 <spanset+ 0:10>>
2415 2442 0
2416 2443 1
2417 2444 2
2418 2445
2419 2446 'sort()' should take effect only if it is the outermost expression:
2420 2447
2421 2448 $ try --optimize '0:2 & sort(all(), -rev)'
2422 2449 (and
2423 2450 (range
2424 2451 (symbol '0')
2425 2452 (symbol '2'))
2426 2453 (func
2427 2454 (symbol 'sort')
2428 2455 (list
2429 2456 (func
2430 2457 (symbol 'all')
2431 2458 None)
2432 2459 (negate
2433 2460 (symbol 'rev')))))
2434 2461 * optimized:
2435 2462 (and
2436 2463 (range
2437 2464 (symbol '0')
2438 2465 (symbol '2'))
2439 2466 (func
2440 2467 (symbol 'sort')
2441 2468 (list
2442 2469 (func
2443 2470 (symbol 'all')
2444 2471 None)
2445 2472 (string '-rev'))))
2446 2473 * set:
2447 2474 <filteredset
2448 2475 <spanset+ 0:3>,
2449 2476 <spanset+ 0:10>>
2450 2477 0
2451 2478 1
2452 2479 2
2453 2480
2454 2481 invalid argument passed to noop sort():
2455 2482
2456 2483 $ log '0:2 & sort()'
2457 2484 hg: parse error: sort requires one or two arguments
2458 2485 [255]
2459 2486 $ log '0:2 & sort(all(), -invalid)'
2460 2487 hg: parse error: unknown sort key '-invalid'
2461 2488 [255]
2462 2489
2463 2490 for 'A & f(B)', 'B' should not be affected by the order of 'A':
2464 2491
2465 2492 $ try --optimize '2:0 & first(1 + 0 + 2)'
2466 2493 (and
2467 2494 (range
2468 2495 (symbol '2')
2469 2496 (symbol '0'))
2470 2497 (func
2471 2498 (symbol 'first')
2472 2499 (or
2473 2500 (list
2474 2501 (symbol '1')
2475 2502 (symbol '0')
2476 2503 (symbol '2')))))
2477 2504 * optimized:
2478 2505 (and
2479 2506 (range
2480 2507 (symbol '2')
2481 2508 (symbol '0'))
2482 2509 (func
2483 2510 (symbol 'first')
2484 2511 (func
2485 2512 (symbol '_list')
2486 2513 (string '1\x000\x002'))))
2487 2514 * set:
2488 2515 <filteredset
2489 2516 <baseset [1]>,
2490 2517 <spanset- 0:3>>
2491 2518 1
2492 2519
2493 2520 $ try --optimize '2:0 & not last(0 + 2 + 1)'
2494 2521 (and
2495 2522 (range
2496 2523 (symbol '2')
2497 2524 (symbol '0'))
2498 2525 (not
2499 2526 (func
2500 2527 (symbol 'last')
2501 2528 (or
2502 2529 (list
2503 2530 (symbol '0')
2504 2531 (symbol '2')
2505 2532 (symbol '1'))))))
2506 2533 * optimized:
2507 2534 (difference
2508 2535 (range
2509 2536 (symbol '2')
2510 2537 (symbol '0'))
2511 2538 (func
2512 2539 (symbol 'last')
2513 2540 (func
2514 2541 (symbol '_list')
2515 2542 (string '0\x002\x001'))))
2516 2543 * set:
2517 2544 <filteredset
2518 2545 <spanset- 0:3>,
2519 2546 <not
2520 2547 <baseset [1]>>>
2521 2548 2
2522 2549 0
2523 2550
2524 2551 for 'A & (op)(B)', 'B' should not be affected by the order of 'A':
2525 2552
2526 2553 $ try --optimize '2:0 & (1 + 0 + 2):(0 + 2 + 1)'
2527 2554 (and
2528 2555 (range
2529 2556 (symbol '2')
2530 2557 (symbol '0'))
2531 2558 (range
2532 2559 (group
2533 2560 (or
2534 2561 (list
2535 2562 (symbol '1')
2536 2563 (symbol '0')
2537 2564 (symbol '2'))))
2538 2565 (group
2539 2566 (or
2540 2567 (list
2541 2568 (symbol '0')
2542 2569 (symbol '2')
2543 2570 (symbol '1'))))))
2544 2571 * optimized:
2545 2572 (and
2546 2573 (range
2547 2574 (symbol '2')
2548 2575 (symbol '0'))
2549 2576 (range
2550 2577 (func
2551 2578 (symbol '_list')
2552 2579 (string '1\x000\x002'))
2553 2580 (func
2554 2581 (symbol '_list')
2555 2582 (string '0\x002\x001'))))
2556 2583 * set:
2557 2584 <filteredset
2558 2585 <spanset- 0:3>,
2559 2586 <baseset [1]>>
2560 2587 1
2561 2588
2562 2589 'A & B' can be rewritten as 'flipand(B, A)' by weight.
2563 2590
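The idea behind that rewrite, sketched in plain Python (an assumed simplification, not the optimizer itself): when one side is a small, cheap-to-build set and the other is an expensive predicate such as contains(), it is cheaper to enumerate the small set and filter it.

  # 'expensive & cheap' evaluated smallest-side-first: enumerate the cheap set
  # and test the expensive predicate on each of its revisions
  def andsmally(expensive_pred, cheap_revs):
      return [r for r in cheap_revs if expensive_pred(r)]

  # e.g. for contains("glob:*") & (2 + 0 + 1), only revs 0, 1, 2 are ever tested
  assert andsmally(lambda rev: True, [0, 1, 2]) == [0, 1, 2]
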
2564 2591 $ try --optimize 'contains("glob:*") & (2 + 0 + 1)'
2565 2592 (and
2566 2593 (func
2567 2594 (symbol 'contains')
2568 2595 (string 'glob:*'))
2569 2596 (group
2570 2597 (or
2571 2598 (list
2572 2599 (symbol '2')
2573 2600 (symbol '0')
2574 2601 (symbol '1')))))
2575 2602 * optimized:
2576 2603 (andsmally
2577 2604 (func
2578 2605 (symbol 'contains')
2579 2606 (string 'glob:*'))
2580 2607 (func
2581 2608 (symbol '_list')
2582 2609 (string '2\x000\x001')))
2583 2610 * set:
2584 2611 <filteredset
2585 2612 <baseset+ [0, 1, 2]>,
2586 2613 <contains 'glob:*'>>
2587 2614 0
2588 2615 1
2589 2616 2
2590 2617
2591 2618 and in this example, 'A & B' is rewritten as 'B & A', but 'A' overrides
2592 2619 the order appropriately:
2593 2620
2594 2621 $ try --optimize 'reverse(contains("glob:*")) & (0 + 2 + 1)'
2595 2622 (and
2596 2623 (func
2597 2624 (symbol 'reverse')
2598 2625 (func
2599 2626 (symbol 'contains')
2600 2627 (string 'glob:*')))
2601 2628 (group
2602 2629 (or
2603 2630 (list
2604 2631 (symbol '0')
2605 2632 (symbol '2')
2606 2633 (symbol '1')))))
2607 2634 * optimized:
2608 2635 (andsmally
2609 2636 (func
2610 2637 (symbol 'reverse')
2611 2638 (func
2612 2639 (symbol 'contains')
2613 2640 (string 'glob:*')))
2614 2641 (func
2615 2642 (symbol '_list')
2616 2643 (string '0\x002\x001')))
2617 2644 * set:
2618 2645 <filteredset
2619 2646 <baseset- [0, 1, 2]>,
2620 2647 <contains 'glob:*'>>
2621 2648 2
2622 2649 1
2623 2650 0
2624 2651
2625 2652 test sort revset
2626 2653 --------------------------------------------
2627 2654
2628 2655 test when adding two unordered revsets
2629 2656
2630 2657 $ log 'sort(keyword(issue) or modifies(b))'
2631 2658 4
2632 2659 6
2633 2660
2634 2661 test when sorting a reversed collection in the same way it is already ordered
2635 2662
2636 2663 $ log 'sort(reverse(all()), -rev)'
2637 2664 9
2638 2665 8
2639 2666 7
2640 2667 6
2641 2668 5
2642 2669 4
2643 2670 3
2644 2671 2
2645 2672 1
2646 2673 0
2647 2674
2648 2675 test when sorting a reversed collection
2649 2676
2650 2677 $ log 'sort(reverse(all()), rev)'
2651 2678 0
2652 2679 1
2653 2680 2
2654 2681 3
2655 2682 4
2656 2683 5
2657 2684 6
2658 2685 7
2659 2686 8
2660 2687 9
2661 2688
2662 2689
2663 2690 test sorting two sorted collections in different orders
2664 2691
2665 2692 $ log 'sort(outgoing() or reverse(removes(a)), rev)'
2666 2693 2
2667 2694 6
2668 2695 8
2669 2696 9
2670 2697
2671 2698 test sorting two sorted collections in different orders backwards
2672 2699
2673 2700 $ log 'sort(outgoing() or reverse(removes(a)), -rev)'
2674 2701 9
2675 2702 8
2676 2703 6
2677 2704 2
2678 2705
2679 2706 test empty sort key which is noop
2680 2707
2681 2708 $ log 'sort(0 + 2 + 1, "")'
2682 2709 0
2683 2710 2
2684 2711 1
2685 2712
2686 2713 test invalid sort keys
2687 2714
2688 2715 $ log 'sort(all(), -invalid)'
2689 2716 hg: parse error: unknown sort key '-invalid'
2690 2717 [255]
2691 2718
2692 2719 $ cd ..
2693 2720
2694 2721 test sorting by multiple keys including variable-length strings
2695 2722
2696 2723 $ hg init sorting
2697 2724 $ cd sorting
2698 2725 $ cat <<EOF >> .hg/hgrc
2699 2726 > [ui]
2700 2727 > logtemplate = '{rev} {branch|p5}{desc|p5}{author|p5}{date|hgdate}\n'
2701 2728 > [templatealias]
2702 2729 > p5(s) = pad(s, 5)
2703 2730 > EOF
2704 2731 $ hg branch -qf b12
2705 2732 $ hg ci -m m111 -u u112 -d '111 10800'
2706 2733 $ hg branch -qf b11
2707 2734 $ hg ci -m m12 -u u111 -d '112 7200'
2708 2735 $ hg branch -qf b111
2709 2736 $ hg ci -m m11 -u u12 -d '111 3600'
2710 2737 $ hg branch -qf b112
2711 2738 $ hg ci -m m111 -u u11 -d '120 0'
2712 2739 $ hg branch -qf b111
2713 2740 $ hg ci -m m112 -u u111 -d '110 14400'
2714 2741 created new head
2715 2742
2716 2743 compare revisions (has fast path):
2717 2744
2718 2745 $ hg log -r 'sort(all(), rev)'
2719 2746 0 b12 m111 u112 111 10800
2720 2747 1 b11 m12 u111 112 7200
2721 2748 2 b111 m11 u12 111 3600
2722 2749 3 b112 m111 u11 120 0
2723 2750 4 b111 m112 u111 110 14400
2724 2751
2725 2752 $ hg log -r 'sort(all(), -rev)'
2726 2753 4 b111 m112 u111 110 14400
2727 2754 3 b112 m111 u11 120 0
2728 2755 2 b111 m11 u12 111 3600
2729 2756 1 b11 m12 u111 112 7200
2730 2757 0 b12 m111 u112 111 10800
2731 2758
2732 2759 compare variable-length strings (issue5218):
2733 2760
2734 2761 $ hg log -r 'sort(all(), branch)'
2735 2762 1 b11 m12 u111 112 7200
2736 2763 2 b111 m11 u12 111 3600
2737 2764 4 b111 m112 u111 110 14400
2738 2765 3 b112 m111 u11 120 0
2739 2766 0 b12 m111 u112 111 10800
2740 2767
2741 2768 $ hg log -r 'sort(all(), -branch)'
2742 2769 0 b12 m111 u112 111 10800
2743 2770 3 b112 m111 u11 120 0
2744 2771 2 b111 m11 u12 111 3600
2745 2772 4 b111 m112 u111 110 14400
2746 2773 1 b11 m12 u111 112 7200
2747 2774
2748 2775 $ hg log -r 'sort(all(), desc)'
2749 2776 2 b111 m11 u12 111 3600
2750 2777 0 b12 m111 u112 111 10800
2751 2778 3 b112 m111 u11 120 0
2752 2779 4 b111 m112 u111 110 14400
2753 2780 1 b11 m12 u111 112 7200
2754 2781
2755 2782 $ hg log -r 'sort(all(), -desc)'
2756 2783 1 b11 m12 u111 112 7200
2757 2784 4 b111 m112 u111 110 14400
2758 2785 0 b12 m111 u112 111 10800
2759 2786 3 b112 m111 u11 120 0
2760 2787 2 b111 m11 u12 111 3600
2761 2788
2762 2789 $ hg log -r 'sort(all(), user)'
2763 2790 3 b112 m111 u11 120 0
2764 2791 1 b11 m12 u111 112 7200
2765 2792 4 b111 m112 u111 110 14400
2766 2793 0 b12 m111 u112 111 10800
2767 2794 2 b111 m11 u12 111 3600
2768 2795
2769 2796 $ hg log -r 'sort(all(), -user)'
2770 2797 2 b111 m11 u12 111 3600
2771 2798 0 b12 m111 u112 111 10800
2772 2799 1 b11 m12 u111 112 7200
2773 2800 4 b111 m112 u111 110 14400
2774 2801 3 b112 m111 u11 120 0
2775 2802
2776 2803 compare dates (tz offset should have no effect):
2777 2804
2778 2805 $ hg log -r 'sort(all(), date)'
2779 2806 4 b111 m112 u111 110 14400
2780 2807 0 b12 m111 u112 111 10800
2781 2808 2 b111 m11 u12 111 3600
2782 2809 1 b11 m12 u111 112 7200
2783 2810 3 b112 m111 u11 120 0
2784 2811
2785 2812 $ hg log -r 'sort(all(), -date)'
2786 2813 3 b112 m111 u11 120 0
2787 2814 1 b11 m12 u111 112 7200
2788 2815 0 b12 m111 u112 111 10800
2789 2816 2 b111 m11 u12 111 3600
2790 2817 4 b111 m112 u111 110 14400
2791 2818
2792 2819 be aware that 'sort(x, -k)' is not exactly the same as 'reverse(sort(x, k))'
2793 2820 because '-k' reverses the comparison, not the list itself:
2794 2821
2795 2822 $ hg log -r 'sort(0 + 2, date)'
2796 2823 0 b12 m111 u112 111 10800
2797 2824 2 b111 m11 u12 111 3600
2798 2825
2799 2826 $ hg log -r 'sort(0 + 2, -date)'
2800 2827 0 b12 m111 u112 111 10800
2801 2828 2 b111 m11 u12 111 3600
2802 2829
2803 2830 $ hg log -r 'reverse(sort(0 + 2, date))'
2804 2831 2 b111 m11 u12 111 3600
2805 2832 0 b12 m111 u112 111 10800
2806 2833
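The same distinction restated in plain Python (an analogy relying on the fact that sorted() is stable, so equal keys keep their original relative order under reverse=True):

  revs = [(0, 111), (2, 111)]                       # (rev, date) with equal dates
  neg_key = sorted(revs, key=lambda t: t[1], reverse=True)     # ~ sort(x, -date)
  rev_sort = list(reversed(sorted(revs, key=lambda t: t[1])))  # ~ reverse(sort(x, date))
  assert [r for r, _ in neg_key] == [0, 2]          # equal keys keep original order
  assert [r for r, _ in rev_sort] == [2, 0]         # the whole list is reversed
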
2807 2834 sort by multiple keys:
2808 2835
2809 2836 $ hg log -r 'sort(all(), "branch -rev")'
2810 2837 1 b11 m12 u111 112 7200
2811 2838 4 b111 m112 u111 110 14400
2812 2839 2 b111 m11 u12 111 3600
2813 2840 3 b112 m111 u11 120 0
2814 2841 0 b12 m111 u112 111 10800
2815 2842
2816 2843 $ hg log -r 'sort(all(), "-desc -date")'
2817 2844 1 b11 m12 u111 112 7200
2818 2845 4 b111 m112 u111 110 14400
2819 2846 3 b112 m111 u11 120 0
2820 2847 0 b12 m111 u112 111 10800
2821 2848 2 b111 m11 u12 111 3600
2822 2849
2823 2850 $ hg log -r 'sort(all(), "user -branch date rev")'
2824 2851 3 b112 m111 u11 120 0
2825 2852 4 b111 m112 u111 110 14400
2826 2853 1 b11 m12 u111 112 7200
2827 2854 0 b12 m111 u112 111 10800
2828 2855 2 b111 m11 u12 111 3600
2829 2856
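One standard way to get a multi-key sort with mixed directions, sketched in plain Python (an assumption about the approach, relying only on list.sort being stable): sort repeatedly, least significant key first, flipping reverse per key.

  rows = [(0, 'b12'), (1, 'b11'), (2, 'b111'), (3, 'b112'), (4, 'b111')]  # (rev, branch)
  keys = [('branch', False), ('rev', True)]         # "branch -rev": branch asc, rev desc
  getter = {'branch': lambda r: r[1], 'rev': lambda r: r[0]}
  for name, descending in reversed(keys):           # least significant key first
      rows.sort(key=getter[name], reverse=descending)
  assert [r[0] for r in rows] == [1, 4, 2, 3, 0]    # matches 'sort(all(), "branch -rev")'
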
2830 2857 toposort prioritises graph branches
2831 2858
2832 2859 $ hg up 2
2833 2860 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
2834 2861 $ touch a
2835 2862 $ hg addremove
2836 2863 adding a
2837 2864 $ hg ci -m 't1' -u 'tu' -d '130 0'
2838 2865 created new head
2839 2866 $ echo 'a' >> a
2840 2867 $ hg ci -m 't2' -u 'tu' -d '130 0'
2841 2868 $ hg book book1
2842 2869 $ hg up 4
2843 2870 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
2844 2871 (leaving bookmark book1)
2845 2872 $ touch a
2846 2873 $ hg addremove
2847 2874 adding a
2848 2875 $ hg ci -m 't3' -u 'tu' -d '130 0'
2849 2876
2850 2877 $ hg log -r 'sort(all(), topo)'
2851 2878 7 b111 t3 tu 130 0
2852 2879 4 b111 m112 u111 110 14400
2853 2880 3 b112 m111 u11 120 0
2854 2881 6 b111 t2 tu 130 0
2855 2882 5 b111 t1 tu 130 0
2856 2883 2 b111 m11 u12 111 3600
2857 2884 1 b11 m12 u111 112 7200
2858 2885 0 b12 m111 u112 111 10800
2859 2886
2860 2887 $ hg log -r 'sort(all(), -topo)'
2861 2888 0 b12 m111 u112 111 10800
2862 2889 1 b11 m12 u111 112 7200
2863 2890 2 b111 m11 u12 111 3600
2864 2891 5 b111 t1 tu 130 0
2865 2892 6 b111 t2 tu 130 0
2866 2893 3 b112 m111 u11 120 0
2867 2894 4 b111 m112 u111 110 14400
2868 2895 7 b111 t3 tu 130 0
2869 2896
2870 2897 $ hg log -r 'sort(all(), topo, topo.firstbranch=book1)'
2871 2898 6 b111 t2 tu 130 0
2872 2899 5 b111 t1 tu 130 0
2873 2900 7 b111 t3 tu 130 0
2874 2901 4 b111 m112 u111 110 14400
2875 2902 3 b112 m111 u11 120 0
2876 2903 2 b111 m11 u12 111 3600
2877 2904 1 b11 m12 u111 112 7200
2878 2905 0 b12 m111 u112 111 10800
2879 2906
2880 2907 topological sorting can't be combined with other sort keys, and you can't
2881 2908 use the topo.firstbranch option when topo sort is not active:
2882 2909
2883 2910 $ hg log -r 'sort(all(), "topo user")'
2884 2911 hg: parse error: topo sort order cannot be combined with other sort keys
2885 2912 [255]
2886 2913
2887 2914 $ hg log -r 'sort(all(), user, topo.firstbranch=book1)'
2888 2915 hg: parse error: topo.firstbranch can only be used when using the topo sort key
2889 2916 [255]
2890 2917
2891 2918 topo.firstbranch should accept any kind of expression:
2892 2919
2893 2920 $ hg log -r 'sort(0, topo, topo.firstbranch=(book1))'
2894 2921 0 b12 m111 u112 111 10800
2895 2922
2896 2923 $ cd ..
2897 2924 $ cd repo
2898 2925
2899 2926 test multiline revset with errors
2900 2927
2901 2928 $ echo > multiline-revset
2902 2929 $ echo '. +' >> multiline-revset
2903 2930 $ echo '.^ +' >> multiline-revset
2904 2931 $ hg log -r "`cat multiline-revset`"
2905 2932 hg: parse error at 9: not a prefix: end
2906 2933 ( . + .^ +
2907 2934 ^ here)
2908 2935 [255]
2909 2936 $ hg debugrevspec -v 'revset(first(rev(0)))' -p all
2910 2937 * parsed:
2911 2938 (func
2912 2939 (symbol 'revset')
2913 2940 (func
2914 2941 (symbol 'first')
2915 2942 (func
2916 2943 (symbol 'rev')
2917 2944 (symbol '0'))))
2918 2945 * expanded:
2919 2946 (func
2920 2947 (symbol 'revset')
2921 2948 (func
2922 2949 (symbol 'first')
2923 2950 (func
2924 2951 (symbol 'rev')
2925 2952 (symbol '0'))))
2926 2953 * concatenated:
2927 2954 (func
2928 2955 (symbol 'revset')
2929 2956 (func
2930 2957 (symbol 'first')
2931 2958 (func
2932 2959 (symbol 'rev')
2933 2960 (symbol '0'))))
2934 2961 * analyzed:
2935 2962 (func
2936 2963 (symbol 'revset')
2937 2964 (func
2938 2965 (symbol 'first')
2939 2966 (func
2940 2967 (symbol 'rev')
2941 2968 (symbol '0'))))
2942 2969 * optimized:
2943 2970 (func
2944 2971 (symbol 'revset')
2945 2972 (func
2946 2973 (symbol 'first')
2947 2974 (func
2948 2975 (symbol 'rev')
2949 2976 (symbol '0'))))
2950 2977 * set:
2951 2978 <baseset+ [0]>
2952 2979 0