revset: add diff(pattern) predicate for "grep --diff"...
Yuya Nishihara
r46317:99b8b73e default
@@ -1,2753 +1,2793 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 dagop,
16 16 destutil,
17 17 diffutil,
18 18 encoding,
19 19 error,
20 grep as grepmod,
20 21 hbisect,
21 22 match as matchmod,
22 23 node,
23 24 obsolete as obsmod,
24 25 obsutil,
25 26 pathutil,
26 27 phases,
27 28 pycompat,
28 29 registrar,
29 30 repoview,
30 31 revsetlang,
31 32 scmutil,
32 33 smartset,
33 34 stack as stackmod,
34 35 util,
35 36 )
36 37 from .utils import (
37 38 dateutil,
38 39 stringutil,
39 40 )
40 41
41 42 # helpers for processing parsed tree
42 43 getsymbol = revsetlang.getsymbol
43 44 getstring = revsetlang.getstring
44 45 getinteger = revsetlang.getinteger
45 46 getboolean = revsetlang.getboolean
46 47 getlist = revsetlang.getlist
47 48 getintrange = revsetlang.getintrange
48 49 getargs = revsetlang.getargs
49 50 getargsdict = revsetlang.getargsdict
50 51
51 52 baseset = smartset.baseset
52 53 generatorset = smartset.generatorset
53 54 spanset = smartset.spanset
54 55 fullreposet = smartset.fullreposet
55 56
56 57 # revisions not included in all(), but populated if specified
57 58 _virtualrevs = (node.nullrev, node.wdirrev)
58 59
59 60 # Constants for ordering requirement, used in getset():
60 61 #
61 62 # If 'define', any nested functions and operations MAY change the ordering of
62 63 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
63 64 # it). If 'follow', any nested functions and operations MUST take the ordering
64 65 # specified by the first operand to the '&' operator.
65 66 #
66 67 # For instance,
67 68 #
68 69 # X & (Y | Z)
69 70 # ^ ^^^^^^^
70 71 # | follow
71 72 # define
72 73 #
73 74 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
74 75 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
75 76 #
76 77 # 'any' means the order doesn't matter. For instance,
77 78 #
78 79 # (X & !Y) | ancestors(Z)
79 80 # ^ ^
80 81 # any any
81 82 #
82 83 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
83 84 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
84 85 # since 'ancestors' does not care about the order of its argument.
85 86 #
86 87 # Currently, most revsets do not care about the order, so 'define' is
87 88 # equivalent to 'follow' for them, and the resulting order is based on the
88 89 # 'subset' parameter passed down to them:
89 90 #
90 91 # m = revset.match(...)
91 92 # m(repo, subset, order=defineorder)
92 93 # ^^^^^^
93 94 # For most revsets, 'define' means using the order this subset provides
94 95 #
95 96 # There are a few revsets that always redefine the order if 'define' is
96 97 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
97 98 anyorder = b'any' # don't care about the order, could be even random-shuffled
98 99 defineorder = b'define' # ALWAYS redefine, or ALWAYS follow the current order
99 100 followorder = b'follow' # MUST follow the current order
100 101
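To make the contract above concrete, here is a hedged sketch (using only names defined in this module) of how the order flag propagates when 'X & (Y | Z)' is evaluated; it restates the logic of the andset() operator method defined further down in this file:

    # andset(repo, subset, x, y, order)    # x is X, y is (Y | Z)
    #     yorder = anyorder if order == anyorder else followorder
    #     # X is evaluated first and MAY redefine the order ('define');
    #     # (Y | Z) is then evaluated against X's result and MUST follow
    #     # whatever order X produced ('follow').
    #     return getset(repo, getset(repo, subset, x, order), y, yorder)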
101 102 # helpers
102 103
103 104
104 105 def getset(repo, subset, x, order=defineorder):
105 106 if not x:
106 107 raise error.ParseError(_(b"missing argument"))
107 108 return methods[x[0]](repo, subset, *x[1:], order=order)
108 109
109 110
110 111 def _getrevsource(repo, r):
111 112 extra = repo[r].extra()
112 113 for label in (b'source', b'transplant_source', b'rebase_source'):
113 114 if label in extra:
114 115 try:
115 116 return repo[extra[label]].rev()
116 117 except error.RepoLookupError:
117 118 pass
118 119 return None
119 120
120 121
121 122 def _sortedb(xs):
122 123 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
123 124
124 125
125 126 # operator methods
126 127
127 128
128 129 def stringset(repo, subset, x, order):
129 130 if not x:
130 131 raise error.ParseError(_(b"empty string is not a valid revision"))
131 132 x = scmutil.intrev(scmutil.revsymbol(repo, x))
132 133 if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
133 134 return baseset([x])
134 135 return baseset()
135 136
136 137
137 138 def rawsmartset(repo, subset, x, order):
138 139 """argument is already a smartset, use that directly"""
139 140 if order == followorder:
140 141 return subset & x
141 142 else:
142 143 return x & subset
143 144
144 145
145 146 def rangeset(repo, subset, x, y, order):
146 147 m = getset(repo, fullreposet(repo), x)
147 148 n = getset(repo, fullreposet(repo), y)
148 149
149 150 if not m or not n:
150 151 return baseset()
151 152 return _makerangeset(repo, subset, m.first(), n.last(), order)
152 153
153 154
154 155 def rangeall(repo, subset, x, order):
155 156 assert x is None
156 157 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
157 158
158 159
159 160 def rangepre(repo, subset, y, order):
160 161 # ':y' can't be rewritten to '0:y' since '0' may be hidden
161 162 n = getset(repo, fullreposet(repo), y)
162 163 if not n:
163 164 return baseset()
164 165 return _makerangeset(repo, subset, 0, n.last(), order)
165 166
166 167
167 168 def rangepost(repo, subset, x, order):
168 169 m = getset(repo, fullreposet(repo), x)
169 170 if not m:
170 171 return baseset()
171 172 return _makerangeset(
172 173 repo, subset, m.first(), repo.changelog.tiprev(), order
173 174 )
174 175
175 176
176 177 def _makerangeset(repo, subset, m, n, order):
177 178 if m == n:
178 179 r = baseset([m])
179 180 elif n == node.wdirrev:
180 181 r = spanset(repo, m, len(repo)) + baseset([n])
181 182 elif m == node.wdirrev:
182 183 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
183 184 elif m < n:
184 185 r = spanset(repo, m, n + 1)
185 186 else:
186 187 r = spanset(repo, m, n - 1)
187 188
188 189 if order == defineorder:
189 190 return r & subset
190 191 else:
191 192 # carrying the sorting over when possible would be more efficient
192 193 return subset & r
193 194
194 195
195 196 def dagrange(repo, subset, x, y, order):
196 197 r = fullreposet(repo)
197 198 xs = dagop.reachableroots(
198 199 repo, getset(repo, r, x), getset(repo, r, y), includepath=True
199 200 )
200 201 return subset & xs
201 202
202 203
203 204 def andset(repo, subset, x, y, order):
204 205 if order == anyorder:
205 206 yorder = anyorder
206 207 else:
207 208 yorder = followorder
208 209 return getset(repo, getset(repo, subset, x, order), y, yorder)
209 210
210 211
211 212 def andsmallyset(repo, subset, x, y, order):
212 213 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
213 214 if order == anyorder:
214 215 yorder = anyorder
215 216 else:
216 217 yorder = followorder
217 218 return getset(repo, getset(repo, subset, y, yorder), x, order)
218 219
219 220
220 221 def differenceset(repo, subset, x, y, order):
221 222 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
222 223
223 224
224 225 def _orsetlist(repo, subset, xs, order):
225 226 assert xs
226 227 if len(xs) == 1:
227 228 return getset(repo, subset, xs[0], order)
228 229 p = len(xs) // 2
229 230 a = _orsetlist(repo, subset, xs[:p], order)
230 231 b = _orsetlist(repo, subset, xs[p:], order)
231 232 return a + b
232 233
233 234
234 235 def orset(repo, subset, x, order):
235 236 xs = getlist(x)
236 237 if not xs:
237 238 return baseset()
238 239 if order == followorder:
239 240 # slow path to take the subset order
240 241 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
241 242 else:
242 243 return _orsetlist(repo, subset, xs, order)
243 244
244 245
245 246 def notset(repo, subset, x, order):
246 247 return subset - getset(repo, subset, x, anyorder)
247 248
248 249
249 250 def relationset(repo, subset, x, y, order):
249 250 # this is a pretty basic implementation of the 'x#y' operator, still
251 252 # experimental so undocumented. see the wiki for further ideas.
252 253 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
253 254 rel = getsymbol(y)
254 255 if rel in relations:
255 256 return relations[rel](repo, subset, x, rel, order)
256 257
257 258 relnames = [r for r in relations.keys() if len(r) > 1]
258 259 raise error.UnknownIdentifier(rel, relnames)
259 260
260 261
261 262 def _splitrange(a, b):
262 263 """Split range with bounds a and b into two ranges at 0 and return two
263 264 tuples of numbers for use as startdepth and stopdepth arguments of
264 265 revancestors and revdescendants.
265 266
266 267 >>> _splitrange(-10, -5) # [-10:-5]
267 268 ((5, 11), (None, None))
268 269 >>> _splitrange(5, 10) # [5:10]
269 270 ((None, None), (5, 11))
270 271 >>> _splitrange(-10, 10) # [-10:10]
271 272 ((0, 11), (0, 11))
272 273 >>> _splitrange(-10, 0) # [-10:0]
273 274 ((0, 11), (None, None))
274 275 >>> _splitrange(0, 10) # [0:10]
275 276 ((None, None), (0, 11))
276 277 >>> _splitrange(0, 0) # [0:0]
277 278 ((0, 1), (None, None))
278 279 >>> _splitrange(1, -1) # [1:-1]
279 280 ((None, None), (None, None))
280 281 """
281 282 ancdepths = (None, None)
282 283 descdepths = (None, None)
283 284 if a == b == 0:
284 285 ancdepths = (0, 1)
285 286 if a < 0:
286 287 ancdepths = (-min(b, 0), -a + 1)
287 288 if b > 0:
288 289 descdepths = (max(a, 0), b + 1)
289 290 return ancdepths, descdepths
290 291
291 292
292 293 def generationsrel(repo, subset, x, rel, order):
293 294 z = (b'rangeall', None)
294 295 return generationssubrel(repo, subset, x, rel, z, order)
295 296
296 297
297 298 def generationssubrel(repo, subset, x, rel, z, order):
298 299 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
299 300 # descendants() predicates
300 301 a, b = getintrange(
301 302 z,
302 303 _(b'relation subscript must be an integer or a range'),
303 304 _(b'relation subscript bounds must be integers'),
304 305 deffirst=-(dagop.maxlogdepth - 1),
305 306 deflast=+(dagop.maxlogdepth - 1),
306 307 )
307 308 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
308 309
309 310 if ancstart is None and descstart is None:
310 311 return baseset()
311 312
312 313 revs = getset(repo, fullreposet(repo), x)
313 314 if not revs:
314 315 return baseset()
315 316
316 317 if ancstart is not None and descstart is not None:
317 318 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
318 319 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
319 320 elif ancstart is not None:
320 321 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
321 322 elif descstart is not None:
322 323 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
323 324
324 325 return subset & s
325 326
326 327
327 328 def relsubscriptset(repo, subset, x, y, z, order):
328 329 # this is a pretty basic implementation of the 'x#y[z]' operator, still
329 330 # experimental so undocumented. see the wiki for further ideas.
330 331 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
331 332 rel = getsymbol(y)
332 333 if rel in subscriptrelations:
333 334 return subscriptrelations[rel](repo, subset, x, rel, z, order)
334 335
335 336 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
336 337 raise error.UnknownIdentifier(rel, relnames)
337 338
338 339
339 340 def subscriptset(repo, subset, x, y, order):
340 341 raise error.ParseError(_(b"can't use a subscript in this context"))
341 342
342 343
343 344 def listset(repo, subset, *xs, **opts):
344 345 raise error.ParseError(
345 346 _(b"can't use a list in this context"),
346 347 hint=_(b'see \'hg help "revsets.x or y"\''),
347 348 )
348 349
349 350
350 351 def keyvaluepair(repo, subset, k, v, order):
351 352 raise error.ParseError(_(b"can't use a key-value pair in this context"))
352 353
353 354
354 355 def func(repo, subset, a, b, order):
355 356 f = getsymbol(a)
356 357 if f in symbols:
357 358 func = symbols[f]
358 359 if getattr(func, '_takeorder', False):
359 360 return func(repo, subset, b, order)
360 361 return func(repo, subset, b)
361 362
362 363 keep = lambda fn: getattr(fn, '__doc__', None) is not None
363 364
364 365 syms = [s for (s, fn) in symbols.items() if keep(fn)]
365 366 raise error.UnknownIdentifier(f, syms)
366 367
367 368
368 369 # functions
369 370
370 371 # symbols are callables like:
371 372 # fn(repo, subset, x)
372 373 # with:
373 374 # repo - current repository instance
374 375 # subset - of revisions to be examined
375 376 # x - argument in tree form
376 377 symbols = revsetlang.symbols
377 378
378 379 # symbols which can't be used for a DoS attack for any given input
379 380 # (e.g. those which accept regexes as plain strings shouldn't be included)
380 381 # functions that just return a lot of changesets (like all) don't count here
381 382 safesymbols = set()
382 383
383 384 predicate = registrar.revsetpredicate()
384 385
385 386
386 387 @predicate(b'_destupdate')
387 388 def _destupdate(repo, subset, x):
388 389 # experimental revset for update destination
389 390 args = getargsdict(x, b'limit', b'clean')
390 391 return subset & baseset(
391 392 [destutil.destupdate(repo, **pycompat.strkwargs(args))[0]]
392 393 )
393 394
394 395
395 396 @predicate(b'_destmerge')
396 397 def _destmerge(repo, subset, x):
397 398 # experimental revset for merge destination
398 399 sourceset = None
399 400 if x is not None:
400 401 sourceset = getset(repo, fullreposet(repo), x)
401 402 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
402 403
403 404
404 405 @predicate(b'adds(pattern)', safe=True, weight=30)
405 406 def adds(repo, subset, x):
406 407 """Changesets that add a file matching pattern.
407 408
408 409 The pattern without explicit kind like ``glob:`` is expected to be
409 410 relative to the current directory and match against a file or a
410 411 directory.
411 412 """
412 413 # i18n: "adds" is a keyword
413 414 pat = getstring(x, _(b"adds requires a pattern"))
414 415 return checkstatus(repo, subset, pat, 'added')
415 416
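A hedged usage sketch of the predicate above, assuming a localrepository object 'repo' (the glob pattern is hypothetical):

    revs = repo.revs(b"adds('glob:**.py')")  # changesets that add any .py file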
416 417
417 418 @predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
418 419 def ancestor(repo, subset, x):
419 420 """A greatest common ancestor of the changesets.
420 421
421 422 Accepts 0 or more changesets.
422 423 Will return an empty list when passed no args.
423 424 Greatest common ancestor of a single changeset is that changeset.
424 425 """
425 426 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
426 427 try:
427 428 anc = repo[next(reviter)]
428 429 except StopIteration:
429 430 return baseset()
430 431 for r in reviter:
431 432 anc = anc.ancestor(repo[r])
432 433
433 434 r = scmutil.intrev(anc)
434 435 if r in subset:
435 436 return baseset([r])
436 437 return baseset()
437 438
438 439
439 440 def _ancestors(
440 441 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
441 442 ):
442 443 heads = getset(repo, fullreposet(repo), x)
443 444 if not heads:
444 445 return baseset()
445 446 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
446 447 return subset & s
447 448
448 449
449 450 @predicate(b'ancestors(set[, depth])', safe=True)
450 451 def ancestors(repo, subset, x):
451 452 """Changesets that are ancestors of changesets in set, including the
452 453 given changesets themselves.
453 454
454 455 If depth is specified, the result only includes changesets up to
455 456 the specified generation.
456 457 """
457 458 # startdepth is for internal use only until we can decide the UI
458 459 args = getargsdict(x, b'ancestors', b'set depth startdepth')
459 460 if b'set' not in args:
460 461 # i18n: "ancestors" is a keyword
461 462 raise error.ParseError(_(b'ancestors takes at least 1 argument'))
462 463 startdepth = stopdepth = None
463 464 if b'startdepth' in args:
464 465 n = getinteger(
465 466 args[b'startdepth'], b"ancestors expects an integer startdepth"
466 467 )
467 468 if n < 0:
468 469 raise error.ParseError(b"negative startdepth")
469 470 startdepth = n
470 471 if b'depth' in args:
471 472 # i18n: "ancestors" is a keyword
472 473 n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
473 474 if n < 0:
474 475 raise error.ParseError(_(b"negative depth"))
475 476 stopdepth = n + 1
476 477 return _ancestors(
477 478 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
478 479 )
479 480
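A hedged usage sketch, assuming a localrepository object 'repo':

    revs = repo.revs(b'ancestors(.)')           # all ancestors of the working-directory parent
    revs = repo.revs(b'ancestors(., depth=2)')  # limited to '.', its parents and grandparents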
480 481
481 482 @predicate(b'_firstancestors', safe=True)
482 483 def _firstancestors(repo, subset, x):
483 484 # ``_firstancestors(set)``
484 485 # Like ``ancestors(set)`` but follows only the first parents.
485 486 return _ancestors(repo, subset, x, followfirst=True)
486 487
487 488
488 489 def _childrenspec(repo, subset, x, n, order):
489 490 """Changesets that are the Nth child of a changeset
490 491 in set.
491 492 """
492 493 cs = set()
493 494 for r in getset(repo, fullreposet(repo), x):
494 495 for i in range(n):
495 496 c = repo[r].children()
496 497 if len(c) == 0:
497 498 break
498 499 if len(c) > 1:
499 500 raise error.RepoLookupError(
500 501 _(b"revision in set has more than one child")
501 502 )
502 503 r = c[0].rev()
503 504 else:
504 505 cs.add(r)
505 506 return subset & cs
506 507
507 508
508 509 def ancestorspec(repo, subset, x, n, order):
509 510 """``set~n``
510 511 Changesets that are the Nth ancestor (first parents only) of a changeset
511 512 in set.
512 513 """
513 514 n = getinteger(n, _(b"~ expects a number"))
514 515 if n < 0:
515 516 # children lookup
516 517 return _childrenspec(repo, subset, x, -n, order)
517 518 ps = set()
518 519 cl = repo.changelog
519 520 for r in getset(repo, fullreposet(repo), x):
520 521 for i in range(n):
521 522 try:
522 523 r = cl.parentrevs(r)[0]
523 524 except error.WdirUnsupported:
524 525 r = repo[r].p1().rev()
525 526 ps.add(r)
526 527 return subset & ps
527 528
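A hedged usage sketch of the '~' operator handled above, assuming a localrepository object 'repo':

    revs = repo.revs(b'.~3')  # third first-parent ancestor of the working-directory parent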
528 529
529 530 @predicate(b'author(string)', safe=True, weight=10)
530 531 def author(repo, subset, x):
531 532 """Alias for ``user(string)``.
532 533 """
533 534 # i18n: "author" is a keyword
534 535 n = getstring(x, _(b"author requires a string"))
535 536 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
536 537 return subset.filter(
537 538 lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n)
538 539 )
539 540
540 541
541 542 @predicate(b'bisect(string)', safe=True)
542 543 def bisect(repo, subset, x):
543 544 """Changesets marked in the specified bisect status:
544 545
545 546 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
546 547 - ``goods``, ``bads`` : csets topologically good/bad
547 548 - ``range`` : csets taking part in the bisection
548 549 - ``pruned`` : csets that are goods, bads or skipped
549 550 - ``untested`` : csets whose fate is yet unknown
550 551 - ``ignored`` : csets ignored due to DAG topology
551 552 - ``current`` : the cset currently being bisected
552 553 """
553 554 # i18n: "bisect" is a keyword
554 555 status = getstring(x, _(b"bisect requires a string")).lower()
555 556 state = set(hbisect.get(repo, status))
556 557 return subset & state
557 558
558 559
559 560 # Backward-compatibility
560 561 # - no help entry so that we do not advertise it any more
561 562 @predicate(b'bisected', safe=True)
562 563 def bisected(repo, subset, x):
563 564 return bisect(repo, subset, x)
564 565
565 566
566 567 @predicate(b'bookmark([name])', safe=True)
567 568 def bookmark(repo, subset, x):
568 569 """The named bookmark or all bookmarks.
569 570
570 571 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
571 572 """
572 573 # i18n: "bookmark" is a keyword
573 574 args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
574 575 if args:
575 576 bm = getstring(
576 577 args[0],
577 578 # i18n: "bookmark" is a keyword
578 579 _(b'the argument to bookmark must be a string'),
579 580 )
580 581 kind, pattern, matcher = stringutil.stringmatcher(bm)
581 582 bms = set()
582 583 if kind == b'literal':
583 584 if bm == pattern:
584 585 pattern = repo._bookmarks.expandname(pattern)
585 586 bmrev = repo._bookmarks.get(pattern, None)
586 587 if not bmrev:
587 588 raise error.RepoLookupError(
588 589 _(b"bookmark '%s' does not exist") % pattern
589 590 )
590 591 bms.add(repo[bmrev].rev())
591 592 else:
592 593 matchrevs = set()
593 594 for name, bmrev in pycompat.iteritems(repo._bookmarks):
594 595 if matcher(name):
595 596 matchrevs.add(bmrev)
596 597 for bmrev in matchrevs:
597 598 bms.add(repo[bmrev].rev())
598 599 else:
599 600 bms = {repo[r].rev() for r in repo._bookmarks.values()}
600 601 bms -= {node.nullrev}
601 602 return subset & bms
602 603
603 604
604 605 @predicate(b'branch(string or set)', safe=True, weight=10)
605 606 def branch(repo, subset, x):
606 607 """
607 608 All changesets belonging to the given branch or the branches of the given
608 609 changesets.
609 610
610 611 Pattern matching is supported for `string`. See
611 612 :hg:`help revisions.patterns`.
612 613 """
613 614 getbi = repo.revbranchcache().branchinfo
614 615
615 616 def getbranch(r):
616 617 try:
617 618 return getbi(r)[0]
618 619 except error.WdirUnsupported:
619 620 return repo[r].branch()
620 621
621 622 try:
622 623 b = getstring(x, b'')
623 624 except error.ParseError:
624 625 # not a string, but another revspec, e.g. tip()
625 626 pass
626 627 else:
627 628 kind, pattern, matcher = stringutil.stringmatcher(b)
628 629 if kind == b'literal':
629 630 # note: falls through to the revspec case if no branch with
630 631 # this name exists and pattern kind is not specified explicitly
631 632 if repo.branchmap().hasbranch(pattern):
632 633 return subset.filter(
633 634 lambda r: matcher(getbranch(r)),
634 635 condrepr=(b'<branch %r>', b),
635 636 )
636 637 if b.startswith(b'literal:'):
637 638 raise error.RepoLookupError(
638 639 _(b"branch '%s' does not exist") % pattern
639 640 )
640 641 else:
641 642 return subset.filter(
642 643 lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
643 644 )
644 645
645 646 s = getset(repo, fullreposet(repo), x)
646 647 b = set()
647 648 for r in s:
648 649 b.add(getbranch(r))
649 650 c = s.__contains__
650 651 return subset.filter(
651 652 lambda r: c(r) or getbranch(r) in b,
652 653 condrepr=lambda: b'<branch %r>' % _sortedb(b),
653 654 )
654 655
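A hedged usage sketch of the predicate above, assuming a localrepository object 'repo' (branch names hypothetical):

    revs = repo.revs(b"branch('default')")        # literal branch name
    revs = repo.revs(b"branch('re:^stable-.*')")  # 're:' pattern, per revisions.patterns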
655 656
656 657 @predicate(b'phasedivergent()', safe=True)
657 658 def phasedivergent(repo, subset, x):
658 659 """Mutable changesets marked as successors of public changesets.
659 660
660 661 Only non-public and non-obsolete changesets can be `phasedivergent`.
661 662 (EXPERIMENTAL)
662 663 """
663 664 # i18n: "phasedivergent" is a keyword
664 665 getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
665 666 phasedivergent = obsmod.getrevs(repo, b'phasedivergent')
666 667 return subset & phasedivergent
667 668
668 669
669 670 @predicate(b'bundle()', safe=True)
670 671 def bundle(repo, subset, x):
671 672 """Changesets in the bundle.
672 673
673 674 Bundle must be specified by the -R option."""
674 675
675 676 try:
676 677 bundlerevs = repo.changelog.bundlerevs
677 678 except AttributeError:
678 679 raise error.Abort(_(b"no bundle provided - specify with -R"))
679 680 return subset & bundlerevs
680 681
681 682
682 683 def checkstatus(repo, subset, pat, field):
683 684 """Helper for status-related revsets (adds, removes, modifies).
684 685 The field parameter says which kind is desired.
685 686 """
686 687 hasset = matchmod.patkind(pat) == b'set'
687 688
688 689 mcache = [None]
689 690
690 691 def matches(x):
691 692 c = repo[x]
692 693 if not mcache[0] or hasset:
693 694 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
694 695 m = mcache[0]
695 696 fname = None
696 697
697 698 assert m is not None # help pytype
698 699 if not m.anypats() and len(m.files()) == 1:
699 700 fname = m.files()[0]
700 701 if fname is not None:
701 702 if fname not in c.files():
702 703 return False
703 704 else:
704 705 if not any(m(f) for f in c.files()):
705 706 return False
706 707 files = getattr(repo.status(c.p1().node(), c.node()), field)
707 708 if fname is not None:
708 709 if fname in files:
709 710 return True
710 711 else:
711 712 if any(m(f) for f in files):
712 713 return True
713 714
714 715 return subset.filter(
715 716 matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
716 717 )
717 718
718 719
719 720 def _children(repo, subset, parentset):
720 721 if not parentset:
721 722 return baseset()
722 723 cs = set()
723 724 pr = repo.changelog.parentrevs
724 725 minrev = parentset.min()
725 726 nullrev = node.nullrev
726 727 for r in subset:
727 728 if r <= minrev:
728 729 continue
729 730 p1, p2 = pr(r)
730 731 if p1 in parentset:
731 732 cs.add(r)
732 733 if p2 != nullrev and p2 in parentset:
733 734 cs.add(r)
734 735 return baseset(cs)
735 736
736 737
737 738 @predicate(b'children(set)', safe=True)
738 739 def children(repo, subset, x):
739 740 """Child changesets of changesets in set.
740 741 """
741 742 s = getset(repo, fullreposet(repo), x)
742 743 cs = _children(repo, subset, s)
743 744 return subset & cs
744 745
745 746
746 747 @predicate(b'closed()', safe=True, weight=10)
747 748 def closed(repo, subset, x):
748 749 """Changeset is closed.
749 750 """
750 751 # i18n: "closed" is a keyword
751 752 getargs(x, 0, 0, _(b"closed takes no arguments"))
752 753 return subset.filter(
753 754 lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>'
754 755 )
755 756
756 757
757 758 # for internal use
758 759 @predicate(b'_commonancestorheads(set)', safe=True)
759 760 def _commonancestorheads(repo, subset, x):
760 761 # This is an internal method for quickly calculating "heads(::x and
761 762 # ::y)"
762 763
763 764 # These greatest common ancestors are the same ones that the consensus bid
764 765 # merge will find.
765 766 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
766 767
767 768 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
768 769 return subset & baseset(ancs)
769 770
770 771
771 772 @predicate(b'commonancestors(set)', safe=True)
772 773 def commonancestors(repo, subset, x):
773 774 """Changesets that are ancestors of every changeset in set.
774 775 """
775 776 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
776 777 if not startrevs:
777 778 return baseset()
778 779 for r in startrevs:
779 780 subset &= dagop.revancestors(repo, baseset([r]))
780 781 return subset
781 782
782 783
783 784 @predicate(b'conflictlocal()', safe=True)
784 785 def conflictlocal(repo, subset, x):
785 786 """The local side of the merge, if currently in an unresolved merge.
786 787
787 788 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
788 789 """
789 790 getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
790 791 from . import mergestate as mergestatemod
791 792
792 793 mergestate = mergestatemod.mergestate.read(repo)
793 794 if mergestate.active() and repo.changelog.hasnode(mergestate.local):
794 795 return subset & {repo.changelog.rev(mergestate.local)}
795 796
796 797 return baseset()
797 798
798 799
799 800 @predicate(b'conflictother()', safe=True)
800 801 def conflictother(repo, subset, x):
801 802 """The other side of the merge, if currently in an unresolved merge.
802 803
803 804 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
804 805 """
805 806 getargs(x, 0, 0, _(b"conflictother takes no arguments"))
806 807 from . import mergestate as mergestatemod
807 808
808 809 mergestate = mergestatemod.mergestate.read(repo)
809 810 if mergestate.active() and repo.changelog.hasnode(mergestate.other):
810 811 return subset & {repo.changelog.rev(mergestate.other)}
811 812
812 813 return baseset()
813 814
814 815
815 816 @predicate(b'contains(pattern)', weight=100)
816 817 def contains(repo, subset, x):
817 818 """The revision's manifest contains a file matching pattern (but might not
818 819 modify it). See :hg:`help patterns` for information about file patterns.
819 820
820 821 The pattern without explicit kind like ``glob:`` is expected to be
821 822 relative to the current directory and match against a file exactly
822 823 for efficiency.
823 824 """
824 825 # i18n: "contains" is a keyword
825 826 pat = getstring(x, _(b"contains requires a pattern"))
826 827
827 828 def matches(x):
828 829 if not matchmod.patkind(pat):
829 830 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
830 831 if pats in repo[x]:
831 832 return True
832 833 else:
833 834 c = repo[x]
834 835 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
835 836 for f in c.manifest():
836 837 if m(f):
837 838 return True
838 839 return False
839 840
840 841 return subset.filter(matches, condrepr=(b'<contains %r>', pat))
841 842
842 843
843 844 @predicate(b'converted([id])', safe=True)
844 845 def converted(repo, subset, x):
845 846 """Changesets converted from the given identifier in the old repository if
846 847 present, or all converted changesets if no identifier is specified.
847 848 """
848 849
849 850 # There is exactly no chance of resolving the revision, so do a simple
850 851 # string compare and hope for the best
851 852
852 853 rev = None
853 854 # i18n: "converted" is a keyword
854 855 l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
855 856 if l:
856 857 # i18n: "converted" is a keyword
857 858 rev = getstring(l[0], _(b'converted requires a revision'))
858 859
859 860 def _matchvalue(r):
860 861 source = repo[r].extra().get(b'convert_revision', None)
861 862 return source is not None and (rev is None or source.startswith(rev))
862 863
863 864 return subset.filter(
864 865 lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev)
865 866 )
866 867
867 868
868 869 @predicate(b'date(interval)', safe=True, weight=10)
869 870 def date(repo, subset, x):
870 871 """Changesets within the interval, see :hg:`help dates`.
871 872 """
872 873 # i18n: "date" is a keyword
873 874 ds = getstring(x, _(b"date requires a string"))
874 875 dm = dateutil.matchdate(ds)
875 876 return subset.filter(
876 877 lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds)
877 878 )
878 879
879 880
880 881 @predicate(b'desc(string)', safe=True, weight=10)
881 882 def desc(repo, subset, x):
882 883 """Search commit message for string. The match is case-insensitive.
883 884
884 885 Pattern matching is supported for `string`. See
885 886 :hg:`help revisions.patterns`.
886 887 """
887 888 # i18n: "desc" is a keyword
888 889 ds = getstring(x, _(b"desc requires a string"))
889 890
890 891 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
891 892
892 893 return subset.filter(
893 894 lambda r: matcher(repo[r].description()), condrepr=(b'<desc %r>', ds)
894 895 )
895 896
896 897
897 898 def _descendants(
898 899 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
899 900 ):
900 901 roots = getset(repo, fullreposet(repo), x)
901 902 if not roots:
902 903 return baseset()
903 904 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
904 905 return subset & s
905 906
906 907
907 908 @predicate(b'descendants(set[, depth])', safe=True)
908 909 def descendants(repo, subset, x):
909 910 """Changesets which are descendants of changesets in set, including the
910 911 given changesets themselves.
911 912
912 913 If depth is specified, the result only includes changesets up to
913 914 the specified generation.
914 915 """
915 916 # startdepth is for internal use only until we can decide the UI
916 917 args = getargsdict(x, b'descendants', b'set depth startdepth')
917 918 if b'set' not in args:
918 919 # i18n: "descendants" is a keyword
919 920 raise error.ParseError(_(b'descendants takes at least 1 argument'))
920 921 startdepth = stopdepth = None
921 922 if b'startdepth' in args:
922 923 n = getinteger(
923 924 args[b'startdepth'], b"descendants expects an integer startdepth"
924 925 )
925 926 if n < 0:
926 927 raise error.ParseError(b"negative startdepth")
927 928 startdepth = n
928 929 if b'depth' in args:
929 930 # i18n: "descendants" is a keyword
930 931 n = getinteger(
931 932 args[b'depth'], _(b"descendants expects an integer depth")
932 933 )
933 934 if n < 0:
934 935 raise error.ParseError(_(b"negative depth"))
935 936 stopdepth = n + 1
936 937 return _descendants(
937 938 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
938 939 )
939 940
940 941
941 942 @predicate(b'_firstdescendants', safe=True)
942 943 def _firstdescendants(repo, subset, x):
943 944 # ``_firstdescendants(set)``
944 945 # Like ``descendants(set)`` but follows only the first parents.
945 946 return _descendants(repo, subset, x, followfirst=True)
946 947
947 948
948 949 @predicate(b'destination([set])', safe=True, weight=10)
949 950 def destination(repo, subset, x):
950 951 """Changesets that were created by a graft, transplant or rebase operation,
951 952 with the given revisions specified as the source. Omitting the optional set
952 953 is the same as passing all().
953 954 """
954 955 if x is not None:
955 956 sources = getset(repo, fullreposet(repo), x)
956 957 else:
957 958 sources = fullreposet(repo)
958 959
959 960 dests = set()
960 961
961 962 # subset contains all of the possible destinations that can be returned, so
962 963 # iterate over them and see if their source(s) were provided in the arg set.
963 964 # Even if the immediate src of r is not in the arg set, src's source (or
964 965 # further back) may be. Scanning back further than the immediate src allows
965 966 # transitive transplants and rebases to yield the same results as transitive
966 967 # grafts.
967 968 for r in subset:
968 969 src = _getrevsource(repo, r)
969 970 lineage = None
970 971
971 972 while src is not None:
972 973 if lineage is None:
973 974 lineage = list()
974 975
975 976 lineage.append(r)
976 977
977 978 # The visited lineage is a match if the current source is in the arg
978 979 # set. Since every candidate dest is visited by way of iterating
979 980 # subset, any dests further back in the lineage will be tested by a
980 981 # different iteration over subset. Likewise, if the src was already
981 982 # selected, the current lineage can be selected without going back
982 983 # further.
983 984 if src in sources or src in dests:
984 985 dests.update(lineage)
985 986 break
986 987
987 988 r = src
988 989 src = _getrevsource(repo, r)
989 990
990 991 return subset.filter(
991 992 dests.__contains__,
992 993 condrepr=lambda: b'<destination %r>' % _sortedb(dests),
993 994 )
994 995
995 996
997 @predicate(b'diff(pattern)', weight=110)
998 def diff(repo, subset, x):
999 """Search revision differences for when the pattern was added or removed.
1000
1001 The pattern may be a substring literal or a regular expression. See
1002 :hg:`help revisions.patterns`.
1003 """
1004 args = getargsdict(x, b'diff', b'pattern')
1005 if b'pattern' not in args:
1006 # i18n: "diff" is a keyword
1007 raise error.ParseError(_(b'diff takes at least 1 argument'))
1008
1009 pattern = getstring(args[b'pattern'], _(b'diff requires a string pattern'))
1010 regexp = stringutil.substringregexp(pattern, re.M)
1011
1012 # TODO: add support for file pattern and --follow. For example,
1013 # diff(pattern[, set]) where set may be file(pattern) or follow(pattern),
1014 # and we'll eventually add support for narrowing files by revset?
1015 fmatch = matchmod.always()
1016
1017 def makefilematcher(ctx):
1018 return fmatch
1019
1020 # TODO: search in a windowed way
1021 searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)
1022
1023 def testdiff(rev):
1024 # consume the generator to discard revfiles/matches cache
1025 found = False
1026 for fn, ctx, pstates, states in searcher.searchfiles(
1027 baseset([rev]), makefilematcher
1028 ):
1029 if next(grepmod.difflinestates(pstates, states), None):
1030 found = True
1031 return found
1032
1033 return subset.filter(testdiff, condrepr=(b'<diff %r>', pattern))
1034
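A hedged usage sketch of the new predicate, assuming a localrepository object 'repo' (the pattern is hypothetical); this is the revset counterpart of what "hg grep --diff TODO" scans:

    revs = repo.revs(b"diff('TODO')")  # changesets whose diff adds or removes a line matching 'TODO'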
1035
996 1036 @predicate(b'contentdivergent()', safe=True)
997 1037 def contentdivergent(repo, subset, x):
998 1038 """
999 1039 Final successors of changesets with an alternative set of final
1000 1040 successors. (EXPERIMENTAL)
1001 1041 """
1002 1042 # i18n: "contentdivergent" is a keyword
1003 1043 getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
1004 1044 contentdivergent = obsmod.getrevs(repo, b'contentdivergent')
1005 1045 return subset & contentdivergent
1006 1046
1007 1047
1008 1048 @predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
1009 1049 def expectsize(repo, subset, x, order):
1010 1050 """Return the given revset if size matches the revset size.
1011 1051 Abort if the revset doesn't expect given size.
1012 1052 size can either be an integer range or an integer.
1013 1053
1014 1054 For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
1015 1055 2 is not between 3 and 5 inclusive."""
1016 1056
1017 1057 args = getargsdict(x, b'expectsize', b'set size')
1018 1058 minsize = 0
1019 1059 maxsize = len(repo) + 1
1020 1060 err = b''
1021 1061 if b'size' not in args or b'set' not in args:
1022 1062 raise error.ParseError(_(b'invalid set of arguments'))
1023 1063 minsize, maxsize = getintrange(
1024 1064 args[b'size'],
1025 1065 _(b'expectsize requires a size range or a positive integer'),
1026 1066 _(b'size range bounds must be integers'),
1027 1067 minsize,
1028 1068 maxsize,
1029 1069 )
1030 1070 if minsize < 0 or maxsize < 0:
1031 1071 raise error.ParseError(_(b'negative size'))
1032 1072 rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
1033 1073 if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
1034 1074 err = _(b'revset size mismatch. expected between %d and %d, got %d') % (
1035 1075 minsize,
1036 1076 maxsize,
1037 1077 len(rev),
1038 1078 )
1039 1079 elif minsize == maxsize and len(rev) != minsize:
1040 1080 err = _(b'revset size mismatch. expected %d, got %d') % (
1041 1081 minsize,
1042 1082 len(rev),
1043 1083 )
1044 1084 if err:
1045 1085 raise error.RepoLookupError(err)
1046 1086 if order == followorder:
1047 1087 return subset & rev
1048 1088 else:
1049 1089 return rev & subset
1050 1090
1051 1091
1052 1092 @predicate(b'extdata(source)', safe=False, weight=100)
1053 1093 def extdata(repo, subset, x):
1054 1094 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
1055 1095 # i18n: "extdata" is a keyword
1056 1096 args = getargsdict(x, b'extdata', b'source')
1057 1097 source = getstring(
1058 1098 args.get(b'source'),
1059 1099 # i18n: "extdata" is a keyword
1060 1100 _(b'extdata takes at least 1 string argument'),
1061 1101 )
1062 1102 data = scmutil.extdatasource(repo, source)
1063 1103 return subset & baseset(data)
1064 1104
1065 1105
1066 1106 @predicate(b'extinct()', safe=True)
1067 1107 def extinct(repo, subset, x):
1068 1108 """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)
1069 1109 """
1070 1110 # i18n: "extinct" is a keyword
1071 1111 getargs(x, 0, 0, _(b"extinct takes no arguments"))
1072 1112 extincts = obsmod.getrevs(repo, b'extinct')
1073 1113 return subset & extincts
1074 1114
1075 1115
1076 1116 @predicate(b'extra(label, [value])', safe=True)
1077 1117 def extra(repo, subset, x):
1078 1118 """Changesets with the given label in the extra metadata, with the given
1079 1119 optional value.
1080 1120
1081 1121 Pattern matching is supported for `value`. See
1082 1122 :hg:`help revisions.patterns`.
1083 1123 """
1084 1124 args = getargsdict(x, b'extra', b'label value')
1085 1125 if b'label' not in args:
1086 1126 # i18n: "extra" is a keyword
1087 1127 raise error.ParseError(_(b'extra takes at least 1 argument'))
1088 1128 # i18n: "extra" is a keyword
1089 1129 label = getstring(
1090 1130 args[b'label'], _(b'first argument to extra must be a string')
1091 1131 )
1092 1132 value = None
1093 1133
1094 1134 if b'value' in args:
1095 1135 # i18n: "extra" is a keyword
1096 1136 value = getstring(
1097 1137 args[b'value'], _(b'second argument to extra must be a string')
1098 1138 )
1099 1139 kind, value, matcher = stringutil.stringmatcher(value)
1100 1140
1101 1141 def _matchvalue(r):
1102 1142 extra = repo[r].extra()
1103 1143 return label in extra and (value is None or matcher(extra[label]))
1104 1144
1105 1145 return subset.filter(
1106 1146 lambda r: _matchvalue(r), condrepr=(b'<extra[%r] %r>', label, value)
1107 1147 )
1108 1148
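A hedged usage sketch, assuming a localrepository object 'repo' (the extra key and value are illustrative only):

    revs = repo.revs(b"extra('source')")            # changesets carrying a 'source' key in extra
    revs = repo.revs(b"extra('source', 're:^00')")  # additionally constrain the value with a 're:' pattern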
1109 1149
1110 1150 @predicate(b'filelog(pattern)', safe=True)
1111 1151 def filelog(repo, subset, x):
1112 1152 """Changesets connected to the specified filelog.
1113 1153
1114 1154 For performance reasons, visits only revisions mentioned in the file-level
1115 1155 filelog, rather than filtering through all changesets (much faster, but
1116 1156 doesn't include deletes or duplicate changes). For a slower, more accurate
1117 1157 result, use ``file()``.
1118 1158
1119 1159 The pattern without explicit kind like ``glob:`` is expected to be
1120 1160 relative to the current directory and match against a file exactly
1121 1161 for efficiency.
1122 1162 """
1123 1163
1124 1164 # i18n: "filelog" is a keyword
1125 1165 pat = getstring(x, _(b"filelog requires a pattern"))
1126 1166 s = set()
1127 1167 cl = repo.changelog
1128 1168
1129 1169 if not matchmod.patkind(pat):
1130 1170 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1131 1171 files = [f]
1132 1172 else:
1133 1173 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1134 1174 files = (f for f in repo[None] if m(f))
1135 1175
1136 1176 for f in files:
1137 1177 fl = repo.file(f)
1138 1178 known = {}
1139 1179 scanpos = 0
1140 1180 for fr in list(fl):
1141 1181 fn = fl.node(fr)
1142 1182 if fn in known:
1143 1183 s.add(known[fn])
1144 1184 continue
1145 1185
1146 1186 lr = fl.linkrev(fr)
1147 1187 if lr in cl:
1148 1188 s.add(lr)
1149 1189 elif scanpos is not None:
1150 1190 # lowest matching changeset is filtered, scan further
1151 1191 # ahead in changelog
1152 1192 start = max(lr, scanpos) + 1
1153 1193 scanpos = None
1154 1194 for r in cl.revs(start):
1155 1195 # minimize parsing of non-matching entries
1156 1196 if f in cl.revision(r) and f in cl.readfiles(r):
1157 1197 try:
1158 1198 # try to use manifest delta fastpath
1159 1199 n = repo[r].filenode(f)
1160 1200 if n not in known:
1161 1201 if n == fn:
1162 1202 s.add(r)
1163 1203 scanpos = r
1164 1204 break
1165 1205 else:
1166 1206 known[n] = r
1167 1207 except error.ManifestLookupError:
1168 1208 # deletion in changelog
1169 1209 continue
1170 1210
1171 1211 return subset & s
1172 1212
1173 1213
1174 1214 @predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
1175 1215 def first(repo, subset, x, order):
1176 1216 """An alias for limit().
1177 1217 """
1178 1218 return limit(repo, subset, x, order)
1179 1219
1180 1220
1181 1221 def _follow(repo, subset, x, name, followfirst=False):
1182 1222 args = getargsdict(x, name, b'file startrev')
1183 1223 revs = None
1184 1224 if b'startrev' in args:
1185 1225 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1186 1226 if b'file' in args:
1187 1227 x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
1188 1228 if revs is None:
1189 1229 revs = [None]
1190 1230 fctxs = []
1191 1231 for r in revs:
1192 1232 ctx = mctx = repo[r]
1193 1233 if r is None:
1194 1234 ctx = repo[b'.']
1195 1235 m = matchmod.match(
1196 1236 repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
1197 1237 )
1198 1238 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
1199 1239 s = dagop.filerevancestors(fctxs, followfirst)
1200 1240 else:
1201 1241 if revs is None:
1202 1242 revs = baseset([repo[b'.'].rev()])
1203 1243 s = dagop.revancestors(repo, revs, followfirst)
1204 1244
1205 1245 return subset & s
1206 1246
1207 1247
1208 1248 @predicate(b'follow([file[, startrev]])', safe=True)
1209 1249 def follow(repo, subset, x):
1210 1250 """
1211 1251 An alias for ``::.`` (ancestors of the working directory's first parent).
1212 1252 If file pattern is specified, the histories of files matching given
1213 1253 pattern in the revision given by startrev are followed, including copies.
1214 1254 """
1215 1255 return _follow(repo, subset, x, b'follow')
1216 1256
1217 1257
1218 1258 @predicate(b'_followfirst', safe=True)
1219 1259 def _followfirst(repo, subset, x):
1220 1260 # ``followfirst([file[, startrev]])``
1221 1261 # Like ``follow([file[, startrev]])`` but follows only the first parent
1222 1262 # of every revisions or files revisions.
1223 1263 return _follow(repo, subset, x, b'_followfirst', followfirst=True)
1224 1264
1225 1265
1226 1266 @predicate(
1227 1267 b'followlines(file, fromline:toline[, startrev=., descend=False])',
1228 1268 safe=True,
1229 1269 )
1230 1270 def followlines(repo, subset, x):
1231 1271 """Changesets modifying `file` in line range ('fromline', 'toline').
1232 1272
1233 1273 Line range corresponds to 'file' content at 'startrev' and should hence be
1234 1274 consistent with file size. If startrev is not specified, working directory's
1235 1275 parent is used.
1236 1276
1237 1277 By default, ancestors of 'startrev' are returned. If 'descend' is True,
1238 1278 descendants of 'startrev' are returned though renames are (currently) not
1239 1279 followed in this direction.
1240 1280 """
1241 1281 args = getargsdict(x, b'followlines', b'file *lines startrev descend')
1242 1282 if len(args[b'lines']) != 1:
1243 1283 raise error.ParseError(_(b"followlines requires a line range"))
1244 1284
1245 1285 rev = b'.'
1246 1286 if b'startrev' in args:
1247 1287 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1248 1288 if len(revs) != 1:
1249 1289 raise error.ParseError(
1250 1290 # i18n: "followlines" is a keyword
1251 1291 _(b"followlines expects exactly one revision")
1252 1292 )
1253 1293 rev = revs.last()
1254 1294
1255 1295 pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
1256 1296 # i18n: "followlines" is a keyword
1257 1297 msg = _(b"followlines expects exactly one file")
1258 1298 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1259 1299 fromline, toline = util.processlinerange(
1260 1300 *getintrange(
1261 1301 args[b'lines'][0],
1262 1302 # i18n: "followlines" is a keyword
1263 1303 _(b"followlines expects a line number or a range"),
1264 1304 _(b"line range bounds must be integers"),
1265 1305 )
1266 1306 )
1267 1307
1268 1308 fctx = repo[rev].filectx(fname)
1269 1309 descend = False
1270 1310 if b'descend' in args:
1271 1311 descend = getboolean(
1272 1312 args[b'descend'],
1273 1313 # i18n: "descend" is a keyword
1274 1314 _(b"descend argument must be a boolean"),
1275 1315 )
1276 1316 if descend:
1277 1317 rs = generatorset(
1278 1318 (
1279 1319 c.rev()
1280 1320 for c, _linerange in dagop.blockdescendants(
1281 1321 fctx, fromline, toline
1282 1322 )
1283 1323 ),
1284 1324 iterasc=True,
1285 1325 )
1286 1326 else:
1287 1327 rs = generatorset(
1288 1328 (
1289 1329 c.rev()
1290 1330 for c, _linerange in dagop.blockancestors(
1291 1331 fctx, fromline, toline
1292 1332 )
1293 1333 ),
1294 1334 iterasc=False,
1295 1335 )
1296 1336 return subset & rs
1297 1337
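A hedged usage sketch of followlines(), assuming a localrepository object 'repo' (file name and line range hypothetical):

    revs = repo.revs(b"followlines('mercurial/revset.py', 10:20)")  # ancestors of '.' touching these lines
    # pass descend=True to walk toward descendants of startrev instead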
1298 1338
1299 1339 @predicate(b'all()', safe=True)
1300 1340 def getall(repo, subset, x):
1301 1341 """All changesets, the same as ``0:tip``.
1302 1342 """
1303 1343 # i18n: "all" is a keyword
1304 1344 getargs(x, 0, 0, _(b"all takes no arguments"))
1305 1345 return subset & spanset(repo) # drop "null" if any
1306 1346
1307 1347
1308 1348 @predicate(b'grep(regex)', weight=10)
1309 1349 def grep(repo, subset, x):
1310 1350 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1311 1351 to ensure special escape characters are handled correctly. Unlike
1312 1352 ``keyword(string)``, the match is case-sensitive.
1313 1353 """
1314 1354 try:
1315 1355 # i18n: "grep" is a keyword
1316 1356 gr = re.compile(getstring(x, _(b"grep requires a string")))
1317 1357 except re.error as e:
1318 1358 raise error.ParseError(
1319 1359 _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
1320 1360 )
1321 1361
1322 1362 def matches(x):
1323 1363 c = repo[x]
1324 1364 for e in c.files() + [c.user(), c.description()]:
1325 1365 if gr.search(e):
1326 1366 return True
1327 1367 return False
1328 1368
1329 1369 return subset.filter(matches, condrepr=(b'<grep %r>', gr.pattern))
1330 1370
1331 1371
1332 1372 @predicate(b'_matchfiles', safe=True)
1333 1373 def _matchfiles(repo, subset, x):
1334 1374 # _matchfiles takes a revset list of prefixed arguments:
1335 1375 #
1336 1376 # [p:foo, i:bar, x:baz]
1337 1377 #
1338 1378 # builds a match object from them and filters subset. Allowed
1339 1379 # prefixes are 'p:' for regular patterns, 'i:' for include
1340 1380 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1341 1381 # a revision identifier, or the empty string to reference the
1342 1382 # working directory, from which the match object is
1343 1383 # initialized. Use 'd:' to set the default matching mode, default
1344 1384 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1345 1385
1346 1386 l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
1347 1387 pats, inc, exc = [], [], []
1348 1388 rev, default = None, None
1349 1389 for arg in l:
1350 1390 s = getstring(arg, b"_matchfiles requires string arguments")
1351 1391 prefix, value = s[:2], s[2:]
1352 1392 if prefix == b'p:':
1353 1393 pats.append(value)
1354 1394 elif prefix == b'i:':
1355 1395 inc.append(value)
1356 1396 elif prefix == b'x:':
1357 1397 exc.append(value)
1358 1398 elif prefix == b'r:':
1359 1399 if rev is not None:
1360 1400 raise error.ParseError(
1361 1401 b'_matchfiles expected at most one revision'
1362 1402 )
1363 1403 if value == b'': # empty means working directory
1364 1404 rev = node.wdirrev
1365 1405 else:
1366 1406 rev = value
1367 1407 elif prefix == b'd:':
1368 1408 if default is not None:
1369 1409 raise error.ParseError(
1370 1410 b'_matchfiles expected at most one default mode'
1371 1411 )
1372 1412 default = value
1373 1413 else:
1374 1414 raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
1375 1415 if not default:
1376 1416 default = b'glob'
1377 1417 hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)
1378 1418
1379 1419 mcache = [None]
1380 1420
1381 1421 # This directly reads the changelog data, as creating changectx for all
1382 1422 # revisions is quite expensive.
1383 1423 getfiles = repo.changelog.readfiles
1384 1424 wdirrev = node.wdirrev
1385 1425
1386 1426 def matches(x):
1387 1427 if x == wdirrev:
1388 1428 files = repo[x].files()
1389 1429 else:
1390 1430 files = getfiles(x)
1391 1431
1392 1432 if not mcache[0] or (hasset and rev is None):
1393 1433 r = x if rev is None else rev
1394 1434 mcache[0] = matchmod.match(
1395 1435 repo.root,
1396 1436 repo.getcwd(),
1397 1437 pats,
1398 1438 include=inc,
1399 1439 exclude=exc,
1400 1440 ctx=repo[r],
1401 1441 default=default,
1402 1442 )
1403 1443 m = mcache[0]
1404 1444
1405 1445 for f in files:
1406 1446 if m(f):
1407 1447 return True
1408 1448 return False
1409 1449
1410 1450 return subset.filter(
1411 1451 matches,
1412 1452 condrepr=(
1413 1453 b'<matchfiles patterns=%r, include=%r '
1414 1454 b'exclude=%r, default=%r, rev=%r>',
1415 1455 pats,
1416 1456 inc,
1417 1457 exc,
1418 1458 default,
1419 1459 rev,
1420 1460 ),
1421 1461 )
1422 1462
1423 1463
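A hedged sketch of the prefix protocol described at the top of _matchfiles() (paths hypothetical): each argument is a ('string', ...) node carrying a 'p:', 'i:', 'x:', 'r:' or 'd:' prefix, and multiple arguments arrive as a nested 'list' node, e.g.

    # single pattern, exactly as file() below forwards it:
    _matchfiles(repo, subset, (b'string', b'p:src/**.py'))
    # pattern plus an exclude, combined in a parsed 'list' node:
    _matchfiles(repo, subset,
                (b'list', (b'string', b'p:src/**.py'), (b'string', b'x:src/vendor/**')))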
1424 1464 @predicate(b'file(pattern)', safe=True, weight=10)
1425 1465 def hasfile(repo, subset, x):
1426 1466 """Changesets affecting files matched by pattern.
1427 1467
1428 1468 For a faster but less accurate result, consider using ``filelog()``
1429 1469 instead.
1430 1470
1431 1471 This predicate uses ``glob:`` as the default kind of pattern.
1432 1472 """
1433 1473 # i18n: "file" is a keyword
1434 1474 pat = getstring(x, _(b"file requires a pattern"))
1435 1475 return _matchfiles(repo, subset, (b'string', b'p:' + pat))
1436 1476
1437 1477
1438 1478 @predicate(b'head()', safe=True)
1439 1479 def head(repo, subset, x):
1440 1480 """Changeset is a named branch head.
1441 1481 """
1442 1482 # i18n: "head" is a keyword
1443 1483 getargs(x, 0, 0, _(b"head takes no arguments"))
1444 1484 hs = set()
1445 1485 cl = repo.changelog
1446 1486 for ls in repo.branchmap().iterheads():
1447 1487 hs.update(cl.rev(h) for h in ls)
1448 1488 return subset & baseset(hs)
1449 1489
1450 1490
1451 1491 @predicate(b'heads(set)', safe=True, takeorder=True)
1452 1492 def heads(repo, subset, x, order):
1453 1493 """Members of set with no children in set.
1454 1494 """
1455 1495 # argument set should never define order
1456 1496 if order == defineorder:
1457 1497 order = followorder
1458 1498 inputset = getset(repo, fullreposet(repo), x, order=order)
1459 1499 wdirparents = None
1460 1500 if node.wdirrev in inputset:
1461 1501 # a bit slower, but not common so good enough for now
1462 1502 wdirparents = [p.rev() for p in repo[None].parents()]
1463 1503 inputset = set(inputset)
1464 1504 inputset.discard(node.wdirrev)
1465 1505 heads = repo.changelog.headrevs(inputset)
1466 1506 if wdirparents is not None:
1467 1507 heads.difference_update(wdirparents)
1468 1508 heads.add(node.wdirrev)
1469 1509 heads = baseset(heads)
1470 1510 return subset & heads
1471 1511
1472 1512
1473 1513 @predicate(b'hidden()', safe=True)
1474 1514 def hidden(repo, subset, x):
1475 1515 """Hidden changesets.
1476 1516 """
1477 1517 # i18n: "hidden" is a keyword
1478 1518 getargs(x, 0, 0, _(b"hidden takes no arguments"))
1479 1519 hiddenrevs = repoview.filterrevs(repo, b'visible')
1480 1520 return subset & hiddenrevs
1481 1521
1482 1522
1483 1523 @predicate(b'keyword(string)', safe=True, weight=10)
1484 1524 def keyword(repo, subset, x):
1485 1525 """Search commit message, user name, and names of changed files for
1486 1526 string. The match is case-insensitive.
1487 1527
1488 1528 For a regular expression or case sensitive search of these fields, use
1489 1529 ``grep(regex)``.
1490 1530 """
1491 1531 # i18n: "keyword" is a keyword
1492 1532 kw = encoding.lower(getstring(x, _(b"keyword requires a string")))
1493 1533
1494 1534 def matches(r):
1495 1535 c = repo[r]
1496 1536 return any(
1497 1537 kw in encoding.lower(t)
1498 1538 for t in c.files() + [c.user(), c.description()]
1499 1539 )
1500 1540
1501 1541 return subset.filter(matches, condrepr=(b'<keyword %r>', kw))
1502 1542
1503 1543
1504 1544 @predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1505 1545 def limit(repo, subset, x, order):
1506 1546 """First n members of set, defaulting to 1, starting from offset.
1507 1547 """
1508 1548 args = getargsdict(x, b'limit', b'set n offset')
1509 1549 if b'set' not in args:
1510 1550 # i18n: "limit" is a keyword
1511 1551 raise error.ParseError(_(b"limit requires one to three arguments"))
1512 1552 # i18n: "limit" is a keyword
1513 1553 lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
1514 1554 if lim < 0:
1515 1555 raise error.ParseError(_(b"negative number to select"))
1516 1556 # i18n: "limit" is a keyword
1517 1557 ofs = getinteger(
1518 1558 args.get(b'offset'), _(b"limit expects a number"), default=0
1519 1559 )
1520 1560 if ofs < 0:
1521 1561 raise error.ParseError(_(b"negative offset"))
1522 1562 os = getset(repo, fullreposet(repo), args[b'set'])
1523 1563 ls = os.slice(ofs, ofs + lim)
1524 1564 if order == followorder and lim > 1:
1525 1565 return subset & ls
1526 1566 return ls & subset
1527 1567
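A hedged usage sketch, assuming a localrepository object 'repo':

    revs = repo.revs(b'limit(all(), 3, 10)')  # three members of all(), starting at offset 10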
1528 1568
1529 1569 @predicate(b'last(set, [n])', safe=True, takeorder=True)
1530 1570 def last(repo, subset, x, order):
1531 1571 """Last n members of set, defaulting to 1.
1532 1572 """
1533 1573 # i18n: "last" is a keyword
1534 1574 l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
1535 1575 lim = 1
1536 1576 if len(l) == 2:
1537 1577 # i18n: "last" is a keyword
1538 1578 lim = getinteger(l[1], _(b"last expects a number"))
1539 1579 if lim < 0:
1540 1580 raise error.ParseError(_(b"negative number to select"))
1541 1581 os = getset(repo, fullreposet(repo), l[0])
1542 1582 os.reverse()
1543 1583 ls = os.slice(0, lim)
1544 1584 if order == followorder and lim > 1:
1545 1585 return subset & ls
1546 1586 ls.reverse()
1547 1587 return ls & subset
1548 1588
1549 1589
1550 1590 @predicate(b'max(set)', safe=True)
1551 1591 def maxrev(repo, subset, x):
1552 1592 """Changeset with highest revision number in set.
1553 1593 """
1554 1594 os = getset(repo, fullreposet(repo), x)
1555 1595 try:
1556 1596 m = os.max()
1557 1597 if m in subset:
1558 1598 return baseset([m], datarepr=(b'<max %r, %r>', subset, os))
1559 1599 except ValueError:
1560 1600 # os.max() throws a ValueError when the collection is empty.
1561 1601 # Same as python's max().
1562 1602 pass
1563 1603 return baseset(datarepr=(b'<max %r, %r>', subset, os))
1564 1604
1565 1605
1566 1606 @predicate(b'merge()', safe=True)
1567 1607 def merge(repo, subset, x):
1568 1608 """Changeset is a merge changeset.
1569 1609 """
1570 1610 # i18n: "merge" is a keyword
1571 1611 getargs(x, 0, 0, _(b"merge takes no arguments"))
1572 1612 cl = repo.changelog
1573 1613 nullrev = node.nullrev
1574 1614
1575 1615 def ismerge(r):
1576 1616 try:
1577 1617 return cl.parentrevs(r)[1] != nullrev
1578 1618 except error.WdirUnsupported:
1579 1619 return bool(repo[r].p2())
1580 1620
1581 1621 return subset.filter(ismerge, condrepr=b'<merge>')
1582 1622
1583 1623
1584 1624 @predicate(b'branchpoint()', safe=True)
1585 1625 def branchpoint(repo, subset, x):
1586 1626 """Changesets with more than one child.
1587 1627 """
1588 1628 # i18n: "branchpoint" is a keyword
1589 1629 getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
1590 1630 cl = repo.changelog
1591 1631 if not subset:
1592 1632 return baseset()
1593 1633 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1594 1634 # (and if it is not, it should.)
1595 1635 baserev = min(subset)
1596 1636 parentscount = [0] * (len(repo) - baserev)
1597 1637 for r in cl.revs(start=baserev + 1):
1598 1638 for p in cl.parentrevs(r):
1599 1639 if p >= baserev:
1600 1640 parentscount[p - baserev] += 1
1601 1641 return subset.filter(
1602 1642 lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
1603 1643 )
1604 1644
1605 1645
1606 1646 @predicate(b'min(set)', safe=True)
1607 1647 def minrev(repo, subset, x):
1608 1648 """Changeset with lowest revision number in set.
1609 1649 """
1610 1650 os = getset(repo, fullreposet(repo), x)
1611 1651 try:
1612 1652 m = os.min()
1613 1653 if m in subset:
1614 1654 return baseset([m], datarepr=(b'<min %r, %r>', subset, os))
1615 1655 except ValueError:
1616 1656 # os.min() throws a ValueError when the collection is empty.
1617 1657 # Same as python's min().
1618 1658 pass
1619 1659 return baseset(datarepr=(b'<min %r, %r>', subset, os))
1620 1660
1621 1661
1622 1662 @predicate(b'modifies(pattern)', safe=True, weight=30)
1623 1663 def modifies(repo, subset, x):
1624 1664 """Changesets modifying files matched by pattern.
1625 1665
1626 1666 The pattern without explicit kind like ``glob:`` is expected to be
1627 1667 relative to the current directory and match against a file or a
1628 1668 directory.
1629 1669 """
1630 1670 # i18n: "modifies" is a keyword
1631 1671 pat = getstring(x, _(b"modifies requires a pattern"))
1632 1672 return checkstatus(repo, subset, pat, 'modified')
1633 1673
1634 1674
1635 1675 @predicate(b'named(namespace)')
1636 1676 def named(repo, subset, x):
1637 1677 """The changesets in a given namespace.
1638 1678
1639 1679 Pattern matching is supported for `namespace`. See
1640 1680 :hg:`help revisions.patterns`.
1641 1681 """
1642 1682 # i18n: "named" is a keyword
1643 1683 args = getargs(x, 1, 1, _(b'named requires a namespace argument'))
1644 1684
1645 1685 ns = getstring(
1646 1686 args[0],
1647 1687 # i18n: "named" is a keyword
1648 1688 _(b'the argument to named must be a string'),
1649 1689 )
1650 1690 kind, pattern, matcher = stringutil.stringmatcher(ns)
1651 1691 namespaces = set()
1652 1692 if kind == b'literal':
1653 1693 if pattern not in repo.names:
1654 1694 raise error.RepoLookupError(
1655 1695 _(b"namespace '%s' does not exist") % ns
1656 1696 )
1657 1697 namespaces.add(repo.names[pattern])
1658 1698 else:
1659 1699 for name, ns in pycompat.iteritems(repo.names):
1660 1700 if matcher(name):
1661 1701 namespaces.add(ns)
1662 1702
1663 1703 names = set()
1664 1704 for ns in namespaces:
1665 1705 for name in ns.listnames(repo):
1666 1706 if name not in ns.deprecated:
1667 1707 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1668 1708
1669 1709 names -= {node.nullrev}
1670 1710 return subset & names
1671 1711
1672 1712
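Example usage of named(); "bookmarks" is a built-in namespace, and the regex form is only illustrative.

    # Every revision carrying a bookmark:
    bookmarked = repo.revs(b'named("bookmarks")')
    # Namespaces can also be selected by pattern (see :hg:`help revisions.patterns`):
    by_pattern = repo.revs(b'named("re:book.*")')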
1673 1713 @predicate(b'id(string)', safe=True)
1674 1714 def node_(repo, subset, x):
1675 1715 """Revision non-ambiguously specified by the given hex string prefix.
1676 1716 """
1677 1717 # i18n: "id" is a keyword
1678 1718 l = getargs(x, 1, 1, _(b"id requires one argument"))
1679 1719 # i18n: "id" is a keyword
1680 1720 n = getstring(l[0], _(b"id requires a string"))
1681 1721 if len(n) == 40:
1682 1722 try:
1683 1723 rn = repo.changelog.rev(node.bin(n))
1684 1724 except error.WdirUnsupported:
1685 1725 rn = node.wdirrev
1686 1726 except (LookupError, TypeError):
1687 1727 rn = None
1688 1728 else:
1689 1729 rn = None
1690 1730 try:
1691 1731 pm = scmutil.resolvehexnodeidprefix(repo, n)
1692 1732 if pm is not None:
1693 1733 rn = repo.changelog.rev(pm)
1694 1734 except LookupError:
1695 1735 pass
1696 1736 except error.WdirUnsupported:
1697 1737 rn = node.wdirrev
1698 1738
1699 1739 if rn is None:
1700 1740 return baseset()
1701 1741 result = baseset([rn])
1702 1742 return result & subset
1703 1743
1704 1744
1705 1745 @predicate(b'none()', safe=True)
1706 1746 def none(repo, subset, x):
1707 1747 """No changesets.
1708 1748 """
1709 1749 # i18n: "none" is a keyword
1710 1750 getargs(x, 0, 0, _(b"none takes no arguments"))
1711 1751 return baseset()
1712 1752
1713 1753
1714 1754 @predicate(b'obsolete()', safe=True)
1715 1755 def obsolete(repo, subset, x):
1716 1756 """Mutable changeset with a newer version. (EXPERIMENTAL)"""
1717 1757 # i18n: "obsolete" is a keyword
1718 1758 getargs(x, 0, 0, _(b"obsolete takes no arguments"))
1719 1759 obsoletes = obsmod.getrevs(repo, b'obsolete')
1720 1760 return subset & obsoletes
1721 1761
1722 1762
1723 1763 @predicate(b'only(set, [set])', safe=True)
1724 1764 def only(repo, subset, x):
1725 1765 """Changesets that are ancestors of the first set that are not ancestors
1726 1766 of any other head in the repo. If a second set is specified, the result
1727 1767 is ancestors of the first set that are not ancestors of the second set
1728 1768 (i.e. ::<set1> - ::<set2>).
1729 1769 """
1730 1770 cl = repo.changelog
1731 1771 # i18n: "only" is a keyword
1732 1772 args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
1733 1773 include = getset(repo, fullreposet(repo), args[0])
1734 1774 if len(args) == 1:
1735 1775 if not include:
1736 1776 return baseset()
1737 1777
1738 1778 descendants = set(dagop.revdescendants(repo, include, False))
1739 1779 exclude = [
1740 1780 rev
1741 1781 for rev in cl.headrevs()
1742 1782 if rev not in descendants and rev not in include
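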
1743 1783 ]
1744 1784 else:
1745 1785 exclude = getset(repo, fullreposet(repo), args[1])
1746 1786
1747 1787 results = set(cl.findmissingrevs(common=exclude, heads=include))
1748 1788 # XXX we should turn this into a baseset instead of a set, smartset may do
1749 1789 # some optimizations from the fact this is a baseset.
1750 1790 return subset & results
1751 1791
1752 1792
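A sketch of only() against the ::<set1> - ::<set2> formulation in the docstring; `featureX` and `default` are hypothetical bookmark/branch names.

    # Ancestors of featureX that are not ancestors of default:
    feature_only = repo.revs(b'only(featureX, default)')
    # The equivalent spelled out, per the docstring above:
    same = repo.revs(b'::featureX - ::default')
    assert set(feature_only) == set(same)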
1753 1793 @predicate(b'origin([set])', safe=True)
1754 1794 def origin(repo, subset, x):
1755 1795 """
1756 1796 Changesets that were specified as a source for the grafts, transplants or
1757 1797 rebases that created the given revisions. Omitting the optional set is the
1758 1798 same as passing all(). If a changeset created by these operations is itself
1759 1799 specified as a source for one of these operations, only the source changeset
1760 1800 for the first operation is selected.
1761 1801 """
1762 1802 if x is not None:
1763 1803 dests = getset(repo, fullreposet(repo), x)
1764 1804 else:
1765 1805 dests = fullreposet(repo)
1766 1806
1767 1807 def _firstsrc(rev):
1768 1808 src = _getrevsource(repo, rev)
1769 1809 if src is None:
1770 1810 return None
1771 1811
1772 1812 while True:
1773 1813 prev = _getrevsource(repo, src)
1774 1814
1775 1815 if prev is None:
1776 1816 return src
1777 1817 src = prev
1778 1818
1779 1819 o = {_firstsrc(r) for r in dests}
1780 1820 o -= {None}
1781 1821 # XXX we should turn this into a baseset instead of a set, smartset may do
1782 1822 # some optimizations from the fact this is a baseset.
1783 1823 return subset & o
1784 1824
1785 1825
1786 1826 @predicate(b'outgoing([path])', safe=False, weight=10)
1787 1827 def outgoing(repo, subset, x):
1788 1828 """Changesets not found in the specified destination repository, or the
1789 1829 default push location.
1790 1830 """
1791 1831 # Avoid cycles.
1792 1832 from . import (
1793 1833 discovery,
1794 1834 hg,
1795 1835 )
1796 1836
1797 1837 # i18n: "outgoing" is a keyword
1798 1838 l = getargs(x, 0, 1, _(b"outgoing takes one or no arguments"))
1799 1839 # i18n: "outgoing" is a keyword
1800 1840 dest = (
1801 1841 l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
1802 1842 )
1803 1843 if not dest:
1804 1844 # ui.paths.getpath() explicitly tests for None, not just a boolean
1805 1845 dest = None
1806 1846 path = repo.ui.paths.getpath(dest, default=(b'default-push', b'default'))
1807 1847 if not path:
1808 1848 raise error.Abort(
1809 1849 _(b'default repository not configured!'),
1810 1850 hint=_(b"see 'hg help config.paths'"),
1811 1851 )
1812 1852 dest = path.pushloc or path.loc
1813 1853 branches = path.branch, []
1814 1854
1815 1855 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1816 1856 if revs:
1817 1857 revs = [repo.lookup(rev) for rev in revs]
1818 1858 other = hg.peer(repo, {}, dest)
1819 1859 repo.ui.pushbuffer()
1820 1860 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1821 1861 repo.ui.popbuffer()
1822 1862 cl = repo.changelog
1823 1863 o = {cl.rev(r) for r in outgoing.missing}
1824 1864 return subset & o
1825 1865
1826 1866
1827 1867 @predicate(b'p1([set])', safe=True)
1828 1868 def p1(repo, subset, x):
1829 1869 """First parent of changesets in set, or the working directory.
1830 1870 """
1831 1871 if x is None:
1832 1872 p = repo[x].p1().rev()
1833 1873 if p >= 0:
1834 1874 return subset & baseset([p])
1835 1875 return baseset()
1836 1876
1837 1877 ps = set()
1838 1878 cl = repo.changelog
1839 1879 for r in getset(repo, fullreposet(repo), x):
1840 1880 try:
1841 1881 ps.add(cl.parentrevs(r)[0])
1842 1882 except error.WdirUnsupported:
1843 1883 ps.add(repo[r].p1().rev())
1844 1884 ps -= {node.nullrev}
1845 1885 # XXX we should turn this into a baseset instead of a set, smartset may do
1846 1886 # some optimizations from the fact this is a baseset.
1847 1887 return subset & ps
1848 1888
1849 1889
1850 1890 @predicate(b'p2([set])', safe=True)
1851 1891 def p2(repo, subset, x):
1852 1892 """Second parent of changesets in set, or the working directory.
1853 1893 """
1854 1894 if x is None:
1855 1895 ps = repo[x].parents()
1856 1896 try:
1857 1897 p = ps[1].rev()
1858 1898 if p >= 0:
1859 1899 return subset & baseset([p])
1860 1900 return baseset()
1861 1901 except IndexError:
1862 1902 return baseset()
1863 1903
1864 1904 ps = set()
1865 1905 cl = repo.changelog
1866 1906 for r in getset(repo, fullreposet(repo), x):
1867 1907 try:
1868 1908 ps.add(cl.parentrevs(r)[1])
1869 1909 except error.WdirUnsupported:
1870 1910 parents = repo[r].parents()
1871 1911 if len(parents) == 2:
1872 1912 ps.add(parents[1].rev())
1873 1913 ps -= {node.nullrev}
1874 1914 # XXX we should turn this into a baseset instead of a set, smartset may do
1875 1915 # some optimizations from the fact this is a baseset.
1876 1916 return subset & ps
1877 1917
1878 1918
1879 1919 def parentpost(repo, subset, x, order):
1880 1920 return p1(repo, subset, x)
1881 1921
1882 1922
1883 1923 @predicate(b'parents([set])', safe=True)
1884 1924 def parents(repo, subset, x):
1885 1925 """
1886 1926 The set of all parents for all changesets in set, or the working directory.
1887 1927 """
1888 1928 if x is None:
1889 1929 ps = {p.rev() for p in repo[x].parents()}
1890 1930 else:
1891 1931 ps = set()
1892 1932 cl = repo.changelog
1893 1933 up = ps.update
1894 1934 parentrevs = cl.parentrevs
1895 1935 for r in getset(repo, fullreposet(repo), x):
1896 1936 try:
1897 1937 up(parentrevs(r))
1898 1938 except error.WdirUnsupported:
1899 1939 up(p.rev() for p in repo[r].parents())
1900 1940 ps -= {node.nullrev}
1901 1941 return subset & ps
1902 1942
1903 1943
1904 1944 def _phase(repo, subset, *targets):
1905 1945 """helper to select all rev in <targets> phases"""
1906 1946 return repo._phasecache.getrevset(repo, targets, subset)
1907 1947
1908 1948
1909 1949 @predicate(b'_phase(idx)', safe=True)
1910 1950 def phase(repo, subset, x):
1911 1951 l = getargs(x, 1, 1, b"_phase requires one argument")
1912 1952 target = getinteger(l[0], b"_phase expects a number")
1913 1953 return _phase(repo, subset, target)
1914 1954
1915 1955
1916 1956 @predicate(b'draft()', safe=True)
1917 1957 def draft(repo, subset, x):
1918 1958 """Changeset in draft phase."""
1919 1959 # i18n: "draft" is a keyword
1920 1960 getargs(x, 0, 0, _(b"draft takes no arguments"))
1921 1961 target = phases.draft
1922 1962 return _phase(repo, subset, target)
1923 1963
1924 1964
1925 1965 @predicate(b'secret()', safe=True)
1926 1966 def secret(repo, subset, x):
1927 1967 """Changeset in secret phase."""
1928 1968 # i18n: "secret" is a keyword
1929 1969 getargs(x, 0, 0, _(b"secret takes no arguments"))
1930 1970 target = phases.secret
1931 1971 return _phase(repo, subset, target)
1932 1972
1933 1973
1934 1974 @predicate(b'stack([revs])', safe=True)
1935 1975 def stack(repo, subset, x):
1936 1976 """Experimental revset for the stack of changesets or working directory
1937 1977 parent. (EXPERIMENTAL)
1938 1978 """
1939 1979 if x is None:
1940 1980 stacks = stackmod.getstack(repo)
1941 1981 else:
1942 1982 stacks = smartset.baseset([])
1943 1983 for revision in getset(repo, fullreposet(repo), x):
1944 1984 currentstack = stackmod.getstack(repo, revision)
1945 1985 stacks = stacks + currentstack
1946 1986
1947 1987 return subset & stacks
1948 1988
1949 1989
1950 1990 def parentspec(repo, subset, x, n, order):
1951 1991 """``set^0``
1952 1992 The set.
1953 1993 ``set^1`` (or ``set^``), ``set^2``
1954 1994 First or second parent, respectively, of all changesets in set.
1955 1995 """
1956 1996 try:
1957 1997 n = int(n[1])
1958 1998 if n not in (0, 1, 2):
1959 1999 raise ValueError
1960 2000 except (TypeError, ValueError):
1961 2001 raise error.ParseError(_(b"^ expects a number 0, 1, or 2"))
1962 2002 ps = set()
1963 2003 cl = repo.changelog
1964 2004 for r in getset(repo, fullreposet(repo), x):
1965 2005 if n == 0:
1966 2006 ps.add(r)
1967 2007 elif n == 1:
1968 2008 try:
1969 2009 ps.add(cl.parentrevs(r)[0])
1970 2010 except error.WdirUnsupported:
1971 2011 ps.add(repo[r].p1().rev())
1972 2012 else:
1973 2013 try:
1974 2014 parents = cl.parentrevs(r)
1975 2015 if parents[1] != node.nullrev:
1976 2016 ps.add(parents[1])
1977 2017 except error.WdirUnsupported:
1978 2018 parents = repo[r].parents()
1979 2019 if len(parents) == 2:
1980 2020 ps.add(parents[1].rev())
1981 2021 return subset & ps
1982 2022
1983 2023
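For reference, the ^ operator handled by parentspec() above; the revisions queried are illustrative.

    wdir_parent = repo.revs(b'.^1')    # same as .^ : first parent of '.'
    second_parent = repo.revs(b'.^2')  # empty unless '.' is a merge
    unchanged = repo.revs(b'.^0')      # the set itself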
1984 2024 @predicate(b'present(set)', safe=True, takeorder=True)
1985 2025 def present(repo, subset, x, order):
1986 2026 """An empty set, if any revision in set isn't found; otherwise,
1987 2027 all revisions in set.
1988 2028
1989 2029 If any of the specified revisions is not present in the local repository,
1990 2030 the query is normally aborted. But this predicate allows the query
1991 2031 to continue even in such cases.
1992 2032 """
1993 2033 try:
1994 2034 return getset(repo, subset, x, order)
1995 2035 except error.RepoLookupError:
1996 2036 return baseset()
1997 2037
1998 2038
1999 2039 # for internal use
2000 2040 @predicate(b'_notpublic', safe=True)
2001 2041 def _notpublic(repo, subset, x):
2002 2042 getargs(x, 0, 0, b"_notpublic takes no arguments")
2003 2043 return _phase(repo, subset, phases.draft, phases.secret)
2004 2044
2005 2045
2006 2046 # for internal use
2007 2047 @predicate(b'_phaseandancestors(phasename, set)', safe=True)
2008 2048 def _phaseandancestors(repo, subset, x):
2009 2049 # equivalent to (phasename() & ancestors(set)) but more efficient
2010 2050 # phasename could be one of 'draft', 'secret', or '_notpublic'
2011 2051 args = getargs(x, 2, 2, b"_phaseandancestors requires two arguments")
2012 2052 phasename = getsymbol(args[0])
2013 2053 s = getset(repo, fullreposet(repo), args[1])
2014 2054
2015 2055 draft = phases.draft
2016 2056 secret = phases.secret
2017 2057 phasenamemap = {
2018 2058 b'_notpublic': draft,
2019 2059 b'draft': draft, # follow secret's ancestors
2020 2060 b'secret': secret,
2021 2061 }
2022 2062 if phasename not in phasenamemap:
2023 2063 raise error.ParseError(b'%r is not a valid phasename' % phasename)
2024 2064
2025 2065 minimalphase = phasenamemap[phasename]
2026 2066 getphase = repo._phasecache.phase
2027 2067
2028 2068 def cutfunc(rev):
2029 2069 return getphase(repo, rev) < minimalphase
2030 2070
2031 2071 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
2032 2072
2033 2073 if phasename == b'draft': # need to remove secret changesets
2034 2074 revs = revs.filter(lambda r: getphase(repo, r) == draft)
2035 2075 return subset & revs
2036 2076
2037 2077
2038 2078 @predicate(b'public()', safe=True)
2039 2079 def public(repo, subset, x):
2040 2080 """Changeset in public phase."""
2041 2081 # i18n: "public" is a keyword
2042 2082 getargs(x, 0, 0, _(b"public takes no arguments"))
2043 2083 return _phase(repo, subset, phases.public)
2044 2084
2045 2085
2046 2086 @predicate(b'remote([id [,path]])', safe=False)
2047 2087 def remote(repo, subset, x):
2048 2088 """Local revision that corresponds to the given identifier in a
2049 2089 remote repository, if present. Here, the '.' identifier is a
2050 2090 synonym for the current local branch.
2051 2091 """
2052 2092
2053 2093 from . import hg # avoid start-up nasties
2054 2094
2055 2095 # i18n: "remote" is a keyword
2056 2096 l = getargs(x, 0, 2, _(b"remote takes zero, one, or two arguments"))
2057 2097
2058 2098 q = b'.'
2059 2099 if len(l) > 0:
2060 2100 # i18n: "remote" is a keyword
2061 2101 q = getstring(l[0], _(b"remote requires a string id"))
2062 2102 if q == b'.':
2063 2103 q = repo[b'.'].branch()
2064 2104
2065 2105 dest = b''
2066 2106 if len(l) > 1:
2067 2107 # i18n: "remote" is a keyword
2068 2108 dest = getstring(l[1], _(b"remote requires a repository path"))
2069 2109 dest = repo.ui.expandpath(dest or b'default')
2070 2110 dest, branches = hg.parseurl(dest)
2071 2111
2072 2112 other = hg.peer(repo, {}, dest)
2073 2113 n = other.lookup(q)
2074 2114 if n in repo:
2075 2115 r = repo[n].rev()
2076 2116 if r in subset:
2077 2117 return baseset([r])
2078 2118 return baseset()
2079 2119
2080 2120
2081 2121 @predicate(b'removes(pattern)', safe=True, weight=30)
2082 2122 def removes(repo, subset, x):
2083 2123 """Changesets which remove files matching pattern.
2084 2124
2085 2125 The pattern without explicit kind like ``glob:`` is expected to be
2086 2126 relative to the current directory and match against a file or a
2087 2127 directory.
2088 2128 """
2089 2129 # i18n: "removes" is a keyword
2090 2130 pat = getstring(x, _(b"removes requires a pattern"))
2091 2131 return checkstatus(repo, subset, pat, 'removed')
2092 2132
2093 2133
2094 2134 @predicate(b'rev(number)', safe=True)
2095 2135 def rev(repo, subset, x):
2096 2136 """Revision with the given numeric identifier."""
2097 2137 try:
2098 2138 return _rev(repo, subset, x)
2099 2139 except error.RepoLookupError:
2100 2140 return baseset()
2101 2141
2102 2142
2103 2143 @predicate(b'_rev(number)', safe=True)
2104 2144 def _rev(repo, subset, x):
2105 2145 # internal version of "rev(x)" that raises an error if "x" is invalid
2106 2146 # i18n: "rev" is a keyword
2107 2147 l = getargs(x, 1, 1, _(b"rev requires one argument"))
2108 2148 try:
2109 2149 # i18n: "rev" is a keyword
2110 2150 l = int(getstring(l[0], _(b"rev requires a number")))
2111 2151 except (TypeError, ValueError):
2112 2152 # i18n: "rev" is a keyword
2113 2153 raise error.ParseError(_(b"rev expects a number"))
2114 2154 if l not in _virtualrevs:
2115 2155 try:
2116 2156 repo.changelog.node(l) # check that the rev exists
2117 2157 except IndexError:
2118 2158 raise error.RepoLookupError(_(b"unknown revision '%d'") % l)
2119 2159 return subset & baseset([l])
2120 2160
2121 2161
2122 2162 @predicate(b'revset(set)', safe=True, takeorder=True)
2123 2163 def revsetpredicate(repo, subset, x, order):
2124 2164 """Strictly interpret the content as a revset.
2125 2165
2126 2166 The content of this special predicate will be strictly interpreted as a
2127 2167 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
2128 2168 without possible ambiguity with an "id(0)" bookmark or tag.
2129 2169 """
2130 2170 return getset(repo, subset, x, order)
2131 2171
2132 2172
2133 2173 @predicate(b'matching(revision [, field])', safe=True)
2134 2174 def matching(repo, subset, x):
2135 2175 """Changesets in which a given set of fields match the set of fields in the
2136 2176 selected revision or set.
2137 2177
2138 2178 To match more than one field pass the list of fields to match separated
2139 2179 by spaces (e.g. ``author description``).
2140 2180
2141 2181 Valid fields are most regular revision fields and some special fields.
2142 2182
2143 2183 Regular revision fields are ``description``, ``author``, ``branch``,
2144 2184 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
2145 2185 and ``diff``.
2146 2186 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
2147 2187 contents of the revision. Two revisions matching their ``diff`` will
2148 2188 also match their ``files``.
2149 2189
2150 2190 Special fields are ``summary`` and ``metadata``:
2151 2191 ``summary`` matches the first line of the description.
2152 2192 ``metadata`` is equivalent to matching ``description user date``
2153 2193 (i.e. it matches the main metadata fields).
2154 2194
2155 2195 ``metadata`` is the default field which is used when no fields are
2156 2196 specified. You can match more than one field at a time.
2157 2197 """
2158 2198 # i18n: "matching" is a keyword
2159 2199 l = getargs(x, 1, 2, _(b"matching takes 1 or 2 arguments"))
2160 2200
2161 2201 revs = getset(repo, fullreposet(repo), l[0])
2162 2202
2163 2203 fieldlist = [b'metadata']
2164 2204 if len(l) > 1:
2165 2205 fieldlist = getstring(
2166 2206 l[1],
2167 2207 # i18n: "matching" is a keyword
2168 2208 _(b"matching requires a string as its second argument"),
2169 2209 ).split()
2170 2210
2171 2211 # Make sure that there are no repeated fields,
2172 2212 # expand the 'special' 'metadata' field type
2173 2213 # and check the 'files' whenever we check the 'diff'
2174 2214 fields = []
2175 2215 for field in fieldlist:
2176 2216 if field == b'metadata':
2177 2217 fields += [b'user', b'description', b'date']
2178 2218 elif field == b'diff':
2179 2219 # a revision matching the diff must also match the files
2180 2220 # since matching the diff is very costly, make sure to
2181 2221 # also match the files first
2182 2222 fields += [b'files', b'diff']
2183 2223 else:
2184 2224 if field == b'author':
2185 2225 field = b'user'
2186 2226 fields.append(field)
2187 2227 fields = set(fields)
2188 2228 if b'summary' in fields and b'description' in fields:
2189 2229 # If a revision matches its description it also matches its summary
2190 2230 fields.discard(b'summary')
2191 2231
2192 2232 # We may want to match more than one field
2193 2233 # Not all fields take the same amount of time to be matched
2194 2234 # Sort the selected fields in order of increasing matching cost
2195 2235 fieldorder = [
2196 2236 b'phase',
2197 2237 b'parents',
2198 2238 b'user',
2199 2239 b'date',
2200 2240 b'branch',
2201 2241 b'summary',
2202 2242 b'files',
2203 2243 b'description',
2204 2244 b'substate',
2205 2245 b'diff',
2206 2246 ]
2207 2247
2208 2248 def fieldkeyfunc(f):
2209 2249 try:
2210 2250 return fieldorder.index(f)
2211 2251 except ValueError:
2212 2252 # assume an unknown field is very costly
2213 2253 return len(fieldorder)
2214 2254
2215 2255 fields = list(fields)
2216 2256 fields.sort(key=fieldkeyfunc)
2217 2257
2218 2258 # Each field will be matched with its own "getfield" function
2219 2259 # which will be added to the getfieldfuncs array of functions
2220 2260 getfieldfuncs = []
2221 2261 _funcs = {
2222 2262 b'user': lambda r: repo[r].user(),
2223 2263 b'branch': lambda r: repo[r].branch(),
2224 2264 b'date': lambda r: repo[r].date(),
2225 2265 b'description': lambda r: repo[r].description(),
2226 2266 b'files': lambda r: repo[r].files(),
2227 2267 b'parents': lambda r: repo[r].parents(),
2228 2268 b'phase': lambda r: repo[r].phase(),
2229 2269 b'substate': lambda r: repo[r].substate,
2230 2270 b'summary': lambda r: repo[r].description().splitlines()[0],
2231 2271 b'diff': lambda r: list(
2232 2272 repo[r].diff(opts=diffutil.diffallopts(repo.ui, {b'git': True}))
2233 2273 ),
2234 2274 }
2235 2275 for info in fields:
2236 2276 getfield = _funcs.get(info, None)
2237 2277 if getfield is None:
2238 2278 raise error.ParseError(
2239 2279 # i18n: "matching" is a keyword
2240 2280 _(b"unexpected field name passed to matching: %s")
2241 2281 % info
2242 2282 )
2243 2283 getfieldfuncs.append(getfield)
2244 2284 # convert the getfield array of functions into a "getinfo" function
2245 2285 # which returns an array of field values (or a single value if there
2246 2286 # is only one field to match)
2247 2287 getinfo = lambda r: [f(r) for f in getfieldfuncs]
2248 2288
2249 2289 def matches(x):
2250 2290 for rev in revs:
2251 2291 target = getinfo(rev)
2252 2292 match = True
2253 2293 for n, f in enumerate(getfieldfuncs):
2254 2294 if target[n] != f(x):
2255 2295 match = False
2256 2296 if match:
2257 2297 return True
2258 2298 return False
2259 2299
2260 2300 return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs))
2261 2301
2262 2302
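A brief, hedged example of matching(); the field names come straight from the docstring above.

    # Changesets whose author and commit date match tip's:
    same_author_and_date = repo.revs(b'matching(tip, "author date")')
    # With no field list, the default "metadata" (user, description, date) is used:
    same_metadata = repo.revs(b'matching(tip)')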
2263 2303 @predicate(b'reverse(set)', safe=True, takeorder=True, weight=0)
2264 2304 def reverse(repo, subset, x, order):
2265 2305 """Reverse order of set.
2266 2306 """
2267 2307 l = getset(repo, subset, x, order)
2268 2308 if order == defineorder:
2269 2309 l.reverse()
2270 2310 return l
2271 2311
2272 2312
2273 2313 @predicate(b'roots(set)', safe=True)
2274 2314 def roots(repo, subset, x):
2275 2315 """Changesets in set with no parent changeset in set.
2276 2316 """
2277 2317 s = getset(repo, fullreposet(repo), x)
2278 2318 parents = repo.changelog.parentrevs
2279 2319
2280 2320 def filter(r):
2281 2321 for p in parents(r):
2282 2322 if 0 <= p and p in s:
2283 2323 return False
2284 2324 return True
2285 2325
2286 2326 return subset & s.filter(filter, condrepr=b'<roots>')
2287 2327
2288 2328
2289 2329 _sortkeyfuncs = {
2290 2330 b'rev': scmutil.intrev,
2291 2331 b'branch': lambda c: c.branch(),
2292 2332 b'desc': lambda c: c.description(),
2293 2333 b'user': lambda c: c.user(),
2294 2334 b'author': lambda c: c.user(),
2295 2335 b'date': lambda c: c.date()[0],
2296 2336 b'node': scmutil.binnode,
2297 2337 }
2298 2338
2299 2339
2300 2340 def _getsortargs(x):
2301 2341 """Parse sort options into (set, [(key, reverse)], opts)"""
2302 2342 args = getargsdict(x, b'sort', b'set keys topo.firstbranch')
2303 2343 if b'set' not in args:
2304 2344 # i18n: "sort" is a keyword
2305 2345 raise error.ParseError(_(b'sort requires one or two arguments'))
2306 2346 keys = b"rev"
2307 2347 if b'keys' in args:
2308 2348 # i18n: "sort" is a keyword
2309 2349 keys = getstring(args[b'keys'], _(b"sort spec must be a string"))
2310 2350
2311 2351 keyflags = []
2312 2352 for k in keys.split():
2313 2353 fk = k
2314 2354 reverse = k.startswith(b'-')
2315 2355 if reverse:
2316 2356 k = k[1:]
2317 2357 if k not in _sortkeyfuncs and k != b'topo':
2318 2358 raise error.ParseError(
2319 2359 _(b"unknown sort key %r") % pycompat.bytestr(fk)
2320 2360 )
2321 2361 keyflags.append((k, reverse))
2322 2362
2323 2363 if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags):
2324 2364 # i18n: "topo" is a keyword
2325 2365 raise error.ParseError(
2326 2366 _(b'topo sort order cannot be combined with other sort keys')
2327 2367 )
2328 2368
2329 2369 opts = {}
2330 2370 if b'topo.firstbranch' in args:
2331 2371 if any(k == b'topo' for k, reverse in keyflags):
2332 2372 opts[b'topo.firstbranch'] = args[b'topo.firstbranch']
2333 2373 else:
2334 2374 # i18n: "topo" and "topo.firstbranch" are keywords
2335 2375 raise error.ParseError(
2336 2376 _(
2337 2377 b'topo.firstbranch can only be used '
2338 2378 b'when using the topo sort key'
2339 2379 )
2340 2380 )
2341 2381
2342 2382 return args[b'set'], keyflags, opts
2343 2383
2344 2384
2345 2385 @predicate(
2346 2386 b'sort(set[, [-]key... [, ...]])', safe=True, takeorder=True, weight=10
2347 2387 )
2348 2388 def sort(repo, subset, x, order):
2349 2389 """Sort set by keys. The default sort order is ascending, specify a key
2350 2390 as ``-key`` to sort in descending order.
2351 2391
2352 2392 The keys can be:
2353 2393
2354 2394 - ``rev`` for the revision number,
2355 2395 - ``branch`` for the branch name,
2356 2396 - ``desc`` for the commit message (description),
2357 2397 - ``user`` for user name (``author`` can be used as an alias),
2358 2398 - ``date`` for the commit date,
2359 2399 - ``topo`` for a reverse topological sort,
2360 2400 - ``node`` for the nodeid of the revision
2361 2401
2362 2402 The ``topo`` sort order cannot be combined with other sort keys. This sort
2363 2403 takes one optional argument, ``topo.firstbranch``, which takes a revset that
2364 2404 specifies what topographical branches to prioritize in the sort.
2365 2405
2366 2406 """
2367 2407 s, keyflags, opts = _getsortargs(x)
2368 2408 revs = getset(repo, subset, s, order)
2369 2409
2370 2410 if not keyflags or order != defineorder:
2371 2411 return revs
2372 2412 if len(keyflags) == 1 and keyflags[0][0] == b"rev":
2373 2413 revs.sort(reverse=keyflags[0][1])
2374 2414 return revs
2375 2415 elif keyflags[0][0] == b"topo":
2376 2416 firstbranch = ()
2377 2417 if b'topo.firstbranch' in opts:
2378 2418 firstbranch = getset(repo, subset, opts[b'topo.firstbranch'])
2379 2419 revs = baseset(
2380 2420 dagop.toposort(revs, repo.changelog.parentrevs, firstbranch),
2381 2421 istopo=True,
2382 2422 )
2383 2423 if keyflags[0][1]:
2384 2424 revs.reverse()
2385 2425 return revs
2386 2426
2387 2427 # sort() is guaranteed to be stable
2388 2428 ctxs = [repo[r] for r in revs]
2389 2429 for k, reverse in reversed(keyflags):
2390 2430 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2391 2431 return baseset([c.rev() for c in ctxs])
2392 2432
2393 2433
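Illustrative sort() invocations; the key strings are examples, and `featureX` is a hypothetical bookmark.

    # Newest first, ties broken by user name ('-' reverses a key; keys are space-separated):
    by_date_then_user = repo.revs(b'sort(all(), "-date user")')
    # Topological order, preferring the branch containing featureX first:
    topo_first = repo.revs(b'sort(all(), "topo", topo.firstbranch=featureX)')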
2394 2434 @predicate(b'subrepo([pattern])')
2395 2435 def subrepo(repo, subset, x):
2396 2436 """Changesets that add, modify or remove the given subrepo. If no subrepo
2397 2437 pattern is named, any subrepo changes are returned.
2398 2438 """
2399 2439 # i18n: "subrepo" is a keyword
2400 2440 args = getargs(x, 0, 1, _(b'subrepo takes at most one argument'))
2401 2441 pat = None
2402 2442 if len(args) != 0:
2403 2443 pat = getstring(args[0], _(b"subrepo requires a pattern"))
2404 2444
2405 2445 m = matchmod.exact([b'.hgsubstate'])
2406 2446
2407 2447 def submatches(names):
2408 2448 k, p, m = stringutil.stringmatcher(pat)
2409 2449 for name in names:
2410 2450 if m(name):
2411 2451 yield name
2412 2452
2413 2453 def matches(x):
2414 2454 c = repo[x]
2415 2455 s = repo.status(c.p1().node(), c.node(), match=m)
2416 2456
2417 2457 if pat is None:
2418 2458 return s.added or s.modified or s.removed
2419 2459
2420 2460 if s.added:
2421 2461 return any(submatches(c.substate.keys()))
2422 2462
2423 2463 if s.modified:
2424 2464 subs = set(c.p1().substate.keys())
2425 2465 subs.update(c.substate.keys())
2426 2466
2427 2467 for path in submatches(subs):
2428 2468 if c.p1().substate.get(path) != c.substate.get(path):
2429 2469 return True
2430 2470
2431 2471 if s.removed:
2432 2472 return any(submatches(c.p1().substate.keys()))
2433 2473
2434 2474 return False
2435 2475
2436 2476 return subset.filter(matches, condrepr=(b'<subrepo %r>', pat))
2437 2477
2438 2478
2439 2479 def _mapbynodefunc(repo, s, f):
2440 2480 """(repo, smartset, [node] -> [node]) -> smartset
2441 2481
2442 2482 Helper method to map a smartset to another smartset given a function only
2443 2483 talking about nodes. Handles converting between rev numbers and nodes, and
2444 2484 filtering.
2445 2485 """
2446 2486 cl = repo.unfiltered().changelog
2447 2487 torev = cl.index.get_rev
2448 2488 tonode = cl.node
2449 2489 result = {torev(n) for n in f(tonode(r) for r in s)}
2450 2490 result.discard(None)
2451 2491 return smartset.baseset(result - repo.changelog.filteredrevs)
2452 2492
2453 2493
2454 2494 @predicate(b'successors(set)', safe=True)
2455 2495 def successors(repo, subset, x):
2456 2496 """All successors for set, including the given set themselves.
2457 2497 (EXPERIMENTAL)"""
2458 2498 s = getset(repo, fullreposet(repo), x)
2459 2499 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
2460 2500 d = _mapbynodefunc(repo, s, f)
2461 2501 return subset & d
2462 2502
2463 2503
2464 2504 def _substringmatcher(pattern, casesensitive=True):
2465 2505 kind, pattern, matcher = stringutil.stringmatcher(
2466 2506 pattern, casesensitive=casesensitive
2467 2507 )
2468 2508 if kind == b'literal':
2469 2509 if not casesensitive:
2470 2510 pattern = encoding.lower(pattern)
2471 2511 matcher = lambda s: pattern in encoding.lower(s)
2472 2512 else:
2473 2513 matcher = lambda s: pattern in s
2474 2514 return kind, pattern, matcher
2475 2515
2476 2516
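How the pattern kinds produced by _substringmatcher() look from the revset language; the user names are placeholders.

    # A bare string is a substring match (case-insensitive, per the user() docstring below):
    by_substring = repo.revs(b'user("spam")')
    # A "re:" prefix switches to a regular expression:
    by_regex = repo.revs(b'user("re:spam|eggs")')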
2477 2517 @predicate(b'tag([name])', safe=True)
2478 2518 def tag(repo, subset, x):
2479 2519 """The specified tag by name, or all tagged revisions if no name is given.
2480 2520
2481 2521 Pattern matching is supported for `name`. See
2482 2522 :hg:`help revisions.patterns`.
2483 2523 """
2484 2524 # i18n: "tag" is a keyword
2485 2525 args = getargs(x, 0, 1, _(b"tag takes one or no arguments"))
2486 2526 cl = repo.changelog
2487 2527 if args:
2488 2528 pattern = getstring(
2489 2529 args[0],
2490 2530 # i18n: "tag" is a keyword
2491 2531 _(b'the argument to tag must be a string'),
2492 2532 )
2493 2533 kind, pattern, matcher = stringutil.stringmatcher(pattern)
2494 2534 if kind == b'literal':
2495 2535 # avoid resolving all tags
2496 2536 tn = repo._tagscache.tags.get(pattern, None)
2497 2537 if tn is None:
2498 2538 raise error.RepoLookupError(
2499 2539 _(b"tag '%s' does not exist") % pattern
2500 2540 )
2501 2541 s = {repo[tn].rev()}
2502 2542 else:
2503 2543 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2504 2544 else:
2505 2545 s = {cl.rev(n) for t, n in repo.tagslist() if t != b'tip'}
2506 2546 return subset & s
2507 2547
2508 2548
2509 2549 @predicate(b'tagged', safe=True)
2510 2550 def tagged(repo, subset, x):
2511 2551 return tag(repo, subset, x)
2512 2552
2513 2553
2514 2554 @predicate(b'orphan()', safe=True)
2515 2555 def orphan(repo, subset, x):
2516 2556 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2517 2557 """
2518 2558 # i18n: "orphan" is a keyword
2519 2559 getargs(x, 0, 0, _(b"orphan takes no arguments"))
2520 2560 orphan = obsmod.getrevs(repo, b'orphan')
2521 2561 return subset & orphan
2522 2562
2523 2563
2524 2564 @predicate(b'unstable()', safe=True)
2525 2565 def unstable(repo, subset, x):
2526 2566 """Changesets with instabilities. (EXPERIMENTAL)
2527 2567 """
2528 2568 # i18n: "unstable" is a keyword
2529 2569 getargs(x, 0, 0, b'unstable takes no arguments')
2530 2570 _unstable = set()
2531 2571 _unstable.update(obsmod.getrevs(repo, b'orphan'))
2532 2572 _unstable.update(obsmod.getrevs(repo, b'phasedivergent'))
2533 2573 _unstable.update(obsmod.getrevs(repo, b'contentdivergent'))
2534 2574 return subset & baseset(_unstable)
2535 2575
2536 2576
2537 2577 @predicate(b'user(string)', safe=True, weight=10)
2538 2578 def user(repo, subset, x):
2539 2579 """User name contains string. The match is case-insensitive.
2540 2580
2541 2581 Pattern matching is supported for `string`. See
2542 2582 :hg:`help revisions.patterns`.
2543 2583 """
2544 2584 return author(repo, subset, x)
2545 2585
2546 2586
2547 2587 @predicate(b'wdir()', safe=True, weight=0)
2548 2588 def wdir(repo, subset, x):
2549 2589 """Working directory. (EXPERIMENTAL)"""
2550 2590 # i18n: "wdir" is a keyword
2551 2591 getargs(x, 0, 0, _(b"wdir takes no arguments"))
2552 2592 if node.wdirrev in subset or isinstance(subset, fullreposet):
2553 2593 return baseset([node.wdirrev])
2554 2594 return baseset()
2555 2595
2556 2596
2557 2597 def _orderedlist(repo, subset, x):
2558 2598 s = getstring(x, b"internal error")
2559 2599 if not s:
2560 2600 return baseset()
2561 2601 # remove duplicates here. it's difficult for caller to deduplicate sets
2562 2602 # because different symbols can point to the same rev.
2563 2603 cl = repo.changelog
2564 2604 ls = []
2565 2605 seen = set()
2566 2606 for t in s.split(b'\0'):
2567 2607 try:
2568 2608 # fast path for integer revision
2569 2609 r = int(t)
2570 2610 if (b'%d' % r) != t or r not in cl:
2571 2611 raise ValueError
2572 2612 revs = [r]
2573 2613 except ValueError:
2574 2614 revs = stringset(repo, subset, t, defineorder)
2575 2615
2576 2616 for r in revs:
2577 2617 if r in seen:
2578 2618 continue
2579 2619 if (
2580 2620 r in subset
2581 2621 or r in _virtualrevs
2582 2622 and isinstance(subset, fullreposet)
2583 2623 ):
2584 2624 ls.append(r)
2585 2625 seen.add(r)
2586 2626 return baseset(ls)
2587 2627
2588 2628
2589 2629 # for internal use
2590 2630 @predicate(b'_list', safe=True, takeorder=True)
2591 2631 def _list(repo, subset, x, order):
2592 2632 if order == followorder:
2593 2633 # slow path to take the subset order
2594 2634 return subset & _orderedlist(repo, fullreposet(repo), x)
2595 2635 else:
2596 2636 return _orderedlist(repo, subset, x)
2597 2637
2598 2638
2599 2639 def _orderedintlist(repo, subset, x):
2600 2640 s = getstring(x, b"internal error")
2601 2641 if not s:
2602 2642 return baseset()
2603 2643 ls = [int(r) for r in s.split(b'\0')]
2604 2644 s = subset
2605 2645 return baseset([r for r in ls if r in s])
2606 2646
2607 2647
2608 2648 # for internal use
2609 2649 @predicate(b'_intlist', safe=True, takeorder=True, weight=0)
2610 2650 def _intlist(repo, subset, x, order):
2611 2651 if order == followorder:
2612 2652 # slow path to take the subset order
2613 2653 return subset & _orderedintlist(repo, fullreposet(repo), x)
2614 2654 else:
2615 2655 return _orderedintlist(repo, subset, x)
2616 2656
2617 2657
2618 2658 def _orderedhexlist(repo, subset, x):
2619 2659 s = getstring(x, b"internal error")
2620 2660 if not s:
2621 2661 return baseset()
2622 2662 cl = repo.changelog
2623 2663 ls = [cl.rev(node.bin(r)) for r in s.split(b'\0')]
2624 2664 s = subset
2625 2665 return baseset([r for r in ls if r in s])
2626 2666
2627 2667
2628 2668 # for internal use
2629 2669 @predicate(b'_hexlist', safe=True, takeorder=True)
2630 2670 def _hexlist(repo, subset, x, order):
2631 2671 if order == followorder:
2632 2672 # slow path to take the subset order
2633 2673 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2634 2674 else:
2635 2675 return _orderedhexlist(repo, subset, x)
2636 2676
2637 2677
2638 2678 methods = {
2639 2679 b"range": rangeset,
2640 2680 b"rangeall": rangeall,
2641 2681 b"rangepre": rangepre,
2642 2682 b"rangepost": rangepost,
2643 2683 b"dagrange": dagrange,
2644 2684 b"string": stringset,
2645 2685 b"symbol": stringset,
2646 2686 b"and": andset,
2647 2687 b"andsmally": andsmallyset,
2648 2688 b"or": orset,
2649 2689 b"not": notset,
2650 2690 b"difference": differenceset,
2651 2691 b"relation": relationset,
2652 2692 b"relsubscript": relsubscriptset,
2653 2693 b"subscript": subscriptset,
2654 2694 b"list": listset,
2655 2695 b"keyvalue": keyvaluepair,
2656 2696 b"func": func,
2657 2697 b"ancestor": ancestorspec,
2658 2698 b"parent": parentspec,
2659 2699 b"parentpost": parentpost,
2660 2700 b"smartset": rawsmartset,
2661 2701 }
2662 2702
2663 2703 relations = {
2664 2704 b"g": generationsrel,
2665 2705 b"generations": generationsrel,
2666 2706 }
2667 2707
2668 2708 subscriptrelations = {
2669 2709 b"g": generationssubrel,
2670 2710 b"generations": generationssubrel,
2671 2711 }
2672 2712
2673 2713
2674 2714 def lookupfn(repo):
2675 2715 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2676 2716
2677 2717
2678 2718 def match(ui, spec, lookup=None):
2679 2719 """Create a matcher for a single revision spec"""
2680 2720 return matchany(ui, [spec], lookup=lookup)
2681 2721
2682 2722
2683 2723 def matchany(ui, specs, lookup=None, localalias=None):
2684 2724 """Create a matcher that will include any revisions matching one of the
2685 2725 given specs
2686 2726
2687 2727 If the lookup function is not None, the parser will first attempt to handle
2688 2728 old-style ranges, which may contain operator characters.
2689 2729
2690 2730 If localalias is not None, it is a dict {name: definitionstring}. It takes
2691 2731 precedence over the [revsetalias] config section.
2692 2732 """
2693 2733 if not specs:
2694 2734
2695 2735 def mfunc(repo, subset=None):
2696 2736 return baseset()
2697 2737
2698 2738 return mfunc
2699 2739 if not all(specs):
2700 2740 raise error.ParseError(_(b"empty query"))
2701 2741 if len(specs) == 1:
2702 2742 tree = revsetlang.parse(specs[0], lookup)
2703 2743 else:
2704 2744 tree = (
2705 2745 b'or',
2706 2746 (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
2707 2747 )
2708 2748
2709 2749 aliases = []
2710 2750 warn = None
2711 2751 if ui:
2712 2752 aliases.extend(ui.configitems(b'revsetalias'))
2713 2753 warn = ui.warn
2714 2754 if localalias:
2715 2755 aliases.extend(localalias.items())
2716 2756 if aliases:
2717 2757 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2718 2758 tree = revsetlang.foldconcat(tree)
2719 2759 tree = revsetlang.analyze(tree)
2720 2760 tree = revsetlang.optimize(tree)
2721 2761 return makematcher(tree)
2722 2762
2723 2763
2724 2764 def makematcher(tree):
2725 2765 """Create a matcher from an evaluatable tree"""
2726 2766
2727 2767 def mfunc(repo, subset=None, order=None):
2728 2768 if order is None:
2729 2769 if subset is None:
2730 2770 order = defineorder # 'x'
2731 2771 else:
2732 2772 order = followorder # 'subset & x'
2733 2773 if subset is None:
2734 2774 subset = fullreposet(repo)
2735 2775 return getset(repo, subset, tree, order)
2736 2776
2737 2777 return mfunc
2738 2778
2739 2779
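A hedged usage sketch of the matcher entry points defined above; `ui` and `repo` are the objects opened in the keyword() sketch, and the spec string is a placeholder.

    # Parse once, then evaluate; passing a subset switches to 'follow' order,
    # as makematcher() shows.
    m = match(ui, b'heads(all())', lookup=lookupfn(repo))
    all_heads = m(repo)                                  # full repo, 'define' order
    draft_heads = m(repo, subset=repo.revs(b'draft()'))  # restricted, 'follow' order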
2740 2780 def loadpredicate(ui, extname, registrarobj):
2741 2781 """Load revset predicates from specified registrarobj
2742 2782 """
2743 2783 for name, func in pycompat.iteritems(registrarobj._table):
2744 2784 symbols[name] = func
2745 2785 if func._safe:
2746 2786 safesymbols.add(name)
2747 2787
2748 2788
2749 2789 # load built-in predicates explicitly to setup safesymbols
2750 2790 loadpredicate(None, None, predicate)
2751 2791
2752 2792 # tell hggettext to extract docstrings from these functions:
2753 2793 i18nfunctions = symbols.values()
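For completeness, a hypothetical extension-side sketch of the registration path that loadpredicate() consumes; the predicate name and body are made up for illustration.

    # In an extension module; the loader looks for a module-level
    # 'revsetpredicate' table and feeds it to loadpredicate().
    from mercurial import registrar, revset

    revsetpredicate = registrar.revsetpredicate()

    @revsetpredicate(b'mypred(set)')
    def mypred(repo, subset, x):
        """Changesets in set (identity predicate, for illustration only)."""
        s = revset.getset(repo, revset.fullreposet(repo), x)
        return subset & s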
@@ -1,1440 +1,1464 b''
1 1 $ hg init t
2 2 $ cd t
3 3 $ echo import > port
4 4 $ hg add port
5 5 $ hg commit -m 0 -u spam -d '0 0'
6 6 $ echo export >> port
7 7 $ hg commit -m 1 -u eggs -d '1 0'
8 8 $ echo export > port
9 9 $ echo vaportight >> port
10 10 $ echo 'import/export' >> port
11 11 $ hg commit -m 2 -u spam -d '2 0'
12 12 $ echo 'import/export' >> port
13 13 $ hg commit -m 3 -u eggs -d '3 0'
14 14 $ head -n 3 port > port1
15 15 $ mv port1 port
16 16 $ hg commit -m 4 -u spam -d '4 0'
17 17
18 18 pattern error
19 19
20 20 $ hg grep '**test**'
21 21 grep: invalid match pattern: nothing to repeat* (glob)
22 22 [1]
23 23
24 invalid revset syntax
25
26 $ hg log -r 'diff()'
27 hg: parse error: diff takes at least 1 argument
28 [255]
29 $ hg log -r 'diff(:)'
30 hg: parse error: diff requires a string pattern
31 [255]
32 $ hg log -r 'diff("re:**test**")'
33 hg: parse error: invalid regular expression: nothing to repeat* (glob)
34 [255]
35
24 36 simple
25 37
26 38 $ hg grep -r tip:0 '.*'
27 39 port:4:export
28 40 port:4:vaportight
29 41 port:4:import/export
30 42 port:3:export
31 43 port:3:vaportight
32 44 port:3:import/export
33 45 port:3:import/export
34 46 port:2:export
35 47 port:2:vaportight
36 48 port:2:import/export
37 49 port:1:import
38 50 port:1:export
39 51 port:0:import
40 52 $ hg grep -r tip:0 port port
41 53 port:4:export
42 54 port:4:vaportight
43 55 port:4:import/export
44 56 port:3:export
45 57 port:3:vaportight
46 58 port:3:import/export
47 59 port:3:import/export
48 60 port:2:export
49 61 port:2:vaportight
50 62 port:2:import/export
51 63 port:1:import
52 64 port:1:export
53 65 port:0:import
54 66
55 67 simple from subdirectory
56 68
57 69 $ mkdir dir
58 70 $ cd dir
59 71 $ hg grep -r tip:0 port
60 72 port:4:export
61 73 port:4:vaportight
62 74 port:4:import/export
63 75 port:3:export
64 76 port:3:vaportight
65 77 port:3:import/export
66 78 port:3:import/export
67 79 port:2:export
68 80 port:2:vaportight
69 81 port:2:import/export
70 82 port:1:import
71 83 port:1:export
72 84 port:0:import
73 85 $ hg grep -r tip:0 port --config ui.relative-paths=yes
74 86 ../port:4:export
75 87 ../port:4:vaportight
76 88 ../port:4:import/export
77 89 ../port:3:export
78 90 ../port:3:vaportight
79 91 ../port:3:import/export
80 92 ../port:3:import/export
81 93 ../port:2:export
82 94 ../port:2:vaportight
83 95 ../port:2:import/export
84 96 ../port:1:import
85 97 ../port:1:export
86 98 ../port:0:import
87 99 $ cd ..
88 100
89 101 simple with color
90 102
91 103 $ hg --config extensions.color= grep --config color.mode=ansi \
92 104 > --color=always port port -r tip:0
93 105 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
94 106 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
95 107 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
96 108 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
97 109 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
98 110 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
99 111 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
100 112 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
101 113 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
102 114 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
103 115 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
104 116 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
105 117 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m0\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
106 118
107 119 simple templated
108 120
109 121 $ hg grep port -r tip:0 \
110 122 > -T '{path}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
111 123 port:4:914fa752cdea:exPORT
112 124 port:4:914fa752cdea:vaPORTight
113 125 port:4:914fa752cdea:imPORT/exPORT
114 126 port:3:95040cfd017d:exPORT
115 127 port:3:95040cfd017d:vaPORTight
116 128 port:3:95040cfd017d:imPORT/exPORT
117 129 port:3:95040cfd017d:imPORT/exPORT
118 130 port:2:3b325e3481a1:exPORT
119 131 port:2:3b325e3481a1:vaPORTight
120 132 port:2:3b325e3481a1:imPORT/exPORT
121 133 port:1:8b20f75c1585:imPORT
122 134 port:1:8b20f75c1585:exPORT
123 135 port:0:f31323c92170:imPORT
124 136
125 137 $ hg grep port -r tip:0 -T '{path}:{rev}:{texts}\n'
126 138 port:4:export
127 139 port:4:vaportight
128 140 port:4:import/export
129 141 port:3:export
130 142 port:3:vaportight
131 143 port:3:import/export
132 144 port:3:import/export
133 145 port:2:export
134 146 port:2:vaportight
135 147 port:2:import/export
136 148 port:1:import
137 149 port:1:export
138 150 port:0:import
139 151
140 152 $ hg grep port -r tip:0 -T '{path}:{tags}:{texts}\n'
141 153 port:tip:export
142 154 port:tip:vaportight
143 155 port:tip:import/export
144 156 port::export
145 157 port::vaportight
146 158 port::import/export
147 159 port::import/export
148 160 port::export
149 161 port::vaportight
150 162 port::import/export
151 163 port::import
152 164 port::export
153 165 port::import
154 166
155 167 simple JSON (no "change" field)
156 168
157 169 $ hg grep -r tip:0 -Tjson port
158 170 [
159 171 {
160 172 "date": [4, 0],
161 173 "lineno": 1,
162 174 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
163 175 "path": "port",
164 176 "rev": 4,
165 177 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
166 178 "user": "spam"
167 179 },
168 180 {
169 181 "date": [4, 0],
170 182 "lineno": 2,
171 183 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
172 184 "path": "port",
173 185 "rev": 4,
174 186 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
175 187 "user": "spam"
176 188 },
177 189 {
178 190 "date": [4, 0],
179 191 "lineno": 3,
180 192 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
181 193 "path": "port",
182 194 "rev": 4,
183 195 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
184 196 "user": "spam"
185 197 },
186 198 {
187 199 "date": [3, 0],
188 200 "lineno": 1,
189 201 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
190 202 "path": "port",
191 203 "rev": 3,
192 204 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
193 205 "user": "eggs"
194 206 },
195 207 {
196 208 "date": [3, 0],
197 209 "lineno": 2,
198 210 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
199 211 "path": "port",
200 212 "rev": 3,
201 213 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
202 214 "user": "eggs"
203 215 },
204 216 {
205 217 "date": [3, 0],
206 218 "lineno": 3,
207 219 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
208 220 "path": "port",
209 221 "rev": 3,
210 222 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
211 223 "user": "eggs"
212 224 },
213 225 {
214 226 "date": [3, 0],
215 227 "lineno": 4,
216 228 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
217 229 "path": "port",
218 230 "rev": 3,
219 231 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
220 232 "user": "eggs"
221 233 },
222 234 {
223 235 "date": [2, 0],
224 236 "lineno": 1,
225 237 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
226 238 "path": "port",
227 239 "rev": 2,
228 240 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
229 241 "user": "spam"
230 242 },
231 243 {
232 244 "date": [2, 0],
233 245 "lineno": 2,
234 246 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
235 247 "path": "port",
236 248 "rev": 2,
237 249 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
238 250 "user": "spam"
239 251 },
240 252 {
241 253 "date": [2, 0],
242 254 "lineno": 3,
243 255 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
244 256 "path": "port",
245 257 "rev": 2,
246 258 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
247 259 "user": "spam"
248 260 },
249 261 {
250 262 "date": [1, 0],
251 263 "lineno": 1,
252 264 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
253 265 "path": "port",
254 266 "rev": 1,
255 267 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
256 268 "user": "eggs"
257 269 },
258 270 {
259 271 "date": [1, 0],
260 272 "lineno": 2,
261 273 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
262 274 "path": "port",
263 275 "rev": 1,
264 276 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
265 277 "user": "eggs"
266 278 },
267 279 {
268 280 "date": [0, 0],
269 281 "lineno": 1,
270 282 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
271 283 "path": "port",
272 284 "rev": 0,
273 285 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
274 286 "user": "spam"
275 287 }
276 288 ]
277 289
278 290 simple JSON without matching lines
279 291
280 292 $ hg grep -r tip:0 -Tjson -l port
281 293 [
282 294 {
283 295 "date": [4, 0],
284 296 "lineno": 1,
285 297 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
286 298 "path": "port",
287 299 "rev": 4,
288 300 "user": "spam"
289 301 },
290 302 {
291 303 "date": [3, 0],
292 304 "lineno": 1,
293 305 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
294 306 "path": "port",
295 307 "rev": 3,
296 308 "user": "eggs"
297 309 },
298 310 {
299 311 "date": [2, 0],
300 312 "lineno": 1,
301 313 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
302 314 "path": "port",
303 315 "rev": 2,
304 316 "user": "spam"
305 317 },
306 318 {
307 319 "date": [1, 0],
308 320 "lineno": 1,
309 321 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
310 322 "path": "port",
311 323 "rev": 1,
312 324 "user": "eggs"
313 325 },
314 326 {
315 327 "date": [0, 0],
316 328 "lineno": 1,
317 329 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
318 330 "path": "port",
319 331 "rev": 0,
320 332 "user": "spam"
321 333 }
322 334 ]
323 335
324 336 diff of each revision for reference
325 337
326 338 $ hg log -p -T'== rev: {rev} ==\n'
327 339 == rev: 4 ==
328 340 diff -r 95040cfd017d -r 914fa752cdea port
329 341 --- a/port Thu Jan 01 00:00:03 1970 +0000
330 342 +++ b/port Thu Jan 01 00:00:04 1970 +0000
331 343 @@ -1,4 +1,3 @@
332 344 export
333 345 vaportight
334 346 import/export
335 347 -import/export
336 348
337 349 == rev: 3 ==
338 350 diff -r 3b325e3481a1 -r 95040cfd017d port
339 351 --- a/port Thu Jan 01 00:00:02 1970 +0000
340 352 +++ b/port Thu Jan 01 00:00:03 1970 +0000
341 353 @@ -1,3 +1,4 @@
342 354 export
343 355 vaportight
344 356 import/export
345 357 +import/export
346 358
347 359 == rev: 2 ==
348 360 diff -r 8b20f75c1585 -r 3b325e3481a1 port
349 361 --- a/port Thu Jan 01 00:00:01 1970 +0000
350 362 +++ b/port Thu Jan 01 00:00:02 1970 +0000
351 363 @@ -1,2 +1,3 @@
352 364 -import
353 365 export
354 366 +vaportight
355 367 +import/export
356 368
357 369 == rev: 1 ==
358 370 diff -r f31323c92170 -r 8b20f75c1585 port
359 371 --- a/port Thu Jan 01 00:00:00 1970 +0000
360 372 +++ b/port Thu Jan 01 00:00:01 1970 +0000
361 373 @@ -1,1 +1,2 @@
362 374 import
363 375 +export
364 376
365 377 == rev: 0 ==
366 378 diff -r 000000000000 -r f31323c92170 port
367 379 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
368 380 +++ b/port Thu Jan 01 00:00:00 1970 +0000
369 381 @@ -0,0 +1,1 @@
370 382 +import
371 383
372 384
373 385 all
374 386
375 387 $ hg grep --traceback --all -nu port port
376 388 port:4:4:-:spam:import/export
377 389 port:3:4:+:eggs:import/export
378 390 port:2:1:-:spam:import
379 391 port:2:2:+:spam:vaportight
380 392 port:2:3:+:spam:import/export
381 393 port:1:2:+:eggs:export
382 394 port:0:1:+:spam:import
383 395
384 396 all JSON
385 397
386 398 $ hg grep --all -Tjson port port
387 399 [
388 400 {
389 401 "change": "-",
390 402 "date": [4, 0],
391 403 "lineno": 4,
392 404 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
393 405 "path": "port",
394 406 "rev": 4,
395 407 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
396 408 "user": "spam"
397 409 },
398 410 {
399 411 "change": "+",
400 412 "date": [3, 0],
401 413 "lineno": 4,
402 414 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
403 415 "path": "port",
404 416 "rev": 3,
405 417 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
406 418 "user": "eggs"
407 419 },
408 420 {
409 421 "change": "-",
410 422 "date": [2, 0],
411 423 "lineno": 1,
412 424 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
413 425 "path": "port",
414 426 "rev": 2,
415 427 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
416 428 "user": "spam"
417 429 },
418 430 {
419 431 "change": "+",
420 432 "date": [2, 0],
421 433 "lineno": 2,
422 434 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
423 435 "path": "port",
424 436 "rev": 2,
425 437 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
426 438 "user": "spam"
427 439 },
428 440 {
429 441 "change": "+",
430 442 "date": [2, 0],
431 443 "lineno": 3,
432 444 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
433 445 "path": "port",
434 446 "rev": 2,
435 447 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
436 448 "user": "spam"
437 449 },
438 450 {
439 451 "change": "+",
440 452 "date": [1, 0],
441 453 "lineno": 2,
442 454 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
443 455 "path": "port",
444 456 "rev": 1,
445 457 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
446 458 "user": "eggs"
447 459 },
448 460 {
449 461 "change": "+",
450 462 "date": [0, 0],
451 463 "lineno": 1,
452 464 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
453 465 "path": "port",
454 466 "rev": 0,
455 467 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
456 468 "user": "spam"
457 469 }
458 470 ]
459 471
460 472 other
461 473
462 474 $ hg grep -r tip:0 -l port port
463 475 port:4
464 476 port:3
465 477 port:2
466 478 port:1
467 479 port:0
468 480 $ hg grep -r tip:0 import port
469 481 port:4:import/export
470 482 port:3:import/export
471 483 port:3:import/export
472 484 port:2:import/export
473 485 port:1:import
474 486 port:0:import
475 487
476 488 $ hg cp port port2
477 489 $ hg commit -m 4 -u spam -d '5 0'
478 490
479 491 follow
480 492
481 493 $ hg grep -r tip:0 --traceback -f 'import\n\Z' port2
482 494 [1]
483 495 $ echo deport >> port2
484 496 $ hg commit -m 5 -u eggs -d '6 0'
485 497 $ hg grep -f --all -nu port port2
486 498 port2:6:4:+:eggs:deport
487 499 port:4:4:-:spam:import/export
488 500 port:3:4:+:eggs:import/export
489 501 port:2:1:-:spam:import
490 502 port:2:2:+:spam:vaportight
491 503 port:2:3:+:spam:import/export
492 504 port:1:2:+:eggs:export
493 505 port:0:1:+:spam:import
494 506
495 507 $ hg up -q null
496 508 $ hg grep -r 'reverse(:.)' -f port
497 509 port:0:import
498 510
499 511 Test wdir
500 512 (at least, this shouldn't crash)
501 513
502 514 $ hg up -q
503 515 $ echo wport >> port2
504 516 $ hg stat
505 517 M port2
506 518 $ hg grep -r 'wdir()' port
507 519 port:2147483647:export
508 520 port:2147483647:vaportight
509 521 port:2147483647:import/export
510 522 port2:2147483647:export
511 523 port2:2147483647:vaportight
512 524 port2:2147483647:import/export
513 525 port2:2147483647:deport
514 526 port2:2147483647:wport
515 527
516 528 $ cd ..
517 529 $ hg init t2
518 530 $ cd t2
519 531 $ hg grep -r tip:0 foobar foo
520 532 [1]
521 533 $ hg grep -r tip:0 foobar
522 534 [1]
523 535 $ echo blue >> color
524 536 $ echo black >> color
525 537 $ hg add color
526 538 $ hg ci -m 0
527 539 $ echo orange >> color
528 540 $ hg ci -m 1
529 541 $ echo black > color
530 542 $ hg ci -m 2
531 543 $ echo orange >> color
532 544 $ echo blue >> color
533 545 $ hg ci -m 3
534 546 $ hg grep -r tip:0 orange
535 547 color:3:orange
536 548 color:1:orange
537 549 $ hg grep --all orange
538 550 color:3:+:orange
539 551 color:2:-:orange
540 552 color:1:+:orange
541 553 $ hg grep --diff orange --color=debug
542 554 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.inserted grep.change|+][grep.sep|:][grep.match|orange]
543 555 [grep.filename|color][grep.sep|:][grep.rev|2][grep.sep|:][grep.deleted grep.change|-][grep.sep|:][grep.match|orange]
544 556 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.inserted grep.change|+][grep.sep|:][grep.match|orange]
545 557
546 558 $ hg grep --diff orange --color=yes
547 559 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32;1m+\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
548 560 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1m-\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
549 561 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32;1m+\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
550 562
551 563 $ hg grep --diff orange
552 564 color:3:+:orange
553 565 color:2:-:orange
554 566 color:1:+:orange
555 567
568 revset predicate for "grep --diff"
569
570 $ hg log -qr 'diff("re:^bl...$")'
571 0:203191eb5e21
572 $ hg log -qr 'diff("orange")'
573 1:7c585a21e0d1
574 2:11bd8bc8d653
575 3:e0116d3829f8
576 $ hg log -qr '2:0 & diff("orange")'
577 2:11bd8bc8d653
578 1:7c585a21e0d1
579
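As a rough mental model of what these queries check (a minimal Python sketch under simplifying assumptions, not the actual implementation; the diff_matches helper is hypothetical and difflib stands in for Mercurial's real diff machinery), diff(pattern) selects a revision when some line added or removed by that revision matches the pattern:

  import difflib
  import re

  def diff_matches(old_lines, new_lines, pattern):
      """True if the old->new diff adds or removes a line matching pattern."""
      pat = re.compile(pattern)
      sm = difflib.SequenceMatcher(None, old_lines, new_lines)
      for tag, alo, ahi, blo, bhi in sm.get_opcodes():
          if tag == 'equal':
              continue
          changed = old_lines[alo:ahi] + new_lines[blo:bhi]
          if any(pat.search(line) for line in changed):
              return True
      return False

  # Revision 0 of "color" adds "blue" and "black", so it is the only revision
  # matched by diff("re:^bl...$"); revisions 1-3 each add or remove "orange".
  assert diff_matches([], ["blue", "black"], r"^bl...$")
  assert diff_matches(["blue", "black"], ["blue", "black", "orange"], "orange")
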
556 580 test substring match: '^' should only match at the beginning
557 581
558 582 $ hg grep -r tip:0 '^.' --config extensions.color= --color debug
559 583 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lack
560 584 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|o]range
561 585 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lue
562 586 [grep.filename|color][grep.sep|:][grep.rev|2][grep.sep|:][grep.match|b]lack
563 587 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lue
564 588 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lack
565 589 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|o]range
566 590 [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lue
567 591 [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lack
568 592
569 593 match in last "line" without newline
570 594
571 595 $ "$PYTHON" -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();'
572 596 $ hg ci -Amnoeol
573 597 adding noeol
574 598 $ hg grep -r tip:0 loop
575 599 noeol:4:no infinite loop
576 600
577 601 $ cd ..
578 602
579 603 Issue685: traceback in grep -r after rename
580 604
581 605 Got a traceback when using grep on a single
582 606 revision with renamed files.
583 607
584 608 $ hg init issue685
585 609 $ cd issue685
586 610 $ echo octarine > color
587 611 $ hg ci -Amcolor
588 612 adding color
589 613 $ hg rename color colour
590 614 $ hg ci -Am rename
591 615 $ hg grep -r tip:0 octarine
592 616 colour:1:octarine
593 617 color:0:octarine
594 618
595 619 Used to crash here
596 620
597 621 $ hg grep -r 1 octarine
598 622 colour:1:octarine
599 623 $ cd ..
600 624
601 625
602 626 Issue337: test that grep follows parent-child relationships instead
603 627 of just using revision numbers.
604 628
605 629 $ hg init issue337
606 630 $ cd issue337
607 631
608 632 $ echo white > color
609 633 $ hg commit -A -m "0 white"
610 634 adding color
611 635
612 636 $ echo red > color
613 637 $ hg commit -A -m "1 red"
614 638
615 639 $ hg update 0
616 640 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
617 641 $ echo black > color
618 642 $ hg commit -A -m "2 black"
619 643 created new head
620 644
621 645 $ hg update --clean 1
622 646 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
623 647 $ echo blue > color
624 648 $ hg commit -A -m "3 blue"
625 649
626 650 $ hg grep --all red
627 651 color:3:-:red
628 652 color:1:+:red
629 653
630 654 $ hg grep --diff red
631 655 color:3:-:red
632 656 color:1:+:red
633 657
634 658 Issue3885: test that changing revision order does not alter the
635 659 revisions printed, just their order.
636 660
637 661 $ hg grep --all red -r "all()"
638 662 color:1:+:red
639 663 color:3:-:red
640 664
641 665 $ hg grep --all red -r "reverse(all())"
642 666 color:3:-:red
643 667 color:1:+:red
644 668
645 669 $ hg grep --diff red -r "all()"
646 670 color:1:+:red
647 671 color:3:-:red
648 672
649 673 $ hg grep --diff red -r "reverse(all())"
650 674 color:3:-:red
651 675 color:1:+:red
652 676
653 677 $ cd ..
654 678
655 679 $ hg init a
656 680 $ cd a
657 681 $ cp "$TESTDIR/binfile.bin" .
658 682 $ hg add binfile.bin
659 683 $ hg ci -m 'add binfile.bin'
660 684 $ hg grep "MaCam" --all
661 685 binfile.bin:0:+: Binary file matches
662 686
663 687 $ hg grep "MaCam" --diff
664 688 binfile.bin:0:+: Binary file matches
665 689
666 690 $ cd ..
667 691
668 692 A moved line may not be collected by "grep --diff" since it first filters
669 693 the contents to be diffed by the pattern (i.e.
670 694 "diff <(grep pat a) <(grep pat b)", not "diff a b | grep pat").
671 695 This is much faster than generating a full diff for each revision.
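
A minimal sketch of that filtering order (an assumption-laden illustration, not Mercurial's code, with difflib standing in for the real diff): because both sides are reduced to their matching lines before being diffed, a line that merely moves produces identical filtered lists and therefore no hit, which is what the test below demonstrates with "foo":

  import difflib
  import re

  old = ["foo", "bar", "baz"]
  new = ["bar", "baz", "foo"]          # "foo" moved to the end
  pat = re.compile("foo")

  # Filter first ("diff <(grep pat a) <(grep pat b)") ...
  old_hits = [l for l in old if pat.search(l)]
  new_hits = [l for l in new if pat.search(l)]

  # ... then diff the filtered lists: both are ["foo"], so every opcode is
  # 'equal' and the move is never reported.
  sm = difflib.SequenceMatcher(None, old_hits, new_hits)
  assert all(tag == 'equal' for tag, *_ in sm.get_opcodes())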
672 696
673 697 $ hg init moved-line
674 698 $ cd moved-line
675 699 $ cat <<'EOF' > a
676 700 > foo
677 701 > bar
678 702 > baz
679 703 > EOF
680 704 $ hg ci -Am initial
681 705 adding a
682 706 $ cat <<'EOF' > a
683 707 > bar
684 708 > baz
685 709 > foo
686 710 > EOF
687 711 $ hg ci -m reorder
688 712
689 713 $ hg diff -c 1
690 714 diff -r a593cc55e81b -r 69789a3b6e80 a
691 715 --- a/a Thu Jan 01 00:00:00 1970 +0000
692 716 +++ b/a Thu Jan 01 00:00:00 1970 +0000
693 717 @@ -1,3 +1,3 @@
694 718 -foo
695 719 bar
696 720 baz
697 721 +foo
698 722
699 723 can't find the move of "foo" at revision 1:
700 724
701 725 $ hg grep --diff foo -r1
702 726 [1]
703 727
704 728 "bar" isn't moved at the revisoin 1:
705 729
706 730 $ hg grep --diff bar -r1
707 731 [1]
708 732
709 733 $ cd ..
710 734
711 735 Test that the allfiles flag works
712 736
713 737 $ hg init sng
714 738 $ cd sng
715 739 $ echo "unmod" >> um
716 740 $ echo old > old
717 741 $ hg ci -q -A -m "adds unmod to um"
718 742 $ echo "something else" >> new
719 743 $ hg ci -A -m "second commit"
720 744 adding new
721 745 $ hg grep -r "." "unmod"
722 746 um:1:unmod
723 747
724 748 Existing tracked files in the working directory are searched by default
725 749
726 750 $ echo modified >> new
727 751 $ echo 'added' > added; hg add added
728 752 $ echo 'added, missing' > added-missing; hg add added-missing; rm added-missing
729 753 $ echo 'untracked' > untracked
730 754 $ hg rm old
731 755 $ hg grep ''
732 756 added:added
733 757 new:something else
734 758 new:modified
735 759 um:unmod
736 760
737 761 #if symlink
738 762 Grepping a symlink greps its destination
739 763
740 764 $ rm -f added; ln -s symlink-added added
741 765 $ hg grep '' | grep added
742 766 added:symlink-added
743 767
744 768 But, as usual, we reject symlinks as directory components of a tracked
745 769 file:
746 770
747 771 $ mkdir dir; touch dir/f; hg add dir/f
748 772 $ rm -rf dir; ln -s / dir
749 773 $ hg grep ''
750 774 abort: path 'dir/f' traverses symbolic link 'dir'
751 775 [255]
752 776 #endif
753 777
754 778 But we can search files from some other revision with -rREV
755 779
756 780 $ hg grep -r. mod
757 781 um:1:unmod
758 782
759 783 $ hg grep --diff mod
760 784 um:0:+:unmod
761 785
762 786 $ cd ..
763 787
764 788 Change the default behavior of grep via ui.tweakdefaults: files not in the
765 789 current working directory should not be grepped
766 790
767 791 $ hg init ab
768 792 $ cd ab
769 793 $ cat <<'EOF' >> .hg/hgrc
770 794 > [ui]
771 795 > tweakdefaults = True
772 796 > EOF
773 797 $ echo "some text">>file1
774 798 $ hg add file1
775 799 $ hg commit -m "adds file1"
776 800 $ hg mv file1 file2
777 801
778 802 wdir revision is hidden by default:
779 803
780 804 $ hg grep "some"
781 805 file2:some text
782 806
783 807 but it should be available in template dict:
784 808
785 809 $ hg grep "some" -Tjson
786 810 [
787 811 {
788 812 "date": [0, 0],
789 813 "lineno": 1,
790 814 "node": "ffffffffffffffffffffffffffffffffffffffff",
791 815 "path": "file2",
792 816 "rev": 2147483647,
793 817 "texts": [{"matched": true, "text": "some"}, {"matched": false, "text": " text"}],
794 818 "user": "test"
795 819 }
796 820 ]
797 821
798 822 $ cd ..
799 823
800 824 test -rMULTIREV
801 825
802 826 $ cd sng
803 827 $ hg rm um
804 828 $ hg commit -m "deletes um"
805 829 $ hg grep -r "0:2" "unmod"
806 830 um:0:unmod
807 831 um:1:unmod
808 832 $ hg grep -r "0:2" "unmod" um
809 833 um:0:unmod
810 834 um:1:unmod
811 835 $ hg grep -r "0:2" "unmod" "glob:**/um" # Check that patterns also work
812 836 um:0:unmod
813 837 um:1:unmod
814 838 $ cd ..
815 839
816 840 --follow with/without --diff and/or paths
817 841 -----------------------------------------
818 842
819 843 For each test case, we compare the history traversal of "hg log",
820 844 "hg grep --diff", and "hg grep" (--all-files).
821 845
822 846 "hg grep --diff" should traverse the log in the same way as "hg log".
823 847 "hg grep" (--all-files) is slightly different in that it includes
824 848 unmodified changes.
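
A small sketch of that difference for a single file in a single revision (a simplified model under assumptions, not Mercurial's code; grep_all_files is a hypothetical helper): --all-files reports every matching line present in the revision, whether or not the revision changed it, while --diff, as sketched earlier, reports only matching lines the revision added or removed.

  import re

  def grep_all_files(lines_at_rev, pattern):
      pat = re.compile(pattern)
      return [l for l in lines_at_rev if pat.search(l)]

  # add0-mod3 at revision 3 contains the original "data0" plus the new
  # "data3": --all-files reports both, --diff reports only "data3".
  assert grep_all_files(["data0", "data3"], "data") == ["data0", "data3"]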
825 849
826 850 $ hg init follow
827 851 $ cd follow
828 852
829 853 $ cat <<'EOF' >> .hg/hgrc
830 854 > [ui]
831 855 > logtemplate = '{rev}: {join(files % "{status} {path}", ", ")}\n'
832 856 > EOF
833 857
834 858 $ for f in add0 add0-mod1 add0-rm1 add0-mod2 add0-rm2 add0-mod3 add0-mod4 add0-rm4; do
835 859 > echo data0 >> $f
836 860 > done
837 861 $ hg ci -qAm0
838 862
839 863 $ hg cp add0 add0-cp1
840 864 $ hg cp add0 add0-cp1-mod1
841 865 $ hg cp add0 add0-cp1-mod1-rm3
842 866 $ hg rm add0-rm1
843 867 $ for f in *mod1*; do
844 868 > echo data1 >> $f
845 869 > done
846 870 $ hg ci -qAm1
847 871
848 872 $ hg update -q 0
849 873 $ hg cp add0 add0-cp2
850 874 $ hg cp add0 add0-cp2-mod2
851 875 $ hg rm add0-rm2
852 876 $ for f in *mod2*; do
853 877 > echo data2 >> $f
854 878 > done
855 879 $ hg ci -qAm2
856 880
857 881 $ hg update -q 1
858 882 $ hg cp add0-cp1 add0-cp1-cp3
859 883 $ hg cp add0-cp1-mod1 add0-cp1-mod1-cp3-mod3
860 884 $ hg rm add0-cp1-mod1-rm3
861 885 $ for f in *mod3*; do
862 886 > echo data3 >> $f
863 887 > done
864 888 $ hg ci -qAm3
865 889
866 890 $ hg cp add0 add0-cp4
867 891 $ hg cp add0 add0-cp4-mod4
868 892 $ hg rm add0-rm4
869 893 $ for f in *mod4*; do
870 894 > echo data4 >> $f
871 895 > done
872 896
873 897 $ hg log -Gr':wdir()'
874 898 o 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
875 899 |
876 900 @ 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
877 901 |
878 902 | o 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
879 903 | |
880 904 o | 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
881 905 |/
882 906 o 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
883 907
884 908
885 909 follow revision history from wdir parent:
886 910
887 911 $ hg log -f
888 912 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
889 913 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
890 914 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
891 915
892 916 $ hg grep --diff -f data
893 917 add0-cp1-mod1-cp3-mod3:3:+:data3
894 918 add0-mod3:3:+:data3
895 919 add0-cp1-mod1:1:+:data1
896 920 add0-cp1-mod1-rm3:1:+:data1
897 921 add0-mod1:1:+:data1
898 922 add0:0:+:data0
899 923 add0-mod1:0:+:data0
900 924 add0-mod2:0:+:data0
901 925 add0-mod3:0:+:data0
902 926 add0-mod4:0:+:data0
903 927 add0-rm1:0:+:data0
904 928 add0-rm2:0:+:data0
905 929 add0-rm4:0:+:data0
906 930
907 931 $ hg grep -f data
908 932 add0:3:data0
909 933 add0-cp1:3:data0
910 934 add0-cp1-cp3:3:data0
911 935 add0-cp1-mod1:3:data0
912 936 add0-cp1-mod1:3:data1
913 937 add0-cp1-mod1-cp3-mod3:3:data0
914 938 add0-cp1-mod1-cp3-mod3:3:data1
915 939 add0-cp1-mod1-cp3-mod3:3:data3
916 940 add0-mod1:3:data0
917 941 add0-mod1:3:data1
918 942 add0-mod2:3:data0
919 943 add0-mod3:3:data0
920 944 add0-mod3:3:data3
921 945 add0-mod4:3:data0
922 946 add0-rm2:3:data0
923 947 add0-rm4:3:data0
924 948 add0:1:data0
925 949 add0-cp1:1:data0
926 950 add0-cp1-mod1:1:data0
927 951 add0-cp1-mod1:1:data1
928 952 add0-cp1-mod1-rm3:1:data0
929 953 add0-cp1-mod1-rm3:1:data1
930 954 add0-mod1:1:data0
931 955 add0-mod1:1:data1
932 956 add0-mod2:1:data0
933 957 add0-mod3:1:data0
934 958 add0-mod4:1:data0
935 959 add0-rm2:1:data0
936 960 add0-rm4:1:data0
937 961 add0:0:data0
938 962 add0-mod1:0:data0
939 963 add0-mod2:0:data0
940 964 add0-mod3:0:data0
941 965 add0-mod4:0:data0
942 966 add0-rm1:0:data0
943 967 add0-rm2:0:data0
944 968 add0-rm4:0:data0
945 969
946 970 follow revision history from specified revision:
947 971
948 972 $ hg log -fr2
949 973 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
950 974 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
951 975
952 976 $ hg grep --diff -fr2 data
953 977 add0-cp2-mod2:2:+:data2
954 978 add0-mod2:2:+:data2
955 979 add0:0:+:data0
956 980 add0-mod1:0:+:data0
957 981 add0-mod2:0:+:data0
958 982 add0-mod3:0:+:data0
959 983 add0-mod4:0:+:data0
960 984 add0-rm1:0:+:data0
961 985 add0-rm2:0:+:data0
962 986 add0-rm4:0:+:data0
963 987
964 988 $ hg grep -fr2 data
965 989 add0:2:data0
966 990 add0-cp2:2:data0
967 991 add0-cp2-mod2:2:data0
968 992 add0-cp2-mod2:2:data2
969 993 add0-mod1:2:data0
970 994 add0-mod2:2:data0
971 995 add0-mod2:2:data2
972 996 add0-mod3:2:data0
973 997 add0-mod4:2:data0
974 998 add0-rm1:2:data0
975 999 add0-rm4:2:data0
976 1000 add0:0:data0
977 1001 add0-mod1:0:data0
978 1002 add0-mod2:0:data0
979 1003 add0-mod3:0:data0
980 1004 add0-mod4:0:data0
981 1005 add0-rm1:0:data0
982 1006 add0-rm2:0:data0
983 1007 add0-rm4:0:data0
984 1008
985 1009 follow revision history from wdir:
986 1010
987 1011 $ hg log -fr'wdir()'
988 1012 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
989 1013 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
990 1014 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
991 1015 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
992 1016
993 1017 BROKEN: should not abort because of removed file
994 1018 $ hg grep --diff -fr'wdir()' data
995 1019 add0-cp4-mod4:2147483647:+:data4
996 1020 add0-mod4:2147483647:+:data4
997 1021 add0-rm4:2147483647:-:abort: add0-rm4@None: not found in manifest!
998 1022 [255]
999 1023
1000 1024 $ hg grep -fr'wdir()' data
1001 1025 add0:2147483647:data0
1002 1026 add0-cp1:2147483647:data0
1003 1027 add0-cp1-cp3:2147483647:data0
1004 1028 add0-cp1-mod1:2147483647:data0
1005 1029 add0-cp1-mod1:2147483647:data1
1006 1030 add0-cp1-mod1-cp3-mod3:2147483647:data0
1007 1031 add0-cp1-mod1-cp3-mod3:2147483647:data1
1008 1032 add0-cp1-mod1-cp3-mod3:2147483647:data3
1009 1033 add0-cp4:2147483647:data0
1010 1034 add0-cp4-mod4:2147483647:data0
1011 1035 add0-cp4-mod4:2147483647:data4
1012 1036 add0-mod1:2147483647:data0
1013 1037 add0-mod1:2147483647:data1
1014 1038 add0-mod2:2147483647:data0
1015 1039 add0-mod3:2147483647:data0
1016 1040 add0-mod3:2147483647:data3
1017 1041 add0-mod4:2147483647:data0
1018 1042 add0-mod4:2147483647:data4
1019 1043 add0-rm2:2147483647:data0
1020 1044 add0:3:data0
1021 1045 add0-cp1:3:data0
1022 1046 add0-cp1-cp3:3:data0
1023 1047 add0-cp1-mod1:3:data0
1024 1048 add0-cp1-mod1:3:data1
1025 1049 add0-cp1-mod1-cp3-mod3:3:data0
1026 1050 add0-cp1-mod1-cp3-mod3:3:data1
1027 1051 add0-cp1-mod1-cp3-mod3:3:data3
1028 1052 add0-mod1:3:data0
1029 1053 add0-mod1:3:data1
1030 1054 add0-mod2:3:data0
1031 1055 add0-mod3:3:data0
1032 1056 add0-mod3:3:data3
1033 1057 add0-mod4:3:data0
1034 1058 add0-rm2:3:data0
1035 1059 add0-rm4:3:data0
1036 1060 add0:1:data0
1037 1061 add0-cp1:1:data0
1038 1062 add0-cp1-mod1:1:data0
1039 1063 add0-cp1-mod1:1:data1
1040 1064 add0-cp1-mod1-rm3:1:data0
1041 1065 add0-cp1-mod1-rm3:1:data1
1042 1066 add0-mod1:1:data0
1043 1067 add0-mod1:1:data1
1044 1068 add0-mod2:1:data0
1045 1069 add0-mod3:1:data0
1046 1070 add0-mod4:1:data0
1047 1071 add0-rm2:1:data0
1048 1072 add0-rm4:1:data0
1049 1073 add0:0:data0
1050 1074 add0-mod1:0:data0
1051 1075 add0-mod2:0:data0
1052 1076 add0-mod3:0:data0
1053 1077 add0-mod4:0:data0
1054 1078 add0-rm1:0:data0
1055 1079 add0-rm2:0:data0
1056 1080 add0-rm4:0:data0
1057 1081
1058 1082 follow revision history from multiple revisions:
1059 1083
1060 1084 $ hg log -fr'1+2'
1061 1085 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1062 1086 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1063 1087 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1064 1088
1065 1089 $ hg grep --diff -fr'1+2' data
1066 1090 add0-cp2-mod2:2:+:data2
1067 1091 add0-mod2:2:+:data2
1068 1092 add0-cp1-mod1:1:+:data1
1069 1093 add0-cp1-mod1-rm3:1:+:data1
1070 1094 add0-mod1:1:+:data1
1071 1095 add0:0:+:data0
1072 1096 add0-mod1:0:+:data0
1073 1097 add0-mod2:0:+:data0
1074 1098 add0-mod3:0:+:data0
1075 1099 add0-mod4:0:+:data0
1076 1100 add0-rm1:0:+:data0
1077 1101 add0-rm2:0:+:data0
1078 1102 add0-rm4:0:+:data0
1079 1103
1080 1104 $ hg grep -fr'1+2' data
1081 1105 add0:2:data0
1082 1106 add0-cp2:2:data0
1083 1107 add0-cp2-mod2:2:data0
1084 1108 add0-cp2-mod2:2:data2
1085 1109 add0-mod1:2:data0
1086 1110 add0-mod2:2:data0
1087 1111 add0-mod2:2:data2
1088 1112 add0-mod3:2:data0
1089 1113 add0-mod4:2:data0
1090 1114 add0-rm1:2:data0
1091 1115 add0-rm4:2:data0
1092 1116 add0:1:data0
1093 1117 add0-cp1:1:data0
1094 1118 add0-cp1-mod1:1:data0
1095 1119 add0-cp1-mod1:1:data1
1096 1120 add0-cp1-mod1-rm3:1:data0
1097 1121 add0-cp1-mod1-rm3:1:data1
1098 1122 add0-mod1:1:data0
1099 1123 add0-mod1:1:data1
1100 1124 add0-mod2:1:data0
1101 1125 add0-mod3:1:data0
1102 1126 add0-mod4:1:data0
1103 1127 add0-rm2:1:data0
1104 1128 add0-rm4:1:data0
1105 1129 add0:0:data0
1106 1130 add0-mod1:0:data0
1107 1131 add0-mod2:0:data0
1108 1132 add0-mod3:0:data0
1109 1133 add0-mod4:0:data0
1110 1134 add0-rm1:0:data0
1111 1135 add0-rm2:0:data0
1112 1136 add0-rm4:0:data0
1113 1137
1114 1138 follow file history from wdir parent, unmodified in wdir:
1115 1139
1116 1140 $ hg log -f add0-mod3
1117 1141 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1118 1142 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1119 1143
1120 1144 $ hg grep --diff -f data add0-mod3
1121 1145 add0-mod3:3:+:data3
1122 1146 add0-mod3:0:+:data0
1123 1147
1124 1148 $ hg grep -f data add0-mod3
1125 1149 add0-mod3:3:data0
1126 1150 add0-mod3:3:data3
1127 1151 add0-mod3:1:data0
1128 1152 add0-mod3:0:data0
1129 1153
1130 1154 follow file history from wdir parent, modified in wdir:
1131 1155
1132 1156 $ hg log -f add0-mod4
1133 1157 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1134 1158
1135 1159 $ hg grep --diff -f data add0-mod4
1136 1160 add0-mod4:0:+:data0
1137 1161
1138 1162 $ hg grep -f data add0-mod4
1139 1163 add0-mod4:3:data0
1140 1164 add0-mod4:1:data0
1141 1165 add0-mod4:0:data0
1142 1166
1143 1167 follow file history from wdir parent, copied but unmodified:
1144 1168
1145 1169 $ hg log -f add0-cp1-cp3
1146 1170 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1147 1171 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1148 1172 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1149 1173
1150 1174 $ hg grep --diff -f data add0-cp1-cp3
1151 1175 add0:0:+:data0
1152 1176
1153 1177 BROKEN: should follow history across renames
1154 1178 $ hg grep -f data add0-cp1-cp3
1155 1179 add0-cp1-cp3:3:data0
1156 1180
1157 1181 follow file history from wdir parent, copied and modified:
1158 1182
1159 1183 $ hg log -f add0-cp1-mod1-cp3-mod3
1160 1184 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1161 1185 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1162 1186 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1163 1187
1164 1188 $ hg grep --diff -f data add0-cp1-mod1-cp3-mod3
1165 1189 add0-cp1-mod1-cp3-mod3:3:+:data3
1166 1190 add0-cp1-mod1:1:+:data1
1167 1191 add0:0:+:data0
1168 1192
1169 1193 BROKEN: should follow history across renames
1170 1194 $ hg grep -f data add0-cp1-mod1-cp3-mod3
1171 1195 add0-cp1-mod1-cp3-mod3:3:data0
1172 1196 add0-cp1-mod1-cp3-mod3:3:data1
1173 1197 add0-cp1-mod1-cp3-mod3:3:data3
1174 1198
1175 1199 follow file history from wdir parent, copied in wdir:
1176 1200
1177 1201 $ hg log -f add0-cp4
1178 1202 abort: cannot follow nonexistent file: "add0-cp4"
1179 1203 [255]
1180 1204
1181 1205 $ hg grep --diff -f data add0-cp4
1182 1206 abort: cannot follow nonexistent file: "add0-cp4"
1183 1207 [255]
1184 1208
1185 1209 BROKEN: maybe better to abort
1186 1210 $ hg grep -f data add0-cp4
1187 1211 [1]
1188 1212
1189 1213 follow file history from wdir parent, removed:
1190 1214
1191 1215 $ hg log -f add0-cp1-mod1-rm3
1192 1216 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
1193 1217 [255]
1194 1218
1195 1219 $ hg grep --diff -f data add0-cp1-mod1-rm3
1196 1220 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
1197 1221 [255]
1198 1222
1199 1223 BROKEN: maybe better to abort
1200 1224 $ hg grep -f data add0-cp1-mod1-rm3
1201 1225 add0-cp1-mod1-rm3:1:data0
1202 1226 add0-cp1-mod1-rm3:1:data1
1203 1227
1204 1228 follow file history from wdir parent (explicit), removed:
1205 1229
1206 1230 $ hg log -fr. add0-cp1-mod1-rm3
1207 1231 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
1208 1232 [255]
1209 1233
1210 1234 $ hg grep --diff -fr. data add0-cp1-mod1-rm3
1211 1235 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
1212 1236 [255]
1213 1237
1214 1238 BROKEN: should abort
1215 1239 $ hg grep -fr. data add0-cp1-mod1-rm3
1216 1240 add0-cp1-mod1-rm3:1:data0
1217 1241 add0-cp1-mod1-rm3:1:data1
1218 1242
1219 1243 follow file history from wdir parent, removed in wdir:
1220 1244
1221 1245 $ hg log -f add0-rm4
1222 1246 abort: cannot follow file not in parent revision: "add0-rm4"
1223 1247 [255]
1224 1248
1225 1249 $ hg grep --diff -f data add0-rm4
1226 1250 abort: cannot follow file not in parent revision: "add0-rm4"
1227 1251 [255]
1228 1252
1229 1253 BROKEN: should abort
1230 1254 $ hg grep -f data add0-rm4
1231 1255 add0-rm4:3:data0
1232 1256 add0-rm4:1:data0
1233 1257 add0-rm4:0:data0
1234 1258
1235 1259 follow file history from wdir parent (explicit), removed in wdir:
1236 1260
1237 1261 $ hg log -fr. add0-rm4
1238 1262 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1239 1263
1240 1264 $ hg grep --diff -fr. data add0-rm4
1241 1265 add0-rm4:0:+:data0
1242 1266
1243 1267 $ hg grep -fr. data add0-rm4
1244 1268 add0-rm4:3:data0
1245 1269 add0-rm4:1:data0
1246 1270 add0-rm4:0:data0
1247 1271
1248 1272 follow file history from wdir parent, multiple files:
1249 1273
1250 1274 $ hg log -f add0-mod3 add0-cp1-mod1
1251 1275 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1252 1276 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1253 1277 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1254 1278
1255 1279 $ hg grep --diff -f data add0-mod3 add0-cp1-mod1
1256 1280 add0-mod3:3:+:data3
1257 1281 add0-cp1-mod1:1:+:data1
1258 1282 add0:0:+:data0
1259 1283 add0-mod3:0:+:data0
1260 1284
1261 1285 BROKEN: should follow history across renames
1262 1286 $ hg grep -f data add0-mod3 add0-cp1-mod1
1263 1287 add0-cp1-mod1:3:data0
1264 1288 add0-cp1-mod1:3:data1
1265 1289 add0-mod3:3:data0
1266 1290 add0-mod3:3:data3
1267 1291 add0-cp1-mod1:1:data0
1268 1292 add0-cp1-mod1:1:data1
1269 1293 add0-mod3:1:data0
1270 1294 add0-mod3:0:data0
1271 1295
1272 1296 follow file history from specified revision, modified:
1273 1297
1274 1298 $ hg log -fr2 add0-mod2
1275 1299 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1276 1300 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1277 1301
1278 1302 $ hg grep --diff -fr2 data add0-mod2
1279 1303 add0-mod2:2:+:data2
1280 1304 add0-mod2:0:+:data0
1281 1305
1282 1306 $ hg grep -fr2 data add0-mod2
1283 1307 add0-mod2:2:data0
1284 1308 add0-mod2:2:data2
1285 1309 add0-mod2:0:data0
1286 1310
1287 1311 follow file history from specified revision, copied but unmodified:
1288 1312
1289 1313 $ hg log -fr2 add0-cp2
1290 1314 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1291 1315 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1292 1316
1293 1317 $ hg grep --diff -fr2 data add0-cp2
1294 1318 add0:0:+:data0
1295 1319
1296 1320 BROKEN: should follow history across renames
1297 1321 $ hg grep -fr2 data add0-cp2
1298 1322 add0-cp2:2:data0
1299 1323
1300 1324 follow file history from specified revision, copied and modified:
1301 1325
1302 1326 $ hg log -fr2 add0-cp2-mod2
1303 1327 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1304 1328 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1305 1329
1306 1330 $ hg grep --diff -fr2 data add0-cp2-mod2
1307 1331 add0-cp2-mod2:2:+:data2
1308 1332 add0:0:+:data0
1309 1333
1310 1334 BROKEN: should follow history across renames
1311 1335 $ hg grep -fr2 data add0-cp2-mod2
1312 1336 add0-cp2-mod2:2:data0
1313 1337 add0-cp2-mod2:2:data2
1314 1338
1315 1339 follow file history from specified revision, removed:
1316 1340
1317 1341 $ hg log -fr2 add0-rm2
1318 1342 abort: cannot follow file not in any of the specified revisions: "add0-rm2"
1319 1343 [255]
1320 1344
1321 1345 $ hg grep --diff -fr2 data add0-rm2
1322 1346 abort: cannot follow file not in any of the specified revisions: "add0-rm2"
1323 1347 [255]
1324 1348
1325 1349 BROKEN: should abort
1326 1350 $ hg grep -fr2 data add0-rm2
1327 1351 add0-rm2:0:data0
1328 1352
1329 1353 follow file history from specified revision, multiple files:
1330 1354
1331 1355 $ hg log -fr2 add0-cp2 add0-mod2
1332 1356 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1333 1357 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1334 1358
1335 1359 $ hg grep --diff -fr2 data add0-cp2 add0-mod2
1336 1360 add0-mod2:2:+:data2
1337 1361 add0:0:+:data0
1338 1362 add0-mod2:0:+:data0
1339 1363
1340 1364 BROKEN: should follow history across renames
1341 1365 $ hg grep -fr2 data add0-cp2 add0-mod2
1342 1366 add0-cp2:2:data0
1343 1367 add0-mod2:2:data0
1344 1368 add0-mod2:2:data2
1345 1369 add0-mod2:0:data0
1346 1370
1347 1371 follow file history from wdir, unmodified:
1348 1372
1349 1373 $ hg log -fr'wdir()' add0-mod3
1350 1374 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1351 1375 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1352 1376 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1353 1377
1354 1378 $ hg grep --diff -fr'wdir()' data add0-mod3
1355 1379 add0-mod3:3:+:data3
1356 1380 add0-mod3:0:+:data0
1357 1381
1358 1382 $ hg grep -fr'wdir()' data add0-mod3
1359 1383 add0-mod3:2147483647:data0
1360 1384 add0-mod3:2147483647:data3
1361 1385 add0-mod3:3:data0
1362 1386 add0-mod3:3:data3
1363 1387 add0-mod3:1:data0
1364 1388 add0-mod3:0:data0
1365 1389
1366 1390 follow file history from wdir, modified:
1367 1391
1368 1392 $ hg log -fr'wdir()' add0-mod4
1369 1393 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1370 1394 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1371 1395
1372 1396 $ hg grep --diff -fr'wdir()' data add0-mod4
1373 1397 add0-mod4:2147483647:+:data4
1374 1398 add0-mod4:0:+:data0
1375 1399
1376 1400 $ hg grep -fr'wdir()' data add0-mod4
1377 1401 add0-mod4:2147483647:data0
1378 1402 add0-mod4:2147483647:data4
1379 1403 add0-mod4:3:data0
1380 1404 add0-mod4:1:data0
1381 1405 add0-mod4:0:data0
1382 1406
1383 1407 follow file history from wdir, copied but unmodified:
1384 1408
1385 1409 $ hg log -fr'wdir()' add0-cp4
1386 1410 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1387 1411 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1388 1412
1389 1413 $ hg grep --diff -fr'wdir()' data add0-cp4
1390 1414 add0:0:+:data0
1391 1415
1392 1416 BROKEN: should follow history across renames
1393 1417 $ hg grep -fr'wdir()' data add0-cp4
1394 1418 add0-cp4:2147483647:data0
1395 1419
1396 1420 follow file history from wdir, copied and modified:
1397 1421
1398 1422 $ hg log -fr'wdir()' add0-cp4-mod4
1399 1423 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1400 1424 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1401 1425
1402 1426 $ hg grep --diff -fr'wdir()' data add0-cp4-mod4
1403 1427 add0-cp4-mod4:2147483647:+:data4
1404 1428 add0:0:+:data0
1405 1429
1406 1430 BROKEN: should follow history across renames
1407 1431 $ hg grep -fr'wdir()' data add0-cp4-mod4
1408 1432 add0-cp4-mod4:2147483647:data0
1409 1433 add0-cp4-mod4:2147483647:data4
1410 1434
1411 1435 follow file history from wdir, multiple files:
1412 1436
1413 1437 $ hg log -fr'wdir()' add0-cp4 add0-mod4 add0-mod3
1414 1438 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1415 1439 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1416 1440 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1417 1441
1418 1442 $ hg grep --diff -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
1419 1443 add0-mod4:2147483647:+:data4
1420 1444 add0-mod3:3:+:data3
1421 1445 add0:0:+:data0
1422 1446 add0-mod3:0:+:data0
1423 1447 add0-mod4:0:+:data0
1424 1448
1425 1449 BROKEN: should follow history across renames
1426 1450 $ hg grep -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
1427 1451 add0-cp4:2147483647:data0
1428 1452 add0-mod3:2147483647:data0
1429 1453 add0-mod3:2147483647:data3
1430 1454 add0-mod4:2147483647:data0
1431 1455 add0-mod4:2147483647:data4
1432 1456 add0-mod3:3:data0
1433 1457 add0-mod3:3:data3
1434 1458 add0-mod4:3:data0
1435 1459 add0-mod3:1:data0
1436 1460 add0-mod4:1:data0
1437 1461 add0-mod3:0:data0
1438 1462 add0-mod4:0:data0
1439 1463
1440 1464 $ cd ..