revset: rename diff(pattern) to diffcontains(pattern)...
Yuya Nishihara
r46342:c0059573 stable
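A minimal usage sketch of the renamed predicate, for illustration only (the search string "TODO" is a placeholder, and the command assumes an ordinary repository checkout):

    $ hg log -r 'diffcontains("TODO")'

As the docstring in the hunk below states, this selects revisions whose diff added or removed text matching the pattern. The predicate keeps its weight of 110, so it is treated as expensive; intersecting it with a cheaper revset (a revision range, file(), etc.) limits how many revision diffs have to be searched.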
@@ -1,2793 +1,2796
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from .pycompat import getattr
14 14 from . import (
15 15 dagop,
16 16 destutil,
17 17 diffutil,
18 18 encoding,
19 19 error,
20 20 grep as grepmod,
21 21 hbisect,
22 22 match as matchmod,
23 23 node,
24 24 obsolete as obsmod,
25 25 obsutil,
26 26 pathutil,
27 27 phases,
28 28 pycompat,
29 29 registrar,
30 30 repoview,
31 31 revsetlang,
32 32 scmutil,
33 33 smartset,
34 34 stack as stackmod,
35 35 util,
36 36 )
37 37 from .utils import (
38 38 dateutil,
39 39 stringutil,
40 40 )
41 41
42 42 # helpers for processing parsed tree
43 43 getsymbol = revsetlang.getsymbol
44 44 getstring = revsetlang.getstring
45 45 getinteger = revsetlang.getinteger
46 46 getboolean = revsetlang.getboolean
47 47 getlist = revsetlang.getlist
48 48 getintrange = revsetlang.getintrange
49 49 getargs = revsetlang.getargs
50 50 getargsdict = revsetlang.getargsdict
51 51
52 52 baseset = smartset.baseset
53 53 generatorset = smartset.generatorset
54 54 spanset = smartset.spanset
55 55 fullreposet = smartset.fullreposet
56 56
57 57 # revisions not included in all(), but populated if specified
58 58 _virtualrevs = (node.nullrev, node.wdirrev)
59 59
60 60 # Constants for ordering requirement, used in getset():
61 61 #
62 62 # If 'define', any nested functions and operations MAY change the ordering of
63 63 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
64 64 # it). If 'follow', any nested functions and operations MUST take the ordering
65 65 # specified by the first operand to the '&' operator.
66 66 #
67 67 # For instance,
68 68 #
69 69 # X & (Y | Z)
70 70 # ^ ^^^^^^^
71 71 # | follow
72 72 # define
73 73 #
74 74 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
75 75 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
76 76 #
77 77 # 'any' means the order doesn't matter. For instance,
78 78 #
79 79 # (X & !Y) | ancestors(Z)
80 80 # ^ ^
81 81 # any any
82 82 #
83 83 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
84 84 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
85 85 # since 'ancestors' does not care about the order of its argument.
86 86 #
87 87 # Currently, most revsets do not care about the order, so 'define' is
88 88 # equivalent to 'follow' for them, and the resulting order is based on the
89 89 # 'subset' parameter passed down to them:
90 90 #
91 91 # m = revset.match(...)
92 92 # m(repo, subset, order=defineorder)
93 93 # ^^^^^^
94 94 # For most revsets, 'define' means using the order this subset provides
95 95 #
96 96 # There are a few revsets that always redefine the order if 'define' is
97 97 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
98 98 anyorder = b'any' # don't care about the order; could even be random-shuffled
99 99 defineorder = b'define' # ALWAYS redefine, or ALWAYS follow the current order
100 100 followorder = b'follow' # MUST follow the current order
101 101
102 102 # helpers
103 103
104 104
105 105 def getset(repo, subset, x, order=defineorder):
106 106 if not x:
107 107 raise error.ParseError(_(b"missing argument"))
108 108 return methods[x[0]](repo, subset, *x[1:], order=order)
109 109
110 110
111 111 def _getrevsource(repo, r):
112 112 extra = repo[r].extra()
113 113 for label in (b'source', b'transplant_source', b'rebase_source'):
114 114 if label in extra:
115 115 try:
116 116 return repo[extra[label]].rev()
117 117 except error.RepoLookupError:
118 118 pass
119 119 return None
120 120
121 121
122 122 def _sortedb(xs):
123 123 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
124 124
125 125
126 126 # operator methods
127 127
128 128
129 129 def stringset(repo, subset, x, order):
130 130 if not x:
131 131 raise error.ParseError(_(b"empty string is not a valid revision"))
132 132 x = scmutil.intrev(scmutil.revsymbol(repo, x))
133 133 if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
134 134 return baseset([x])
135 135 return baseset()
136 136
137 137
138 138 def rawsmartset(repo, subset, x, order):
139 139 """argument is already a smartset, use that directly"""
140 140 if order == followorder:
141 141 return subset & x
142 142 else:
143 143 return x & subset
144 144
145 145
146 146 def rangeset(repo, subset, x, y, order):
147 147 m = getset(repo, fullreposet(repo), x)
148 148 n = getset(repo, fullreposet(repo), y)
149 149
150 150 if not m or not n:
151 151 return baseset()
152 152 return _makerangeset(repo, subset, m.first(), n.last(), order)
153 153
154 154
155 155 def rangeall(repo, subset, x, order):
156 156 assert x is None
157 157 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
158 158
159 159
160 160 def rangepre(repo, subset, y, order):
161 161 # ':y' can't be rewritten to '0:y' since '0' may be hidden
162 162 n = getset(repo, fullreposet(repo), y)
163 163 if not n:
164 164 return baseset()
165 165 return _makerangeset(repo, subset, 0, n.last(), order)
166 166
167 167
168 168 def rangepost(repo, subset, x, order):
169 169 m = getset(repo, fullreposet(repo), x)
170 170 if not m:
171 171 return baseset()
172 172 return _makerangeset(
173 173 repo, subset, m.first(), repo.changelog.tiprev(), order
174 174 )
175 175
176 176
177 177 def _makerangeset(repo, subset, m, n, order):
178 178 if m == n:
179 179 r = baseset([m])
180 180 elif n == node.wdirrev:
181 181 r = spanset(repo, m, len(repo)) + baseset([n])
182 182 elif m == node.wdirrev:
183 183 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
184 184 elif m < n:
185 185 r = spanset(repo, m, n + 1)
186 186 else:
187 187 r = spanset(repo, m, n - 1)
188 188
189 189 if order == defineorder:
190 190 return r & subset
191 191 else:
192 192 # carrying the sorting over when possible would be more efficient
193 193 return subset & r
194 194
195 195
196 196 def dagrange(repo, subset, x, y, order):
197 197 r = fullreposet(repo)
198 198 xs = dagop.reachableroots(
199 199 repo, getset(repo, r, x), getset(repo, r, y), includepath=True
200 200 )
201 201 return subset & xs
202 202
203 203
204 204 def andset(repo, subset, x, y, order):
205 205 if order == anyorder:
206 206 yorder = anyorder
207 207 else:
208 208 yorder = followorder
209 209 return getset(repo, getset(repo, subset, x, order), y, yorder)
210 210
211 211
212 212 def andsmallyset(repo, subset, x, y, order):
213 213 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
214 214 if order == anyorder:
215 215 yorder = anyorder
216 216 else:
217 217 yorder = followorder
218 218 return getset(repo, getset(repo, subset, y, yorder), x, order)
219 219
220 220
221 221 def differenceset(repo, subset, x, y, order):
222 222 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
223 223
224 224
225 225 def _orsetlist(repo, subset, xs, order):
226 226 assert xs
227 227 if len(xs) == 1:
228 228 return getset(repo, subset, xs[0], order)
229 229 p = len(xs) // 2
230 230 a = _orsetlist(repo, subset, xs[:p], order)
231 231 b = _orsetlist(repo, subset, xs[p:], order)
232 232 return a + b
233 233
234 234
235 235 def orset(repo, subset, x, order):
236 236 xs = getlist(x)
237 237 if not xs:
238 238 return baseset()
239 239 if order == followorder:
240 240 # slow path to take the subset order
241 241 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
242 242 else:
243 243 return _orsetlist(repo, subset, xs, order)
244 244
245 245
246 246 def notset(repo, subset, x, order):
247 247 return subset - getset(repo, subset, x, anyorder)
248 248
249 249
250 250 def relationset(repo, subset, x, y, order):
251 251 # this is a pretty basic implementation of the 'x#y' operator, still
252 252 # experimental so undocumented. see the wiki for further ideas.
253 253 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
254 254 rel = getsymbol(y)
255 255 if rel in relations:
256 256 return relations[rel](repo, subset, x, rel, order)
257 257
258 258 relnames = [r for r in relations.keys() if len(r) > 1]
259 259 raise error.UnknownIdentifier(rel, relnames)
260 260
261 261
262 262 def _splitrange(a, b):
263 263 """Split range with bounds a and b into two ranges at 0 and return two
264 264 tuples of numbers for use as startdepth and stopdepth arguments of
265 265 revancestors and revdescendants.
266 266
267 267 >>> _splitrange(-10, -5) # [-10:-5]
268 268 ((5, 11), (None, None))
269 269 >>> _splitrange(5, 10) # [5:10]
270 270 ((None, None), (5, 11))
271 271 >>> _splitrange(-10, 10) # [-10:10]
272 272 ((0, 11), (0, 11))
273 273 >>> _splitrange(-10, 0) # [-10:0]
274 274 ((0, 11), (None, None))
275 275 >>> _splitrange(0, 10) # [0:10]
276 276 ((None, None), (0, 11))
277 277 >>> _splitrange(0, 0) # [0:0]
278 278 ((0, 1), (None, None))
279 279 >>> _splitrange(1, -1) # [1:-1]
280 280 ((None, None), (None, None))
281 281 """
282 282 ancdepths = (None, None)
283 283 descdepths = (None, None)
284 284 if a == b == 0:
285 285 ancdepths = (0, 1)
286 286 if a < 0:
287 287 ancdepths = (-min(b, 0), -a + 1)
288 288 if b > 0:
289 289 descdepths = (max(a, 0), b + 1)
290 290 return ancdepths, descdepths
291 291
292 292
293 293 def generationsrel(repo, subset, x, rel, order):
294 294 z = (b'rangeall', None)
295 295 return generationssubrel(repo, subset, x, rel, z, order)
296 296
297 297
298 298 def generationssubrel(repo, subset, x, rel, z, order):
299 299 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
300 300 # descendants() predicates
301 301 a, b = getintrange(
302 302 z,
303 303 _(b'relation subscript must be an integer or a range'),
304 304 _(b'relation subscript bounds must be integers'),
305 305 deffirst=-(dagop.maxlogdepth - 1),
306 306 deflast=+(dagop.maxlogdepth - 1),
307 307 )
308 308 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
309 309
310 310 if ancstart is None and descstart is None:
311 311 return baseset()
312 312
313 313 revs = getset(repo, fullreposet(repo), x)
314 314 if not revs:
315 315 return baseset()
316 316
317 317 if ancstart is not None and descstart is not None:
318 318 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
319 319 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
320 320 elif ancstart is not None:
321 321 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
322 322 elif descstart is not None:
323 323 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
324 324
325 325 return subset & s
326 326
327 327
328 328 def relsubscriptset(repo, subset, x, y, z, order):
329 329 # this is a pretty basic implementation of the 'x#y[z]' operator, still
330 330 # experimental so undocumented. see the wiki for further ideas.
331 331 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
332 332 rel = getsymbol(y)
333 333 if rel in subscriptrelations:
334 334 return subscriptrelations[rel](repo, subset, x, rel, z, order)
335 335
336 336 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
337 337 raise error.UnknownIdentifier(rel, relnames)
338 338
339 339
340 340 def subscriptset(repo, subset, x, y, order):
341 341 raise error.ParseError(_(b"can't use a subscript in this context"))
342 342
343 343
344 344 def listset(repo, subset, *xs, **opts):
345 345 raise error.ParseError(
346 346 _(b"can't use a list in this context"),
347 347 hint=_(b'see \'hg help "revsets.x or y"\''),
348 348 )
349 349
350 350
351 351 def keyvaluepair(repo, subset, k, v, order):
352 352 raise error.ParseError(_(b"can't use a key-value pair in this context"))
353 353
354 354
355 355 def func(repo, subset, a, b, order):
356 356 f = getsymbol(a)
357 357 if f in symbols:
358 358 func = symbols[f]
359 359 if getattr(func, '_takeorder', False):
360 360 return func(repo, subset, b, order)
361 361 return func(repo, subset, b)
362 362
363 363 keep = lambda fn: getattr(fn, '__doc__', None) is not None
364 364
365 365 syms = [s for (s, fn) in symbols.items() if keep(fn)]
366 366 raise error.UnknownIdentifier(f, syms)
367 367
368 368
369 369 # functions
370 370
371 371 # symbols are callables like:
372 372 # fn(repo, subset, x)
373 373 # with:
374 374 # repo - current repository instance
375 375 # subset - of revisions to be examined
376 376 # x - argument in tree form
377 377 symbols = revsetlang.symbols
378 378
379 379 # symbols which can't be used for a DoS attack for any given input
380 380 # (e.g. those which accept regexes as plain strings shouldn't be included)
381 381 # functions that just return a lot of changesets (like all) don't count here
382 382 safesymbols = set()
383 383
384 384 predicate = registrar.revsetpredicate()
385 385
386 386
387 387 @predicate(b'_destupdate')
388 388 def _destupdate(repo, subset, x):
389 389 # experimental revset for update destination
390 390 args = getargsdict(x, b'limit', b'clean')
391 391 return subset & baseset(
392 392 [destutil.destupdate(repo, **pycompat.strkwargs(args))[0]]
393 393 )
394 394
395 395
396 396 @predicate(b'_destmerge')
397 397 def _destmerge(repo, subset, x):
398 398 # experimental revset for merge destination
399 399 sourceset = None
400 400 if x is not None:
401 401 sourceset = getset(repo, fullreposet(repo), x)
402 402 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
403 403
404 404
405 405 @predicate(b'adds(pattern)', safe=True, weight=30)
406 406 def adds(repo, subset, x):
407 407 """Changesets that add a file matching pattern.
408 408
409 409 The pattern without explicit kind like ``glob:`` is expected to be
410 410 relative to the current directory and match against a file or a
411 411 directory.
412 412 """
413 413 # i18n: "adds" is a keyword
414 414 pat = getstring(x, _(b"adds requires a pattern"))
415 415 return checkstatus(repo, subset, pat, 'added')
416 416
417 417
418 418 @predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
419 419 def ancestor(repo, subset, x):
420 420 """A greatest common ancestor of the changesets.
421 421
422 422 Accepts 0 or more changesets.
423 423 Will return empty list when passed no args.
424 424 Greatest common ancestor of a single changeset is that changeset.
425 425 """
426 426 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
427 427 try:
428 428 anc = repo[next(reviter)]
429 429 except StopIteration:
430 430 return baseset()
431 431 for r in reviter:
432 432 anc = anc.ancestor(repo[r])
433 433
434 434 r = scmutil.intrev(anc)
435 435 if r in subset:
436 436 return baseset([r])
437 437 return baseset()
438 438
439 439
440 440 def _ancestors(
441 441 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
442 442 ):
443 443 heads = getset(repo, fullreposet(repo), x)
444 444 if not heads:
445 445 return baseset()
446 446 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
447 447 return subset & s
448 448
449 449
450 450 @predicate(b'ancestors(set[, depth])', safe=True)
451 451 def ancestors(repo, subset, x):
452 452 """Changesets that are ancestors of changesets in set, including the
453 453 given changesets themselves.
454 454
455 455 If depth is specified, the result only includes changesets up to
456 456 the specified generation.
457 457 """
458 458 # startdepth is for internal use only until we can decide the UI
459 459 args = getargsdict(x, b'ancestors', b'set depth startdepth')
460 460 if b'set' not in args:
461 461 # i18n: "ancestors" is a keyword
462 462 raise error.ParseError(_(b'ancestors takes at least 1 argument'))
463 463 startdepth = stopdepth = None
464 464 if b'startdepth' in args:
465 465 n = getinteger(
466 466 args[b'startdepth'], b"ancestors expects an integer startdepth"
467 467 )
468 468 if n < 0:
469 469 raise error.ParseError(b"negative startdepth")
470 470 startdepth = n
471 471 if b'depth' in args:
472 472 # i18n: "ancestors" is a keyword
473 473 n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
474 474 if n < 0:
475 475 raise error.ParseError(_(b"negative depth"))
476 476 stopdepth = n + 1
477 477 return _ancestors(
478 478 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
479 479 )
480 480
481 481
482 482 @predicate(b'_firstancestors', safe=True)
483 483 def _firstancestors(repo, subset, x):
484 484 # ``_firstancestors(set)``
485 485 # Like ``ancestors(set)`` but follows only the first parents.
486 486 return _ancestors(repo, subset, x, followfirst=True)
487 487
488 488
489 489 def _childrenspec(repo, subset, x, n, order):
490 490 """Changesets that are the Nth child of a changeset
491 491 in set.
492 492 """
493 493 cs = set()
494 494 for r in getset(repo, fullreposet(repo), x):
495 495 for i in range(n):
496 496 c = repo[r].children()
497 497 if len(c) == 0:
498 498 break
499 499 if len(c) > 1:
500 500 raise error.RepoLookupError(
501 501 _(b"revision in set has more than one child")
502 502 )
503 503 r = c[0].rev()
504 504 else:
505 505 cs.add(r)
506 506 return subset & cs
507 507
508 508
509 509 def ancestorspec(repo, subset, x, n, order):
510 510 """``set~n``
511 511 Changesets that are the Nth ancestor (first parents only) of a changeset
512 512 in set.
513 513 """
514 514 n = getinteger(n, _(b"~ expects a number"))
515 515 if n < 0:
516 516 # children lookup
517 517 return _childrenspec(repo, subset, x, -n, order)
518 518 ps = set()
519 519 cl = repo.changelog
520 520 for r in getset(repo, fullreposet(repo), x):
521 521 for i in range(n):
522 522 try:
523 523 r = cl.parentrevs(r)[0]
524 524 except error.WdirUnsupported:
525 525 r = repo[r].p1().rev()
526 526 ps.add(r)
527 527 return subset & ps
528 528
529 529
530 530 @predicate(b'author(string)', safe=True, weight=10)
531 531 def author(repo, subset, x):
532 532 """Alias for ``user(string)``.
533 533 """
534 534 # i18n: "author" is a keyword
535 535 n = getstring(x, _(b"author requires a string"))
536 536 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
537 537 return subset.filter(
538 538 lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n)
539 539 )
540 540
541 541
542 542 @predicate(b'bisect(string)', safe=True)
543 543 def bisect(repo, subset, x):
544 544 """Changesets marked in the specified bisect status:
545 545
546 546 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
547 547 - ``goods``, ``bads`` : csets topologically good/bad
548 548 - ``range`` : csets taking part in the bisection
549 549 - ``pruned`` : csets that are goods, bads or skipped
550 550 - ``untested`` : csets whose fate is yet unknown
551 551 - ``ignored`` : csets ignored due to DAG topology
552 552 - ``current`` : the cset currently being bisected
553 553 """
554 554 # i18n: "bisect" is a keyword
555 555 status = getstring(x, _(b"bisect requires a string")).lower()
556 556 state = set(hbisect.get(repo, status))
557 557 return subset & state
558 558
559 559
560 560 # Backward-compatibility
561 561 # - no help entry so that we do not advertise it any more
562 562 @predicate(b'bisected', safe=True)
563 563 def bisected(repo, subset, x):
564 564 return bisect(repo, subset, x)
565 565
566 566
567 567 @predicate(b'bookmark([name])', safe=True)
568 568 def bookmark(repo, subset, x):
569 569 """The named bookmark or all bookmarks.
570 570
571 571 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
572 572 """
573 573 # i18n: "bookmark" is a keyword
574 574 args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
575 575 if args:
576 576 bm = getstring(
577 577 args[0],
578 578 # i18n: "bookmark" is a keyword
579 579 _(b'the argument to bookmark must be a string'),
580 580 )
581 581 kind, pattern, matcher = stringutil.stringmatcher(bm)
582 582 bms = set()
583 583 if kind == b'literal':
584 584 if bm == pattern:
585 585 pattern = repo._bookmarks.expandname(pattern)
586 586 bmrev = repo._bookmarks.get(pattern, None)
587 587 if not bmrev:
588 588 raise error.RepoLookupError(
589 589 _(b"bookmark '%s' does not exist") % pattern
590 590 )
591 591 bms.add(repo[bmrev].rev())
592 592 else:
593 593 matchrevs = set()
594 594 for name, bmrev in pycompat.iteritems(repo._bookmarks):
595 595 if matcher(name):
596 596 matchrevs.add(bmrev)
597 597 for bmrev in matchrevs:
598 598 bms.add(repo[bmrev].rev())
599 599 else:
600 600 bms = {repo[r].rev() for r in repo._bookmarks.values()}
601 601 bms -= {node.nullrev}
602 602 return subset & bms
603 603
604 604
605 605 @predicate(b'branch(string or set)', safe=True, weight=10)
606 606 def branch(repo, subset, x):
607 607 """
608 608 All changesets belonging to the given branch or the branches of the given
609 609 changesets.
610 610
611 611 Pattern matching is supported for `string`. See
612 612 :hg:`help revisions.patterns`.
613 613 """
614 614 getbi = repo.revbranchcache().branchinfo
615 615
616 616 def getbranch(r):
617 617 try:
618 618 return getbi(r)[0]
619 619 except error.WdirUnsupported:
620 620 return repo[r].branch()
621 621
622 622 try:
623 623 b = getstring(x, b'')
624 624 except error.ParseError:
625 625 # not a string, but another revspec, e.g. tip()
626 626 pass
627 627 else:
628 628 kind, pattern, matcher = stringutil.stringmatcher(b)
629 629 if kind == b'literal':
630 630 # note: falls through to the revspec case if no branch with
631 631 # this name exists and pattern kind is not specified explicitly
632 632 if repo.branchmap().hasbranch(pattern):
633 633 return subset.filter(
634 634 lambda r: matcher(getbranch(r)),
635 635 condrepr=(b'<branch %r>', b),
636 636 )
637 637 if b.startswith(b'literal:'):
638 638 raise error.RepoLookupError(
639 639 _(b"branch '%s' does not exist") % pattern
640 640 )
641 641 else:
642 642 return subset.filter(
643 643 lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
644 644 )
645 645
646 646 s = getset(repo, fullreposet(repo), x)
647 647 b = set()
648 648 for r in s:
649 649 b.add(getbranch(r))
650 650 c = s.__contains__
651 651 return subset.filter(
652 652 lambda r: c(r) or getbranch(r) in b,
653 653 condrepr=lambda: b'<branch %r>' % _sortedb(b),
654 654 )
655 655
656 656
657 657 @predicate(b'phasedivergent()', safe=True)
658 658 def phasedivergent(repo, subset, x):
659 659 """Mutable changesets marked as successors of public changesets.
660 660
661 661 Only non-public and non-obsolete changesets can be `phasedivergent`.
662 662 (EXPERIMENTAL)
663 663 """
664 664 # i18n: "phasedivergent" is a keyword
665 665 getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
666 666 phasedivergent = obsmod.getrevs(repo, b'phasedivergent')
667 667 return subset & phasedivergent
668 668
669 669
670 670 @predicate(b'bundle()', safe=True)
671 671 def bundle(repo, subset, x):
672 672 """Changesets in the bundle.
673 673
674 674 Bundle must be specified by the -R option."""
675 675
676 676 try:
677 677 bundlerevs = repo.changelog.bundlerevs
678 678 except AttributeError:
679 679 raise error.Abort(_(b"no bundle provided - specify with -R"))
680 680 return subset & bundlerevs
681 681
682 682
683 683 def checkstatus(repo, subset, pat, field):
684 684 """Helper for status-related revsets (adds, removes, modifies).
685 685 The field parameter says which kind is desired.
686 686 """
687 687 hasset = matchmod.patkind(pat) == b'set'
688 688
689 689 mcache = [None]
690 690
691 691 def matches(x):
692 692 c = repo[x]
693 693 if not mcache[0] or hasset:
694 694 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
695 695 m = mcache[0]
696 696 fname = None
697 697
698 698 assert m is not None # help pytype
699 699 if not m.anypats() and len(m.files()) == 1:
700 700 fname = m.files()[0]
701 701 if fname is not None:
702 702 if fname not in c.files():
703 703 return False
704 704 else:
705 705 if not any(m(f) for f in c.files()):
706 706 return False
707 707 files = getattr(repo.status(c.p1().node(), c.node()), field)
708 708 if fname is not None:
709 709 if fname in files:
710 710 return True
711 711 else:
712 712 if any(m(f) for f in files):
713 713 return True
714 714
715 715 return subset.filter(
716 716 matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
717 717 )
718 718
719 719
720 720 def _children(repo, subset, parentset):
721 721 if not parentset:
722 722 return baseset()
723 723 cs = set()
724 724 pr = repo.changelog.parentrevs
725 725 minrev = parentset.min()
726 726 nullrev = node.nullrev
727 727 for r in subset:
728 728 if r <= minrev:
729 729 continue
730 730 p1, p2 = pr(r)
731 731 if p1 in parentset:
732 732 cs.add(r)
733 733 if p2 != nullrev and p2 in parentset:
734 734 cs.add(r)
735 735 return baseset(cs)
736 736
737 737
738 738 @predicate(b'children(set)', safe=True)
739 739 def children(repo, subset, x):
740 740 """Child changesets of changesets in set.
741 741 """
742 742 s = getset(repo, fullreposet(repo), x)
743 743 cs = _children(repo, subset, s)
744 744 return subset & cs
745 745
746 746
747 747 @predicate(b'closed()', safe=True, weight=10)
748 748 def closed(repo, subset, x):
749 749 """Changeset is closed.
750 750 """
751 751 # i18n: "closed" is a keyword
752 752 getargs(x, 0, 0, _(b"closed takes no arguments"))
753 753 return subset.filter(
754 754 lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>'
755 755 )
756 756
757 757
758 758 # for internal use
759 759 @predicate(b'_commonancestorheads(set)', safe=True)
760 760 def _commonancestorheads(repo, subset, x):
761 761 # This is an internal method for quickly calculating "heads(::x and
762 762 # ::y)"
763 763
764 764 # These greatest common ancestors are the same ones that the consensus bid
765 765 # merge will find.
766 766 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
767 767
768 768 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
769 769 return subset & baseset(ancs)
770 770
771 771
772 772 @predicate(b'commonancestors(set)', safe=True)
773 773 def commonancestors(repo, subset, x):
774 774 """Changesets that are ancestors of every changeset in set.
775 775 """
776 776 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
777 777 if not startrevs:
778 778 return baseset()
779 779 for r in startrevs:
780 780 subset &= dagop.revancestors(repo, baseset([r]))
781 781 return subset
782 782
783 783
784 784 @predicate(b'conflictlocal()', safe=True)
785 785 def conflictlocal(repo, subset, x):
786 786 """The local side of the merge, if currently in an unresolved merge.
787 787
788 788 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
789 789 """
790 790 getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
791 791 from . import mergestate as mergestatemod
792 792
793 793 mergestate = mergestatemod.mergestate.read(repo)
794 794 if mergestate.active() and repo.changelog.hasnode(mergestate.local):
795 795 return subset & {repo.changelog.rev(mergestate.local)}
796 796
797 797 return baseset()
798 798
799 799
800 800 @predicate(b'conflictother()', safe=True)
801 801 def conflictother(repo, subset, x):
802 802 """The other side of the merge, if currently in an unresolved merge.
803 803
804 804 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
805 805 """
806 806 getargs(x, 0, 0, _(b"conflictother takes no arguments"))
807 807 from . import mergestate as mergestatemod
808 808
809 809 mergestate = mergestatemod.mergestate.read(repo)
810 810 if mergestate.active() and repo.changelog.hasnode(mergestate.other):
811 811 return subset & {repo.changelog.rev(mergestate.other)}
812 812
813 813 return baseset()
814 814
815 815
816 816 @predicate(b'contains(pattern)', weight=100)
817 817 def contains(repo, subset, x):
818 818 """The revision's manifest contains a file matching pattern (but might not
819 819 modify it). See :hg:`help patterns` for information about file patterns.
820 820
821 821 The pattern without explicit kind like ``glob:`` is expected to be
822 822 relative to the current directory and match against a file exactly
823 823 for efficiency.
824 824 """
825 825 # i18n: "contains" is a keyword
826 826 pat = getstring(x, _(b"contains requires a pattern"))
827 827
828 828 def matches(x):
829 829 if not matchmod.patkind(pat):
830 830 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
831 831 if pats in repo[x]:
832 832 return True
833 833 else:
834 834 c = repo[x]
835 835 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
836 836 for f in c.manifest():
837 837 if m(f):
838 838 return True
839 839 return False
840 840
841 841 return subset.filter(matches, condrepr=(b'<contains %r>', pat))
842 842
843 843
844 844 @predicate(b'converted([id])', safe=True)
845 845 def converted(repo, subset, x):
846 846 """Changesets converted from the given identifier in the old repository if
847 847 present, or all converted changesets if no identifier is specified.
848 848 """
849 849
850 850 # There is exactly no chance of resolving the revision, so do a simple
851 851 # string compare and hope for the best
852 852
853 853 rev = None
854 854 # i18n: "converted" is a keyword
855 855 l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
856 856 if l:
857 857 # i18n: "converted" is a keyword
858 858 rev = getstring(l[0], _(b'converted requires a revision'))
859 859
860 860 def _matchvalue(r):
861 861 source = repo[r].extra().get(b'convert_revision', None)
862 862 return source is not None and (rev is None or source.startswith(rev))
863 863
864 864 return subset.filter(
865 865 lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev)
866 866 )
867 867
868 868
869 869 @predicate(b'date(interval)', safe=True, weight=10)
870 870 def date(repo, subset, x):
871 871 """Changesets within the interval, see :hg:`help dates`.
872 872 """
873 873 # i18n: "date" is a keyword
874 874 ds = getstring(x, _(b"date requires a string"))
875 875 dm = dateutil.matchdate(ds)
876 876 return subset.filter(
877 877 lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds)
878 878 )
879 879
880 880
881 881 @predicate(b'desc(string)', safe=True, weight=10)
882 882 def desc(repo, subset, x):
883 883 """Search commit message for string. The match is case-insensitive.
884 884
885 885 Pattern matching is supported for `string`. See
886 886 :hg:`help revisions.patterns`.
887 887 """
888 888 # i18n: "desc" is a keyword
889 889 ds = getstring(x, _(b"desc requires a string"))
890 890
891 891 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
892 892
893 893 return subset.filter(
894 894 lambda r: matcher(repo[r].description()), condrepr=(b'<desc %r>', ds)
895 895 )
896 896
897 897
898 898 def _descendants(
899 899 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
900 900 ):
901 901 roots = getset(repo, fullreposet(repo), x)
902 902 if not roots:
903 903 return baseset()
904 904 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
905 905 return subset & s
906 906
907 907
908 908 @predicate(b'descendants(set[, depth])', safe=True)
909 909 def descendants(repo, subset, x):
910 910 """Changesets which are descendants of changesets in set, including the
911 911 given changesets themselves.
912 912
913 913 If depth is specified, the result only includes changesets up to
914 914 the specified generation.
915 915 """
916 916 # startdepth is for internal use only until we can decide the UI
917 917 args = getargsdict(x, b'descendants', b'set depth startdepth')
918 918 if b'set' not in args:
919 919 # i18n: "descendants" is a keyword
920 920 raise error.ParseError(_(b'descendants takes at least 1 argument'))
921 921 startdepth = stopdepth = None
922 922 if b'startdepth' in args:
923 923 n = getinteger(
924 924 args[b'startdepth'], b"descendants expects an integer startdepth"
925 925 )
926 926 if n < 0:
927 927 raise error.ParseError(b"negative startdepth")
928 928 startdepth = n
929 929 if b'depth' in args:
930 930 # i18n: "descendants" is a keyword
931 931 n = getinteger(
932 932 args[b'depth'], _(b"descendants expects an integer depth")
933 933 )
934 934 if n < 0:
935 935 raise error.ParseError(_(b"negative depth"))
936 936 stopdepth = n + 1
937 937 return _descendants(
938 938 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
939 939 )
940 940
941 941
942 942 @predicate(b'_firstdescendants', safe=True)
943 943 def _firstdescendants(repo, subset, x):
944 944 # ``_firstdescendants(set)``
945 945 # Like ``descendants(set)`` but follows only the first parents.
946 946 return _descendants(repo, subset, x, followfirst=True)
947 947
948 948
949 949 @predicate(b'destination([set])', safe=True, weight=10)
950 950 def destination(repo, subset, x):
951 951 """Changesets that were created by a graft, transplant or rebase operation,
952 952 with the given revisions specified as the source. Omitting the optional set
953 953 is the same as passing all().
954 954 """
955 955 if x is not None:
956 956 sources = getset(repo, fullreposet(repo), x)
957 957 else:
958 958 sources = fullreposet(repo)
959 959
960 960 dests = set()
961 961
962 962 # subset contains all of the possible destinations that can be returned, so
963 963 # iterate over them and see if their source(s) were provided in the arg set.
964 964 # Even if the immediate src of r is not in the arg set, src's source (or
965 965 # further back) may be. Scanning back further than the immediate src allows
966 966 # transitive transplants and rebases to yield the same results as transitive
967 967 # grafts.
968 968 for r in subset:
969 969 src = _getrevsource(repo, r)
970 970 lineage = None
971 971
972 972 while src is not None:
973 973 if lineage is None:
974 974 lineage = list()
975 975
976 976 lineage.append(r)
977 977
978 978 # The visited lineage is a match if the current source is in the arg
979 979 # set. Since every candidate dest is visited by way of iterating
980 980 # subset, any dests further back in the lineage will be tested by a
981 981 # different iteration over subset. Likewise, if the src was already
982 982 # selected, the current lineage can be selected without going back
983 983 # further.
984 984 if src in sources or src in dests:
985 985 dests.update(lineage)
986 986 break
987 987
988 988 r = src
989 989 src = _getrevsource(repo, r)
990 990
991 991 return subset.filter(
992 992 dests.__contains__,
993 993 condrepr=lambda: b'<destination %r>' % _sortedb(dests),
994 994 )
995 995
996 996
997 @predicate(b'diff(pattern)', weight=110)
998 def diff(repo, subset, x):
997 @predicate(b'diffcontains(pattern)', weight=110)
998 def diffcontains(repo, subset, x):
999 999 """Search revision differences for when the pattern was added or removed.
1000 1000
1001 1001 The pattern may be a substring literal or a regular expression. See
1002 1002 :hg:`help revisions.patterns`.
1003 1003 """
1004 args = getargsdict(x, b'diff', b'pattern')
1004 args = getargsdict(x, b'diffcontains', b'pattern')
1005 1005 if b'pattern' not in args:
1006 # i18n: "diff" is a keyword
1007 raise error.ParseError(_(b'diff takes at least 1 argument'))
1008
1009 pattern = getstring(args[b'pattern'], _(b'diff requires a string pattern'))
1006 # i18n: "diffcontains" is a keyword
1007 raise error.ParseError(_(b'diffcontains takes at least 1 argument'))
1008
1009 pattern = getstring(
1010 args[b'pattern'], _(b'diffcontains requires a string pattern')
1011 )
1010 1012 regexp = stringutil.substringregexp(pattern, re.M)
1011 1013
1012 1014 # TODO: add support for file pattern and --follow. For example,
1013 # diff(pattern[, set]) where set may be file(pattern) or follow(pattern),
1014 # and we'll eventually add a support for narrowing files by revset?
1015 # diffcontains(pattern[, set]) where set may be file(pattern) or
1016 # follow(pattern), and we'll eventually add a support for narrowing
1017 # files by revset?
1015 1018 fmatch = matchmod.always()
1016 1019
1017 1020 def makefilematcher(ctx):
1018 1021 return fmatch
1019 1022
1020 1023 # TODO: search in a windowed way
1021 1024 searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)
1022 1025
1023 1026 def testdiff(rev):
1024 1027 # consume the generator to discard revfiles/matches cache
1025 1028 found = False
1026 1029 for fn, ctx, pstates, states in searcher.searchfiles(
1027 1030 baseset([rev]), makefilematcher
1028 1031 ):
1029 1032 if next(grepmod.difflinestates(pstates, states), None):
1030 1033 found = True
1031 1034 return found
1032 1035
1033 return subset.filter(testdiff, condrepr=(b'<diff %r>', pattern))
1036 return subset.filter(testdiff, condrepr=(b'<diffcontains %r>', pattern))
1034 1037
1035 1038
1036 1039 @predicate(b'contentdivergent()', safe=True)
1037 1040 def contentdivergent(repo, subset, x):
1038 1041 """
1039 1042 Final successors of changesets with an alternative set of final
1040 1043 successors. (EXPERIMENTAL)
1041 1044 """
1042 1045 # i18n: "contentdivergent" is a keyword
1043 1046 getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
1044 1047 contentdivergent = obsmod.getrevs(repo, b'contentdivergent')
1045 1048 return subset & contentdivergent
1046 1049
1047 1050
1048 1051 @predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
1049 1052 def expectsize(repo, subset, x, order):
1050 1053 """Return the given revset if size matches the revset size.
1051 1054 Abort if the revset doesn't expect given size.
1052 1055 size can either be an integer range or an integer.
1053 1056
1054 1057 For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
1055 1058 2 is not between 3 and 5 inclusive."""
1056 1059
1057 1060 args = getargsdict(x, b'expectsize', b'set size')
1058 1061 minsize = 0
1059 1062 maxsize = len(repo) + 1
1060 1063 err = b''
1061 1064 if b'size' not in args or b'set' not in args:
1062 1065 raise error.ParseError(_(b'invalid set of arguments'))
1063 1066 minsize, maxsize = getintrange(
1064 1067 args[b'size'],
1065 1068 _(b'expectsize requires a size range or a positive integer'),
1066 1069 _(b'size range bounds must be integers'),
1067 1070 minsize,
1068 1071 maxsize,
1069 1072 )
1070 1073 if minsize < 0 or maxsize < 0:
1071 1074 raise error.ParseError(_(b'negative size'))
1072 1075 rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
1073 1076 if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
1074 1077 err = _(b'revset size mismatch. expected between %d and %d, got %d') % (
1075 1078 minsize,
1076 1079 maxsize,
1077 1080 len(rev),
1078 1081 )
1079 1082 elif minsize == maxsize and len(rev) != minsize:
1080 1083 err = _(b'revset size mismatch. expected %d, got %d') % (
1081 1084 minsize,
1082 1085 len(rev),
1083 1086 )
1084 1087 if err:
1085 1088 raise error.RepoLookupError(err)
1086 1089 if order == followorder:
1087 1090 return subset & rev
1088 1091 else:
1089 1092 return rev & subset
1090 1093
1091 1094
1092 1095 @predicate(b'extdata(source)', safe=False, weight=100)
1093 1096 def extdata(repo, subset, x):
1094 1097 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
1095 1098 # i18n: "extdata" is a keyword
1096 1099 args = getargsdict(x, b'extdata', b'source')
1097 1100 source = getstring(
1098 1101 args.get(b'source'),
1099 1102 # i18n: "extdata" is a keyword
1100 1103 _(b'extdata takes at least 1 string argument'),
1101 1104 )
1102 1105 data = scmutil.extdatasource(repo, source)
1103 1106 return subset & baseset(data)
1104 1107
1105 1108
1106 1109 @predicate(b'extinct()', safe=True)
1107 1110 def extinct(repo, subset, x):
1108 1111 """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)
1109 1112 """
1110 1113 # i18n: "extinct" is a keyword
1111 1114 getargs(x, 0, 0, _(b"extinct takes no arguments"))
1112 1115 extincts = obsmod.getrevs(repo, b'extinct')
1113 1116 return subset & extincts
1114 1117
1115 1118
1116 1119 @predicate(b'extra(label, [value])', safe=True)
1117 1120 def extra(repo, subset, x):
1118 1121 """Changesets with the given label in the extra metadata, with the given
1119 1122 optional value.
1120 1123
1121 1124 Pattern matching is supported for `value`. See
1122 1125 :hg:`help revisions.patterns`.
1123 1126 """
1124 1127 args = getargsdict(x, b'extra', b'label value')
1125 1128 if b'label' not in args:
1126 1129 # i18n: "extra" is a keyword
1127 1130 raise error.ParseError(_(b'extra takes at least 1 argument'))
1128 1131 # i18n: "extra" is a keyword
1129 1132 label = getstring(
1130 1133 args[b'label'], _(b'first argument to extra must be a string')
1131 1134 )
1132 1135 value = None
1133 1136
1134 1137 if b'value' in args:
1135 1138 # i18n: "extra" is a keyword
1136 1139 value = getstring(
1137 1140 args[b'value'], _(b'second argument to extra must be a string')
1138 1141 )
1139 1142 kind, value, matcher = stringutil.stringmatcher(value)
1140 1143
1141 1144 def _matchvalue(r):
1142 1145 extra = repo[r].extra()
1143 1146 return label in extra and (value is None or matcher(extra[label]))
1144 1147
1145 1148 return subset.filter(
1146 1149 lambda r: _matchvalue(r), condrepr=(b'<extra[%r] %r>', label, value)
1147 1150 )
1148 1151
1149 1152
1150 1153 @predicate(b'filelog(pattern)', safe=True)
1151 1154 def filelog(repo, subset, x):
1152 1155 """Changesets connected to the specified filelog.
1153 1156
1154 1157 For performance reasons, visits only revisions mentioned in the file-level
1155 1158 filelog, rather than filtering through all changesets (much faster, but
1156 1159 doesn't include deletes or duplicate changes). For a slower, more accurate
1157 1160 result, use ``file()``.
1158 1161
1159 1162 The pattern without explicit kind like ``glob:`` is expected to be
1160 1163 relative to the current directory and match against a file exactly
1161 1164 for efficiency.
1162 1165 """
1163 1166
1164 1167 # i18n: "filelog" is a keyword
1165 1168 pat = getstring(x, _(b"filelog requires a pattern"))
1166 1169 s = set()
1167 1170 cl = repo.changelog
1168 1171
1169 1172 if not matchmod.patkind(pat):
1170 1173 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1171 1174 files = [f]
1172 1175 else:
1173 1176 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1174 1177 files = (f for f in repo[None] if m(f))
1175 1178
1176 1179 for f in files:
1177 1180 fl = repo.file(f)
1178 1181 known = {}
1179 1182 scanpos = 0
1180 1183 for fr in list(fl):
1181 1184 fn = fl.node(fr)
1182 1185 if fn in known:
1183 1186 s.add(known[fn])
1184 1187 continue
1185 1188
1186 1189 lr = fl.linkrev(fr)
1187 1190 if lr in cl:
1188 1191 s.add(lr)
1189 1192 elif scanpos is not None:
1190 1193 # lowest matching changeset is filtered, scan further
1191 1194 # ahead in changelog
1192 1195 start = max(lr, scanpos) + 1
1193 1196 scanpos = None
1194 1197 for r in cl.revs(start):
1195 1198 # minimize parsing of non-matching entries
1196 1199 if f in cl.revision(r) and f in cl.readfiles(r):
1197 1200 try:
1198 1201 # try to use manifest delta fastpath
1199 1202 n = repo[r].filenode(f)
1200 1203 if n not in known:
1201 1204 if n == fn:
1202 1205 s.add(r)
1203 1206 scanpos = r
1204 1207 break
1205 1208 else:
1206 1209 known[n] = r
1207 1210 except error.ManifestLookupError:
1208 1211 # deletion in changelog
1209 1212 continue
1210 1213
1211 1214 return subset & s
1212 1215
1213 1216
1214 1217 @predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
1215 1218 def first(repo, subset, x, order):
1216 1219 """An alias for limit().
1217 1220 """
1218 1221 return limit(repo, subset, x, order)
1219 1222
1220 1223
1221 1224 def _follow(repo, subset, x, name, followfirst=False):
1222 1225 args = getargsdict(x, name, b'file startrev')
1223 1226 revs = None
1224 1227 if b'startrev' in args:
1225 1228 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1226 1229 if b'file' in args:
1227 1230 x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
1228 1231 if revs is None:
1229 1232 revs = [None]
1230 1233 fctxs = []
1231 1234 for r in revs:
1232 1235 ctx = mctx = repo[r]
1233 1236 if r is None:
1234 1237 ctx = repo[b'.']
1235 1238 m = matchmod.match(
1236 1239 repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
1237 1240 )
1238 1241 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
1239 1242 s = dagop.filerevancestors(fctxs, followfirst)
1240 1243 else:
1241 1244 if revs is None:
1242 1245 revs = baseset([repo[b'.'].rev()])
1243 1246 s = dagop.revancestors(repo, revs, followfirst)
1244 1247
1245 1248 return subset & s
1246 1249
1247 1250
1248 1251 @predicate(b'follow([file[, startrev]])', safe=True)
1249 1252 def follow(repo, subset, x):
1250 1253 """
1251 1254 An alias for ``::.`` (ancestors of the working directory's first parent).
1252 1255 If file pattern is specified, the histories of files matching given
1253 1256 pattern in the revision given by startrev are followed, including copies.
1254 1257 """
1255 1258 return _follow(repo, subset, x, b'follow')
1256 1259
1257 1260
1258 1261 @predicate(b'_followfirst', safe=True)
1259 1262 def _followfirst(repo, subset, x):
1260 1263 # ``followfirst([file[, startrev]])``
1261 1264 # Like ``follow([file[, startrev]])`` but follows only the first parent
1262 1265 # of every revision or file revision.
1263 1266 return _follow(repo, subset, x, b'_followfirst', followfirst=True)
1264 1267
1265 1268
1266 1269 @predicate(
1267 1270 b'followlines(file, fromline:toline[, startrev=., descend=False])',
1268 1271 safe=True,
1269 1272 )
1270 1273 def followlines(repo, subset, x):
1271 1274 """Changesets modifying `file` in line range ('fromline', 'toline').
1272 1275
1273 1276 Line range corresponds to 'file' content at 'startrev' and should hence be
1274 1277 consistent with file size. If startrev is not specified, working directory's
1275 1278 parent is used.
1276 1279
1277 1280 By default, ancestors of 'startrev' are returned. If 'descend' is True,
1278 1281 descendants of 'startrev' are returned though renames are (currently) not
1279 1282 followed in this direction.
1280 1283 """
1281 1284 args = getargsdict(x, b'followlines', b'file *lines startrev descend')
1282 1285 if len(args[b'lines']) != 1:
1283 1286 raise error.ParseError(_(b"followlines requires a line range"))
1284 1287
1285 1288 rev = b'.'
1286 1289 if b'startrev' in args:
1287 1290 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1288 1291 if len(revs) != 1:
1289 1292 raise error.ParseError(
1290 1293 # i18n: "followlines" is a keyword
1291 1294 _(b"followlines expects exactly one revision")
1292 1295 )
1293 1296 rev = revs.last()
1294 1297
1295 1298 pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
1296 1299 # i18n: "followlines" is a keyword
1297 1300 msg = _(b"followlines expects exactly one file")
1298 1301 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1299 1302 fromline, toline = util.processlinerange(
1300 1303 *getintrange(
1301 1304 args[b'lines'][0],
1302 1305 # i18n: "followlines" is a keyword
1303 1306 _(b"followlines expects a line number or a range"),
1304 1307 _(b"line range bounds must be integers"),
1305 1308 )
1306 1309 )
1307 1310
1308 1311 fctx = repo[rev].filectx(fname)
1309 1312 descend = False
1310 1313 if b'descend' in args:
1311 1314 descend = getboolean(
1312 1315 args[b'descend'],
1313 1316 # i18n: "descend" is a keyword
1314 1317 _(b"descend argument must be a boolean"),
1315 1318 )
1316 1319 if descend:
1317 1320 rs = generatorset(
1318 1321 (
1319 1322 c.rev()
1320 1323 for c, _linerange in dagop.blockdescendants(
1321 1324 fctx, fromline, toline
1322 1325 )
1323 1326 ),
1324 1327 iterasc=True,
1325 1328 )
1326 1329 else:
1327 1330 rs = generatorset(
1328 1331 (
1329 1332 c.rev()
1330 1333 for c, _linerange in dagop.blockancestors(
1331 1334 fctx, fromline, toline
1332 1335 )
1333 1336 ),
1334 1337 iterasc=False,
1335 1338 )
1336 1339 return subset & rs
1337 1340
1338 1341
1339 1342 @predicate(b'all()', safe=True)
1340 1343 def getall(repo, subset, x):
1341 1344 """All changesets, the same as ``0:tip``.
1342 1345 """
1343 1346 # i18n: "all" is a keyword
1344 1347 getargs(x, 0, 0, _(b"all takes no arguments"))
1345 1348 return subset & spanset(repo) # drop "null" if any
1346 1349
1347 1350
1348 1351 @predicate(b'grep(regex)', weight=10)
1349 1352 def grep(repo, subset, x):
1350 1353 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1351 1354 to ensure special escape characters are handled correctly. Unlike
1352 1355 ``keyword(string)``, the match is case-sensitive.
1353 1356 """
1354 1357 try:
1355 1358 # i18n: "grep" is a keyword
1356 1359 gr = re.compile(getstring(x, _(b"grep requires a string")))
1357 1360 except re.error as e:
1358 1361 raise error.ParseError(
1359 1362 _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
1360 1363 )
1361 1364
1362 1365 def matches(x):
1363 1366 c = repo[x]
1364 1367 for e in c.files() + [c.user(), c.description()]:
1365 1368 if gr.search(e):
1366 1369 return True
1367 1370 return False
1368 1371
1369 1372 return subset.filter(matches, condrepr=(b'<grep %r>', gr.pattern))
1370 1373
1371 1374
1372 1375 @predicate(b'_matchfiles', safe=True)
1373 1376 def _matchfiles(repo, subset, x):
1374 1377 # _matchfiles takes a revset list of prefixed arguments:
1375 1378 #
1376 1379 # [p:foo, i:bar, x:baz]
1377 1380 #
1378 1381 # builds a match object from them and filters subset. Allowed
1379 1382 # prefixes are 'p:' for regular patterns, 'i:' for include
1380 1383 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1381 1384 # a revision identifier, or the empty string to reference the
1382 1385 # working directory, from which the match object is
1383 1386 # initialized. Use 'd:' to set the default matching mode, default
1384 1387 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1385 1388
1386 1389 l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
1387 1390 pats, inc, exc = [], [], []
1388 1391 rev, default = None, None
1389 1392 for arg in l:
1390 1393 s = getstring(arg, b"_matchfiles requires string arguments")
1391 1394 prefix, value = s[:2], s[2:]
1392 1395 if prefix == b'p:':
1393 1396 pats.append(value)
1394 1397 elif prefix == b'i:':
1395 1398 inc.append(value)
1396 1399 elif prefix == b'x:':
1397 1400 exc.append(value)
1398 1401 elif prefix == b'r:':
1399 1402 if rev is not None:
1400 1403 raise error.ParseError(
1401 1404 b'_matchfiles expected at most one revision'
1402 1405 )
1403 1406 if value == b'': # empty means working directory
1404 1407 rev = node.wdirrev
1405 1408 else:
1406 1409 rev = value
1407 1410 elif prefix == b'd:':
1408 1411 if default is not None:
1409 1412 raise error.ParseError(
1410 1413 b'_matchfiles expected at most one default mode'
1411 1414 )
1412 1415 default = value
1413 1416 else:
1414 1417 raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
1415 1418 if not default:
1416 1419 default = b'glob'
1417 1420 hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)
1418 1421
1419 1422 mcache = [None]
1420 1423
1421 1424 # This directly reads the changelog data as creating changectx for all
1422 1425 # revisions is quite expensive.
1423 1426 getfiles = repo.changelog.readfiles
1424 1427 wdirrev = node.wdirrev
1425 1428
1426 1429 def matches(x):
1427 1430 if x == wdirrev:
1428 1431 files = repo[x].files()
1429 1432 else:
1430 1433 files = getfiles(x)
1431 1434
1432 1435 if not mcache[0] or (hasset and rev is None):
1433 1436 r = x if rev is None else rev
1434 1437 mcache[0] = matchmod.match(
1435 1438 repo.root,
1436 1439 repo.getcwd(),
1437 1440 pats,
1438 1441 include=inc,
1439 1442 exclude=exc,
1440 1443 ctx=repo[r],
1441 1444 default=default,
1442 1445 )
1443 1446 m = mcache[0]
1444 1447
1445 1448 for f in files:
1446 1449 if m(f):
1447 1450 return True
1448 1451 return False
1449 1452
1450 1453 return subset.filter(
1451 1454 matches,
1452 1455 condrepr=(
1453 1456 b'<matchfiles patterns=%r, include=%r '
1454 1457 b'exclude=%r, default=%r, rev=%r>',
1455 1458 pats,
1456 1459 inc,
1457 1460 exc,
1458 1461 default,
1459 1462 rev,
1460 1463 ),
1461 1464 )
1462 1465
1463 1466
1464 1467 @predicate(b'file(pattern)', safe=True, weight=10)
1465 1468 def hasfile(repo, subset, x):
1466 1469 """Changesets affecting files matched by pattern.
1467 1470
1468 1471 For a faster but less accurate result, consider using ``filelog()``
1469 1472 instead.
1470 1473
1471 1474 This predicate uses ``glob:`` as the default kind of pattern.
1472 1475 """
1473 1476 # i18n: "file" is a keyword
1474 1477 pat = getstring(x, _(b"file requires a pattern"))
1475 1478 return _matchfiles(repo, subset, (b'string', b'p:' + pat))
1476 1479
1477 1480
1478 1481 @predicate(b'head()', safe=True)
1479 1482 def head(repo, subset, x):
1480 1483 """Changeset is a named branch head.
1481 1484 """
1482 1485 # i18n: "head" is a keyword
1483 1486 getargs(x, 0, 0, _(b"head takes no arguments"))
1484 1487 hs = set()
1485 1488 cl = repo.changelog
1486 1489 for ls in repo.branchmap().iterheads():
1487 1490 hs.update(cl.rev(h) for h in ls)
1488 1491 return subset & baseset(hs)
1489 1492
1490 1493
1491 1494 @predicate(b'heads(set)', safe=True, takeorder=True)
1492 1495 def heads(repo, subset, x, order):
1493 1496 """Members of set with no children in set.
1494 1497 """
1495 1498 # argument set should never define order
1496 1499 if order == defineorder:
1497 1500 order = followorder
1498 1501 inputset = getset(repo, fullreposet(repo), x, order=order)
1499 1502 wdirparents = None
1500 1503 if node.wdirrev in inputset:
1501 1504 # a bit slower, but not common so good enough for now
1502 1505 wdirparents = [p.rev() for p in repo[None].parents()]
1503 1506 inputset = set(inputset)
1504 1507 inputset.discard(node.wdirrev)
1505 1508 heads = repo.changelog.headrevs(inputset)
1506 1509 if wdirparents is not None:
1507 1510 heads.difference_update(wdirparents)
1508 1511 heads.add(node.wdirrev)
1509 1512 heads = baseset(heads)
1510 1513 return subset & heads
1511 1514
1512 1515
1513 1516 @predicate(b'hidden()', safe=True)
1514 1517 def hidden(repo, subset, x):
1515 1518 """Hidden changesets.
1516 1519 """
1517 1520 # i18n: "hidden" is a keyword
1518 1521 getargs(x, 0, 0, _(b"hidden takes no arguments"))
1519 1522 hiddenrevs = repoview.filterrevs(repo, b'visible')
1520 1523 return subset & hiddenrevs
1521 1524
1522 1525
1523 1526 @predicate(b'keyword(string)', safe=True, weight=10)
1524 1527 def keyword(repo, subset, x):
1525 1528 """Search commit message, user name, and names of changed files for
1526 1529 string. The match is case-insensitive.
1527 1530
1528 1531 For a regular expression or case sensitive search of these fields, use
1529 1532 ``grep(regex)``.
1530 1533 """
1531 1534 # i18n: "keyword" is a keyword
1532 1535 kw = encoding.lower(getstring(x, _(b"keyword requires a string")))
1533 1536
1534 1537 def matches(r):
1535 1538 c = repo[r]
1536 1539 return any(
1537 1540 kw in encoding.lower(t)
1538 1541 for t in c.files() + [c.user(), c.description()]
1539 1542 )
1540 1543
1541 1544 return subset.filter(matches, condrepr=(b'<keyword %r>', kw))
1542 1545
1543 1546
1544 1547 @predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1545 1548 def limit(repo, subset, x, order):
1546 1549 """First n members of set, defaulting to 1, starting from offset.
1547 1550 """
1548 1551 args = getargsdict(x, b'limit', b'set n offset')
1549 1552 if b'set' not in args:
1550 1553 # i18n: "limit" is a keyword
1551 1554 raise error.ParseError(_(b"limit requires one to three arguments"))
1552 1555 # i18n: "limit" is a keyword
1553 1556 lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
1554 1557 if lim < 0:
1555 1558 raise error.ParseError(_(b"negative number to select"))
1556 1559 # i18n: "limit" is a keyword
1557 1560 ofs = getinteger(
1558 1561 args.get(b'offset'), _(b"limit expects a number"), default=0
1559 1562 )
1560 1563 if ofs < 0:
1561 1564 raise error.ParseError(_(b"negative offset"))
1562 1565 os = getset(repo, fullreposet(repo), args[b'set'])
1563 1566 ls = os.slice(ofs, ofs + lim)
1564 1567 if order == followorder and lim > 1:
1565 1568 return subset & ls
1566 1569 return ls & subset
1567 1570
1568 1571
1569 1572 @predicate(b'last(set, [n])', safe=True, takeorder=True)
1570 1573 def last(repo, subset, x, order):
1571 1574 """Last n members of set, defaulting to 1.
1572 1575 """
1573 1576 # i18n: "last" is a keyword
1574 1577 l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
1575 1578 lim = 1
1576 1579 if len(l) == 2:
1577 1580 # i18n: "last" is a keyword
1578 1581 lim = getinteger(l[1], _(b"last expects a number"))
1579 1582 if lim < 0:
1580 1583 raise error.ParseError(_(b"negative number to select"))
1581 1584 os = getset(repo, fullreposet(repo), l[0])
1582 1585 os.reverse()
1583 1586 ls = os.slice(0, lim)
1584 1587 if order == followorder and lim > 1:
1585 1588 return subset & ls
1586 1589 ls.reverse()
1587 1590 return ls & subset
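# Illustrative sketch, not part of revset.py: last() above picks the last
# `lim` members while keeping their original order, by reversing, slicing,
# and reversing again. The same idea over a plain list:
def _last_sketch(revs, lim=1):
    """Return the last `lim` items of `revs` in their original order.

    >>> _last_sketch([0, 1, 2, 3, 4], lim=2)
    [3, 4]
    """
    if lim < 0:
        raise ValueError('negative number to select')
    rev = list(reversed(revs))
    return list(reversed(rev[:lim]))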
1588 1591
1589 1592
1590 1593 @predicate(b'max(set)', safe=True)
1591 1594 def maxrev(repo, subset, x):
1592 1595 """Changeset with highest revision number in set.
1593 1596 """
1594 1597 os = getset(repo, fullreposet(repo), x)
1595 1598 try:
1596 1599 m = os.max()
1597 1600 if m in subset:
1598 1601 return baseset([m], datarepr=(b'<max %r, %r>', subset, os))
1599 1602 except ValueError:
1600 1603 # os.max() throws a ValueError when the collection is empty.
1601 1604 # Same as python's max().
1602 1605 pass
1603 1606 return baseset(datarepr=(b'<max %r, %r>', subset, os))
1604 1607
1605 1608
1606 1609 @predicate(b'merge()', safe=True)
1607 1610 def merge(repo, subset, x):
1608 1611 """Changeset is a merge changeset.
1609 1612 """
1610 1613 # i18n: "merge" is a keyword
1611 1614 getargs(x, 0, 0, _(b"merge takes no arguments"))
1612 1615 cl = repo.changelog
1613 1616 nullrev = node.nullrev
1614 1617
1615 1618 def ismerge(r):
1616 1619 try:
1617 1620 return cl.parentrevs(r)[1] != nullrev
1618 1621 except error.WdirUnsupported:
1619 1622 return bool(repo[r].p2())
1620 1623
1621 1624 return subset.filter(ismerge, condrepr=b'<merge>')
1622 1625
1623 1626
1624 1627 @predicate(b'branchpoint()', safe=True)
1625 1628 def branchpoint(repo, subset, x):
1626 1629 """Changesets with more than one child.
1627 1630 """
1628 1631 # i18n: "branchpoint" is a keyword
1629 1632 getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
1630 1633 cl = repo.changelog
1631 1634 if not subset:
1632 1635 return baseset()
1633 1636 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1634 1637 # (and if it is not, it should.)
1635 1638 baserev = min(subset)
1636 1639 parentscount = [0] * (len(repo) - baserev)
1637 1640 for r in cl.revs(start=baserev + 1):
1638 1641 for p in cl.parentrevs(r):
1639 1642 if p >= baserev:
1640 1643 parentscount[p - baserev] += 1
1641 1644 return subset.filter(
1642 1645 lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
1643 1646 )
1644 1647
1645 1648
1646 1649 @predicate(b'min(set)', safe=True)
1647 1650 def minrev(repo, subset, x):
1648 1651 """Changeset with lowest revision number in set.
1649 1652 """
1650 1653 os = getset(repo, fullreposet(repo), x)
1651 1654 try:
1652 1655 m = os.min()
1653 1656 if m in subset:
1654 1657 return baseset([m], datarepr=(b'<min %r, %r>', subset, os))
1655 1658 except ValueError:
1656 1659 # os.min() throws a ValueError when the collection is empty.
1657 1660 # Same as python's min().
1658 1661 pass
1659 1662 return baseset(datarepr=(b'<min %r, %r>', subset, os))
1660 1663
1661 1664
1662 1665 @predicate(b'modifies(pattern)', safe=True, weight=30)
1663 1666 def modifies(repo, subset, x):
1664 1667 """Changesets modifying files matched by pattern.
1665 1668
1666 1669 The pattern without explicit kind like ``glob:`` is expected to be
1667 1670 relative to the current directory and match against a file or a
1668 1671 directory.
1669 1672 """
1670 1673 # i18n: "modifies" is a keyword
1671 1674 pat = getstring(x, _(b"modifies requires a pattern"))
1672 1675 return checkstatus(repo, subset, pat, 'modified')
1673 1676
1674 1677
1675 1678 @predicate(b'named(namespace)')
1676 1679 def named(repo, subset, x):
1677 1680 """The changesets in a given namespace.
1678 1681
1679 1682 Pattern matching is supported for `namespace`. See
1680 1683 :hg:`help revisions.patterns`.
1681 1684 """
1682 1685 # i18n: "named" is a keyword
1683 1686 args = getargs(x, 1, 1, _(b'named requires a namespace argument'))
1684 1687
1685 1688 ns = getstring(
1686 1689 args[0],
1687 1690 # i18n: "named" is a keyword
1688 1691 _(b'the argument to named must be a string'),
1689 1692 )
1690 1693 kind, pattern, matcher = stringutil.stringmatcher(ns)
1691 1694 namespaces = set()
1692 1695 if kind == b'literal':
1693 1696 if pattern not in repo.names:
1694 1697 raise error.RepoLookupError(
1695 1698 _(b"namespace '%s' does not exist") % ns
1696 1699 )
1697 1700 namespaces.add(repo.names[pattern])
1698 1701 else:
1699 1702 for name, ns in pycompat.iteritems(repo.names):
1700 1703 if matcher(name):
1701 1704 namespaces.add(ns)
1702 1705
1703 1706 names = set()
1704 1707 for ns in namespaces:
1705 1708 for name in ns.listnames(repo):
1706 1709 if name not in ns.deprecated:
1707 1710 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1708 1711
1709 1712 names -= {node.nullrev}
1710 1713 return subset & names
1711 1714
1712 1715
1713 1716 @predicate(b'id(string)', safe=True)
1714 1717 def node_(repo, subset, x):
1715 1718 """Revision non-ambiguously specified by the given hex string prefix.
1716 1719 """
1717 1720 # i18n: "id" is a keyword
1718 1721 l = getargs(x, 1, 1, _(b"id requires one argument"))
1719 1722 # i18n: "id" is a keyword
1720 1723 n = getstring(l[0], _(b"id requires a string"))
1721 1724 if len(n) == 40:
1722 1725 try:
1723 1726 rn = repo.changelog.rev(node.bin(n))
1724 1727 except error.WdirUnsupported:
1725 1728 rn = node.wdirrev
1726 1729 except (LookupError, TypeError):
1727 1730 rn = None
1728 1731 else:
1729 1732 rn = None
1730 1733 try:
1731 1734 pm = scmutil.resolvehexnodeidprefix(repo, n)
1732 1735 if pm is not None:
1733 1736 rn = repo.changelog.rev(pm)
1734 1737 except LookupError:
1735 1738 pass
1736 1739 except error.WdirUnsupported:
1737 1740 rn = node.wdirrev
1738 1741
1739 1742 if rn is None:
1740 1743 return baseset()
1741 1744 result = baseset([rn])
1742 1745 return result & subset
1743 1746
1744 1747
1745 1748 @predicate(b'none()', safe=True)
1746 1749 def none(repo, subset, x):
1747 1750 """No changesets.
1748 1751 """
1749 1752 # i18n: "none" is a keyword
1750 1753 getargs(x, 0, 0, _(b"none takes no arguments"))
1751 1754 return baseset()
1752 1755
1753 1756
1754 1757 @predicate(b'obsolete()', safe=True)
1755 1758 def obsolete(repo, subset, x):
1756 1759 """Mutable changeset with a newer version. (EXPERIMENTAL)"""
1757 1760 # i18n: "obsolete" is a keyword
1758 1761 getargs(x, 0, 0, _(b"obsolete takes no arguments"))
1759 1762 obsoletes = obsmod.getrevs(repo, b'obsolete')
1760 1763 return subset & obsoletes
1761 1764
1762 1765
1763 1766 @predicate(b'only(set, [set])', safe=True)
1764 1767 def only(repo, subset, x):
1765 1768 """Changesets that are ancestors of the first set that are not ancestors
1766 1769 of any other head in the repo. If a second set is specified, the result
1767 1770 is ancestors of the first set that are not ancestors of the second set
1768 1771 (i.e. ::<set1> - ::<set2>).
1769 1772 """
1770 1773 cl = repo.changelog
1771 1774 # i18n: "only" is a keyword
1772 1775 args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
1773 1776 include = getset(repo, fullreposet(repo), args[0])
1774 1777 if len(args) == 1:
1775 1778 if not include:
1776 1779 return baseset()
1777 1780
1778 1781 descendants = set(dagop.revdescendants(repo, include, False))
1779 1782 exclude = [
1780 1783 rev
1781 1784 for rev in cl.headrevs()
1782 1785 if rev not in descendants and rev not in include
1783 1786 ]
1784 1787 else:
1785 1788 exclude = getset(repo, fullreposet(repo), args[1])
1786 1789
1787 1790 results = set(cl.findmissingrevs(common=exclude, heads=include))
1788 1791 # XXX we should turn this into a baseset instead of a set, smartset may do
1789 1792 # some optimizations from the fact this is a baseset.
1790 1793 return subset & results
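# Illustrative sketch, not part of revset.py: with two arguments, only() above
# computes "ancestors of set1 that are not ancestors of set2", i.e.
# ::<set1> - ::<set2>. A standalone model using an explicit ancestor closure
# (the example graph is an assumption):
def _only_sketch(include, exclude, parentsof):
    """Return ancestors of `include` (inclusive) minus ancestors of `exclude`.

    >>> parents = {0: [], 1: [0], 2: [1], 3: [0]}
    >>> sorted(_only_sketch({2}, {3}, parents))
    [1, 2]
    """
    def closure(revs):
        seen = set()
        stack = list(revs)
        while stack:
            r = stack.pop()
            if r not in seen:
                seen.add(r)
                stack.extend(parentsof[r])
        return seen

    return closure(include) - closure(exclude)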
1791 1794
1792 1795
1793 1796 @predicate(b'origin([set])', safe=True)
1794 1797 def origin(repo, subset, x):
1795 1798 """
1796 1799 Changesets that were specified as a source for the grafts, transplants or
1797 1800 rebases that created the given revisions. Omitting the optional set is the
1798 1801 same as passing all(). If a changeset created by these operations is itself
1799 1802 specified as a source for one of these operations, only the source changeset
1800 1803 for the first operation is selected.
1801 1804 """
1802 1805 if x is not None:
1803 1806 dests = getset(repo, fullreposet(repo), x)
1804 1807 else:
1805 1808 dests = fullreposet(repo)
1806 1809
1807 1810 def _firstsrc(rev):
1808 1811 src = _getrevsource(repo, rev)
1809 1812 if src is None:
1810 1813 return None
1811 1814
1812 1815 while True:
1813 1816 prev = _getrevsource(repo, src)
1814 1817
1815 1818 if prev is None:
1816 1819 return src
1817 1820 src = prev
1818 1821
1819 1822 o = {_firstsrc(r) for r in dests}
1820 1823 o -= {None}
1821 1824 # XXX we should turn this into a baseset instead of a set, smartset may do
1822 1825 # some optimizations from the fact this is a baseset.
1823 1826 return subset & o
1824 1827
1825 1828
1826 1829 @predicate(b'outgoing([path])', safe=False, weight=10)
1827 1830 def outgoing(repo, subset, x):
1828 1831 """Changesets not found in the specified destination repository, or the
1829 1832 default push location.
1830 1833 """
1831 1834 # Avoid cycles.
1832 1835 from . import (
1833 1836 discovery,
1834 1837 hg,
1835 1838 )
1836 1839
1837 1840 # i18n: "outgoing" is a keyword
1838 1841 l = getargs(x, 0, 1, _(b"outgoing takes one or no arguments"))
1839 1842 # i18n: "outgoing" is a keyword
1840 1843 dest = (
1841 1844 l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
1842 1845 )
1843 1846 if not dest:
1844 1847 # ui.paths.getpath() explicitly tests for None, not just a boolean
1845 1848 dest = None
1846 1849 path = repo.ui.paths.getpath(dest, default=(b'default-push', b'default'))
1847 1850 if not path:
1848 1851 raise error.Abort(
1849 1852 _(b'default repository not configured!'),
1850 1853 hint=_(b"see 'hg help config.paths'"),
1851 1854 )
1852 1855 dest = path.pushloc or path.loc
1853 1856 branches = path.branch, []
1854 1857
1855 1858 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1856 1859 if revs:
1857 1860 revs = [repo.lookup(rev) for rev in revs]
1858 1861 other = hg.peer(repo, {}, dest)
1859 1862 repo.ui.pushbuffer()
1860 1863 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1861 1864 repo.ui.popbuffer()
1862 1865 cl = repo.changelog
1863 1866 o = {cl.rev(r) for r in outgoing.missing}
1864 1867 return subset & o
1865 1868
1866 1869
1867 1870 @predicate(b'p1([set])', safe=True)
1868 1871 def p1(repo, subset, x):
1869 1872 """First parent of changesets in set, or the working directory.
1870 1873 """
1871 1874 if x is None:
1872 1875 p = repo[x].p1().rev()
1873 1876 if p >= 0:
1874 1877 return subset & baseset([p])
1875 1878 return baseset()
1876 1879
1877 1880 ps = set()
1878 1881 cl = repo.changelog
1879 1882 for r in getset(repo, fullreposet(repo), x):
1880 1883 try:
1881 1884 ps.add(cl.parentrevs(r)[0])
1882 1885 except error.WdirUnsupported:
1883 1886 ps.add(repo[r].p1().rev())
1884 1887 ps -= {node.nullrev}
1885 1888 # XXX we should turn this into a baseset instead of a set, smartset may do
1886 1889 # some optimizations from the fact this is a baseset.
1887 1890 return subset & ps
1888 1891
1889 1892
1890 1893 @predicate(b'p2([set])', safe=True)
1891 1894 def p2(repo, subset, x):
1892 1895 """Second parent of changesets in set, or the working directory.
1893 1896 """
1894 1897 if x is None:
1895 1898 ps = repo[x].parents()
1896 1899 try:
1897 1900 p = ps[1].rev()
1898 1901 if p >= 0:
1899 1902 return subset & baseset([p])
1900 1903 return baseset()
1901 1904 except IndexError:
1902 1905 return baseset()
1903 1906
1904 1907 ps = set()
1905 1908 cl = repo.changelog
1906 1909 for r in getset(repo, fullreposet(repo), x):
1907 1910 try:
1908 1911 ps.add(cl.parentrevs(r)[1])
1909 1912 except error.WdirUnsupported:
1910 1913 parents = repo[r].parents()
1911 1914 if len(parents) == 2:
1912 1915 ps.add(parents[1].rev())
1913 1916 ps -= {node.nullrev}
1914 1917 # XXX we should turn this into a baseset instead of a set, smartset may do
1915 1918 # some optimizations from the fact this is a baseset.
1916 1919 return subset & ps
1917 1920
1918 1921
1919 1922 def parentpost(repo, subset, x, order):
1920 1923 return p1(repo, subset, x)
1921 1924
1922 1925
1923 1926 @predicate(b'parents([set])', safe=True)
1924 1927 def parents(repo, subset, x):
1925 1928 """
1926 1929 The set of all parents for all changesets in set, or the working directory.
1927 1930 """
1928 1931 if x is None:
1929 1932 ps = {p.rev() for p in repo[x].parents()}
1930 1933 else:
1931 1934 ps = set()
1932 1935 cl = repo.changelog
1933 1936 up = ps.update
1934 1937 parentrevs = cl.parentrevs
1935 1938 for r in getset(repo, fullreposet(repo), x):
1936 1939 try:
1937 1940 up(parentrevs(r))
1938 1941 except error.WdirUnsupported:
1939 1942 up(p.rev() for p in repo[r].parents())
1940 1943 ps -= {node.nullrev}
1941 1944 return subset & ps
1942 1945
1943 1946
1944 1947 def _phase(repo, subset, *targets):
1945 1948 """helper to select all rev in <targets> phases"""
1946 1949 return repo._phasecache.getrevset(repo, targets, subset)
1947 1950
1948 1951
1949 1952 @predicate(b'_phase(idx)', safe=True)
1950 1953 def phase(repo, subset, x):
1951 1954 l = getargs(x, 1, 1, b"_phase requires one argument")
1952 1955 target = getinteger(l[0], b"_phase expects a number")
1953 1956 return _phase(repo, subset, target)
1954 1957
1955 1958
1956 1959 @predicate(b'draft()', safe=True)
1957 1960 def draft(repo, subset, x):
1958 1961 """Changeset in draft phase."""
1959 1962 # i18n: "draft" is a keyword
1960 1963 getargs(x, 0, 0, _(b"draft takes no arguments"))
1961 1964 target = phases.draft
1962 1965 return _phase(repo, subset, target)
1963 1966
1964 1967
1965 1968 @predicate(b'secret()', safe=True)
1966 1969 def secret(repo, subset, x):
1967 1970 """Changeset in secret phase."""
1968 1971 # i18n: "secret" is a keyword
1969 1972 getargs(x, 0, 0, _(b"secret takes no arguments"))
1970 1973 target = phases.secret
1971 1974 return _phase(repo, subset, target)
1972 1975
1973 1976
1974 1977 @predicate(b'stack([revs])', safe=True)
1975 1978 def stack(repo, subset, x):
1976 1979 """Experimental revset for the stack of changesets or working directory
1977 1980 parent. (EXPERIMENTAL)
1978 1981 """
1979 1982 if x is None:
1980 1983 stacks = stackmod.getstack(repo)
1981 1984 else:
1982 1985 stacks = smartset.baseset([])
1983 1986 for revision in getset(repo, fullreposet(repo), x):
1984 1987 currentstack = stackmod.getstack(repo, revision)
1985 1988 stacks = stacks + currentstack
1986 1989
1987 1990 return subset & stacks
1988 1991
1989 1992
1990 1993 def parentspec(repo, subset, x, n, order):
1991 1994 """``set^0``
1992 1995 The set.
1993 1996 ``set^1`` (or ``set^``), ``set^2``
1994 1997 First or second parent, respectively, of all changesets in set.
1995 1998 """
1996 1999 try:
1997 2000 n = int(n[1])
1998 2001 if n not in (0, 1, 2):
1999 2002 raise ValueError
2000 2003 except (TypeError, ValueError):
2001 2004 raise error.ParseError(_(b"^ expects a number 0, 1, or 2"))
2002 2005 ps = set()
2003 2006 cl = repo.changelog
2004 2007 for r in getset(repo, fullreposet(repo), x):
2005 2008 if n == 0:
2006 2009 ps.add(r)
2007 2010 elif n == 1:
2008 2011 try:
2009 2012 ps.add(cl.parentrevs(r)[0])
2010 2013 except error.WdirUnsupported:
2011 2014 ps.add(repo[r].p1().rev())
2012 2015 else:
2013 2016 try:
2014 2017 parents = cl.parentrevs(r)
2015 2018 if parents[1] != node.nullrev:
2016 2019 ps.add(parents[1])
2017 2020 except error.WdirUnsupported:
2018 2021 parents = repo[r].parents()
2019 2022 if len(parents) == 2:
2020 2023 ps.add(parents[1].rev())
2021 2024 return subset & ps
2022 2025
2023 2026
2024 2027 @predicate(b'present(set)', safe=True, takeorder=True)
2025 2028 def present(repo, subset, x, order):
2026 2029 """An empty set, if any revision in set isn't found; otherwise,
2027 2030 all revisions in set.
2028 2031
2029 2032 If any of the specified revisions is not present in the local repository,
2030 2033 the query is normally aborted. But this predicate allows the query
2031 2034 to continue even in such cases.
2032 2035 """
2033 2036 try:
2034 2037 return getset(repo, subset, x, order)
2035 2038 except error.RepoLookupError:
2036 2039 return baseset()
2037 2040
2038 2041
2039 2042 # for internal use
2040 2043 @predicate(b'_notpublic', safe=True)
2041 2044 def _notpublic(repo, subset, x):
2042 2045 getargs(x, 0, 0, b"_notpublic takes no arguments")
2043 2046 return _phase(repo, subset, phases.draft, phases.secret)
2044 2047
2045 2048
2046 2049 # for internal use
2047 2050 @predicate(b'_phaseandancestors(phasename, set)', safe=True)
2048 2051 def _phaseandancestors(repo, subset, x):
2049 2052 # equivalent to (phasename() & ancestors(set)) but more efficient
2050 2053 # phasename could be one of 'draft', 'secret', or '_notpublic'
2051 2054 args = getargs(x, 2, 2, b"_phaseandancestors requires two arguments")
2052 2055 phasename = getsymbol(args[0])
2053 2056 s = getset(repo, fullreposet(repo), args[1])
2054 2057
2055 2058 draft = phases.draft
2056 2059 secret = phases.secret
2057 2060 phasenamemap = {
2058 2061 b'_notpublic': draft,
2059 2062 b'draft': draft, # follow secret's ancestors
2060 2063 b'secret': secret,
2061 2064 }
2062 2065 if phasename not in phasenamemap:
2063 2066 raise error.ParseError(b'%r is not a valid phasename' % phasename)
2064 2067
2065 2068 minimalphase = phasenamemap[phasename]
2066 2069 getphase = repo._phasecache.phase
2067 2070
2068 2071 def cutfunc(rev):
2069 2072 return getphase(repo, rev) < minimalphase
2070 2073
2071 2074 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
2072 2075
2073 2076 if phasename == b'draft': # need to remove secret changesets
2074 2077 revs = revs.filter(lambda r: getphase(repo, r) == draft)
2075 2078 return subset & revs
2076 2079
2077 2080
2078 2081 @predicate(b'public()', safe=True)
2079 2082 def public(repo, subset, x):
2080 2083 """Changeset in public phase."""
2081 2084 # i18n: "public" is a keyword
2082 2085 getargs(x, 0, 0, _(b"public takes no arguments"))
2083 2086 return _phase(repo, subset, phases.public)
2084 2087
2085 2088
2086 2089 @predicate(b'remote([id [,path]])', safe=False)
2087 2090 def remote(repo, subset, x):
2088 2091 """Local revision that corresponds to the given identifier in a
2089 2092 remote repository, if present. Here, the '.' identifier is a
2090 2093 synonym for the current local branch.
2091 2094 """
2092 2095
2093 2096 from . import hg # avoid start-up nasties
2094 2097
2095 2098 # i18n: "remote" is a keyword
2096 2099 l = getargs(x, 0, 2, _(b"remote takes zero, one, or two arguments"))
2097 2100
2098 2101 q = b'.'
2099 2102 if len(l) > 0:
2100 2103 # i18n: "remote" is a keyword
2101 2104 q = getstring(l[0], _(b"remote requires a string id"))
2102 2105 if q == b'.':
2103 2106 q = repo[b'.'].branch()
2104 2107
2105 2108 dest = b''
2106 2109 if len(l) > 1:
2107 2110 # i18n: "remote" is a keyword
2108 2111 dest = getstring(l[1], _(b"remote requires a repository path"))
2109 2112 dest = repo.ui.expandpath(dest or b'default')
2110 2113 dest, branches = hg.parseurl(dest)
2111 2114
2112 2115 other = hg.peer(repo, {}, dest)
2113 2116 n = other.lookup(q)
2114 2117 if n in repo:
2115 2118 r = repo[n].rev()
2116 2119 if r in subset:
2117 2120 return baseset([r])
2118 2121 return baseset()
2119 2122
2120 2123
2121 2124 @predicate(b'removes(pattern)', safe=True, weight=30)
2122 2125 def removes(repo, subset, x):
2123 2126 """Changesets which remove files matching pattern.
2124 2127
2125 2128 The pattern without explicit kind like ``glob:`` is expected to be
2126 2129 relative to the current directory and match against a file or a
2127 2130 directory.
2128 2131 """
2129 2132 # i18n: "removes" is a keyword
2130 2133 pat = getstring(x, _(b"removes requires a pattern"))
2131 2134 return checkstatus(repo, subset, pat, 'removed')
2132 2135
2133 2136
2134 2137 @predicate(b'rev(number)', safe=True)
2135 2138 def rev(repo, subset, x):
2136 2139 """Revision with the given numeric identifier."""
2137 2140 try:
2138 2141 return _rev(repo, subset, x)
2139 2142 except error.RepoLookupError:
2140 2143 return baseset()
2141 2144
2142 2145
2143 2146 @predicate(b'_rev(number)', safe=True)
2144 2147 def _rev(repo, subset, x):
2145 2148 # internal version of "rev(x)" that raises an error if "x" is invalid
2146 2149 # i18n: "rev" is a keyword
2147 2150 l = getargs(x, 1, 1, _(b"rev requires one argument"))
2148 2151 try:
2149 2152 # i18n: "rev" is a keyword
2150 2153 l = int(getstring(l[0], _(b"rev requires a number")))
2151 2154 except (TypeError, ValueError):
2152 2155 # i18n: "rev" is a keyword
2153 2156 raise error.ParseError(_(b"rev expects a number"))
2154 2157 if l not in _virtualrevs:
2155 2158 try:
2156 2159 repo.changelog.node(l) # check that the rev exists
2157 2160 except IndexError:
2158 2161 raise error.RepoLookupError(_(b"unknown revision '%d'") % l)
2159 2162 return subset & baseset([l])
2160 2163
2161 2164
2162 2165 @predicate(b'revset(set)', safe=True, takeorder=True)
2163 2166 def revsetpredicate(repo, subset, x, order):
2164 2167 """Strictly interpret the content as a revset.
2165 2168
2166 2169 The content of this special predicate will be strictly interpreted as a
2167 2170 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
2168 2171 without possible ambiguity with an "id(0)" bookmark or tag.
2169 2172 """
2170 2173 return getset(repo, subset, x, order)
2171 2174
2172 2175
2173 2176 @predicate(b'matching(revision [, field])', safe=True)
2174 2177 def matching(repo, subset, x):
2175 2178 """Changesets in which a given set of fields match the set of fields in the
2176 2179 selected revision or set.
2177 2180
2178 2181 To match more than one field pass the list of fields to match separated
2179 2182 by spaces (e.g. ``author description``).
2180 2183
2181 2184 Valid fields are most regular revision fields and some special fields.
2182 2185
2183 2186 Regular revision fields are ``description``, ``author``, ``branch``,
2184 2187 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
2185 2188 and ``diff``.
2186 2189 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
2187 2190 contents of the revision. Two revisions matching their ``diff`` will
2188 2191 also match their ``files``.
2189 2192
2190 2193 Special fields are ``summary`` and ``metadata``:
2191 2194 ``summary`` matches the first line of the description.
2192 2195 ``metadata`` is equivalent to matching ``description user date``
2193 2196 (i.e. it matches the main metadata fields).
2194 2197
2195 2198 ``metadata`` is the default field which is used when no fields are
2196 2199 specified. You can match more than one field at a time.
2197 2200 """
2198 2201 # i18n: "matching" is a keyword
2199 2202 l = getargs(x, 1, 2, _(b"matching takes 1 or 2 arguments"))
2200 2203
2201 2204 revs = getset(repo, fullreposet(repo), l[0])
2202 2205
2203 2206 fieldlist = [b'metadata']
2204 2207 if len(l) > 1:
2205 2208 fieldlist = getstring(
2206 2209 l[1],
2207 2210 # i18n: "matching" is a keyword
2208 2211 _(b"matching requires a string as its second argument"),
2209 2212 ).split()
2210 2213
2211 2214 # Make sure that there are no repeated fields,
2212 2215 # expand the 'special' 'metadata' field type
2213 2216 # and check the 'files' whenever we check the 'diff'
2214 2217 fields = []
2215 2218 for field in fieldlist:
2216 2219 if field == b'metadata':
2217 2220 fields += [b'user', b'description', b'date']
2218 2221 elif field == b'diff':
2219 2222 # a revision matching the diff must also match the files
2220 2223 # since matching the diff is very costly, make sure to
2221 2224 # also match the files first
2222 2225 fields += [b'files', b'diff']
2223 2226 else:
2224 2227 if field == b'author':
2225 2228 field = b'user'
2226 2229 fields.append(field)
2227 2230 fields = set(fields)
2228 2231 if b'summary' in fields and b'description' in fields:
2229 2232 # If a revision matches its description it also matches its summary
2230 2233 fields.discard(b'summary')
2231 2234
2232 2235 # We may want to match more than one field
2233 2236 # Not all fields take the same amount of time to be matched
2234 2237 # Sort the selected fields in order of increasing matching cost
2235 2238 fieldorder = [
2236 2239 b'phase',
2237 2240 b'parents',
2238 2241 b'user',
2239 2242 b'date',
2240 2243 b'branch',
2241 2244 b'summary',
2242 2245 b'files',
2243 2246 b'description',
2244 2247 b'substate',
2245 2248 b'diff',
2246 2249 ]
2247 2250
2248 2251 def fieldkeyfunc(f):
2249 2252 try:
2250 2253 return fieldorder.index(f)
2251 2254 except ValueError:
2252 2255 # assume an unknown field is very costly
2253 2256 return len(fieldorder)
2254 2257
2255 2258 fields = list(fields)
2256 2259 fields.sort(key=fieldkeyfunc)
2257 2260
2258 2261 # Each field will be matched with its own "getfield" function
2259 2262 # which will be added to the getfieldfuncs array of functions
2260 2263 getfieldfuncs = []
2261 2264 _funcs = {
2262 2265 b'user': lambda r: repo[r].user(),
2263 2266 b'branch': lambda r: repo[r].branch(),
2264 2267 b'date': lambda r: repo[r].date(),
2265 2268 b'description': lambda r: repo[r].description(),
2266 2269 b'files': lambda r: repo[r].files(),
2267 2270 b'parents': lambda r: repo[r].parents(),
2268 2271 b'phase': lambda r: repo[r].phase(),
2269 2272 b'substate': lambda r: repo[r].substate,
2270 2273 b'summary': lambda r: repo[r].description().splitlines()[0],
2271 2274 b'diff': lambda r: list(
2272 2275 repo[r].diff(opts=diffutil.diffallopts(repo.ui, {b'git': True}))
2273 2276 ),
2274 2277 }
2275 2278 for info in fields:
2276 2279 getfield = _funcs.get(info, None)
2277 2280 if getfield is None:
2278 2281 raise error.ParseError(
2279 2282 # i18n: "matching" is a keyword
2280 2283 _(b"unexpected field name passed to matching: %s")
2281 2284 % info
2282 2285 )
2283 2286 getfieldfuncs.append(getfield)
2284 2287 # convert the getfield array of functions into a "getinfo" function
2285 2288 # which returns an array of field values (or a single value if there
2286 2289 # is only one field to match)
2287 2290 getinfo = lambda r: [f(r) for f in getfieldfuncs]
2288 2291
2289 2292 def matches(x):
2290 2293 for rev in revs:
2291 2294 target = getinfo(rev)
2292 2295 match = True
2293 2296 for n, f in enumerate(getfieldfuncs):
2294 2297 if target[n] != f(x):
2295 2298 match = False
2296 2299 if match:
2297 2300 return True
2298 2301 return False
2299 2302
2300 2303 return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs))
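# Illustrative sketch, not part of revset.py: matching() above sorts the
# requested fields from cheapest to most expensive to compare, and its key
# function treats an unknown field as the most costly. A standalone model of
# that ordering step:
def _fieldorder_sketch(fields):
    """Sort field names by a rough comparison cost, unknown fields last.

    >>> _fieldorder_sketch(['diff', 'user', 'mystery', 'files'])
    ['user', 'files', 'diff', 'mystery']
    """
    order = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
             'files', 'description', 'substate', 'diff']

    def cost(f):
        try:
            return order.index(f)
        except ValueError:
            return len(order)  # assume an unknown field is very costly

    return sorted(fields, key=cost)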
2301 2304
2302 2305
2303 2306 @predicate(b'reverse(set)', safe=True, takeorder=True, weight=0)
2304 2307 def reverse(repo, subset, x, order):
2305 2308 """Reverse order of set.
2306 2309 """
2307 2310 l = getset(repo, subset, x, order)
2308 2311 if order == defineorder:
2309 2312 l.reverse()
2310 2313 return l
2311 2314
2312 2315
2313 2316 @predicate(b'roots(set)', safe=True)
2314 2317 def roots(repo, subset, x):
2315 2318 """Changesets in set with no parent changeset in set.
2316 2319 """
2317 2320 s = getset(repo, fullreposet(repo), x)
2318 2321 parents = repo.changelog.parentrevs
2319 2322
2320 2323 def filter(r):
2321 2324 for p in parents(r):
2322 2325 if 0 <= p and p in s:
2323 2326 return False
2324 2327 return True
2325 2328
2326 2329 return subset & s.filter(filter, condrepr=b'<roots>')
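# Illustrative sketch, not part of revset.py: roots() above keeps a revision
# only if none of its parents belongs to the same set. A standalone model with
# an explicit parent map (the example graph is an assumption):
def _roots_sketch(revs, parentsof):
    """Return members of `revs` with no parent in `revs`.

    >>> parents = {0: [], 1: [0], 2: [1], 5: []}
    >>> sorted(_roots_sketch({1, 2, 5}, parents))
    [1, 5]
    """
    revs = set(revs)
    return {r for r in revs if not any(p in revs for p in parentsof[r])}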
2327 2330
2328 2331
2329 2332 _sortkeyfuncs = {
2330 2333 b'rev': scmutil.intrev,
2331 2334 b'branch': lambda c: c.branch(),
2332 2335 b'desc': lambda c: c.description(),
2333 2336 b'user': lambda c: c.user(),
2334 2337 b'author': lambda c: c.user(),
2335 2338 b'date': lambda c: c.date()[0],
2336 2339 b'node': scmutil.binnode,
2337 2340 }
2338 2341
2339 2342
2340 2343 def _getsortargs(x):
2341 2344 """Parse sort options into (set, [(key, reverse)], opts)"""
2342 2345 args = getargsdict(x, b'sort', b'set keys topo.firstbranch')
2343 2346 if b'set' not in args:
2344 2347 # i18n: "sort" is a keyword
2345 2348 raise error.ParseError(_(b'sort requires one or two arguments'))
2346 2349 keys = b"rev"
2347 2350 if b'keys' in args:
2348 2351 # i18n: "sort" is a keyword
2349 2352 keys = getstring(args[b'keys'], _(b"sort spec must be a string"))
2350 2353
2351 2354 keyflags = []
2352 2355 for k in keys.split():
2353 2356 fk = k
2354 2357 reverse = k.startswith(b'-')
2355 2358 if reverse:
2356 2359 k = k[1:]
2357 2360 if k not in _sortkeyfuncs and k != b'topo':
2358 2361 raise error.ParseError(
2359 2362 _(b"unknown sort key %r") % pycompat.bytestr(fk)
2360 2363 )
2361 2364 keyflags.append((k, reverse))
2362 2365
2363 2366 if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags):
2364 2367 # i18n: "topo" is a keyword
2365 2368 raise error.ParseError(
2366 2369 _(b'topo sort order cannot be combined with other sort keys')
2367 2370 )
2368 2371
2369 2372 opts = {}
2370 2373 if b'topo.firstbranch' in args:
2371 2374 if any(k == b'topo' for k, reverse in keyflags):
2372 2375 opts[b'topo.firstbranch'] = args[b'topo.firstbranch']
2373 2376 else:
2374 2377 # i18n: "topo" and "topo.firstbranch" are keywords
2375 2378 raise error.ParseError(
2376 2379 _(
2377 2380 b'topo.firstbranch can only be used '
2378 2381 b'when using the topo sort key'
2379 2382 )
2380 2383 )
2381 2384
2382 2385 return args[b'set'], keyflags, opts
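# Illustrative sketch, not part of revset.py: the key parsing above turns a
# space-separated spec such as "-date user" into (key, reverse) pairs and
# rejects unknown keys. A minimal standalone model, assuming plain str keys
# instead of bytes:
def _sortkeys_sketch(spec):
    """Parse a sort spec into (key, reverse) pairs.

    >>> _sortkeys_sketch('-date user')
    [('date', True), ('user', False)]
    """
    known = {'rev', 'branch', 'desc', 'user', 'author', 'date', 'node', 'topo'}
    keyflags = []
    for k in spec.split():
        reverse = k.startswith('-')
        if reverse:
            k = k[1:]
        if k not in known:
            raise ValueError('unknown sort key %r' % k)
        keyflags.append((k, reverse))
    return keyflags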
2383 2386
2384 2387
2385 2388 @predicate(
2386 2389 b'sort(set[, [-]key... [, ...]])', safe=True, takeorder=True, weight=10
2387 2390 )
2388 2391 def sort(repo, subset, x, order):
2389 2392 """Sort set by keys. The default sort order is ascending, specify a key
2390 2393 as ``-key`` to sort in descending order.
2391 2394
2392 2395 The keys can be:
2393 2396
2394 2397 - ``rev`` for the revision number,
2395 2398 - ``branch`` for the branch name,
2396 2399 - ``desc`` for the commit message (description),
2397 2400 - ``user`` for user name (``author`` can be used as an alias),
2398 2401 - ``date`` for the commit date,
2399 2402 - ``topo`` for a reverse topological sort,
2400 2403 - ``node`` for the nodeid of the revision.
2401 2404
2402 2405 The ``topo`` sort order cannot be combined with other sort keys. This sort
2403 2406 takes one optional argument, ``topo.firstbranch``, which takes a revset that
2404 2407 specifies what topological branches to prioritize in the sort.
2405 2408
2406 2409 """
2407 2410 s, keyflags, opts = _getsortargs(x)
2408 2411 revs = getset(repo, subset, s, order)
2409 2412
2410 2413 if not keyflags or order != defineorder:
2411 2414 return revs
2412 2415 if len(keyflags) == 1 and keyflags[0][0] == b"rev":
2413 2416 revs.sort(reverse=keyflags[0][1])
2414 2417 return revs
2415 2418 elif keyflags[0][0] == b"topo":
2416 2419 firstbranch = ()
2417 2420 if b'topo.firstbranch' in opts:
2418 2421 firstbranch = getset(repo, subset, opts[b'topo.firstbranch'])
2419 2422 revs = baseset(
2420 2423 dagop.toposort(revs, repo.changelog.parentrevs, firstbranch),
2421 2424 istopo=True,
2422 2425 )
2423 2426 if keyflags[0][1]:
2424 2427 revs.reverse()
2425 2428 return revs
2426 2429
2427 2430 # sort() is guaranteed to be stable
2428 2431 ctxs = [repo[r] for r in revs]
2429 2432 for k, reverse in reversed(keyflags):
2430 2433 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2431 2434 return baseset([c.rev() for c in ctxs])
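# Illustrative sketch, not part of revset.py: the loop above relies on
# list.sort() being stable, so sorting by the keys from least to most
# significant produces a correct multi-key ordering. The same idea over plain
# dicts (the row data is made up for illustration):
def _multikey_sort_sketch(items, keyflags):
    """Sort dicts by several (key, reverse) pairs, most significant first.

    >>> rows = [{'user': 'b', 'date': 2}, {'user': 'a', 'date': 2},
    ...         {'user': 'c', 'date': 1}]
    >>> _multikey_sort_sketch(rows, [('date', False), ('user', False)])
    [{'user': 'c', 'date': 1}, {'user': 'a', 'date': 2}, {'user': 'b', 'date': 2}]
    """
    items = list(items)
    for key, reverse in reversed(keyflags):
        items.sort(key=lambda d: d[key], reverse=reverse)
    return items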
2432 2435
2433 2436
2434 2437 @predicate(b'subrepo([pattern])')
2435 2438 def subrepo(repo, subset, x):
2436 2439 """Changesets that add, modify or remove the given subrepo. If no subrepo
2437 2440 pattern is named, any subrepo changes are returned.
2438 2441 """
2439 2442 # i18n: "subrepo" is a keyword
2440 2443 args = getargs(x, 0, 1, _(b'subrepo takes at most one argument'))
2441 2444 pat = None
2442 2445 if len(args) != 0:
2443 2446 pat = getstring(args[0], _(b"subrepo requires a pattern"))
2444 2447
2445 2448 m = matchmod.exact([b'.hgsubstate'])
2446 2449
2447 2450 def submatches(names):
2448 2451 k, p, m = stringutil.stringmatcher(pat)
2449 2452 for name in names:
2450 2453 if m(name):
2451 2454 yield name
2452 2455
2453 2456 def matches(x):
2454 2457 c = repo[x]
2455 2458 s = repo.status(c.p1().node(), c.node(), match=m)
2456 2459
2457 2460 if pat is None:
2458 2461 return s.added or s.modified or s.removed
2459 2462
2460 2463 if s.added:
2461 2464 return any(submatches(c.substate.keys()))
2462 2465
2463 2466 if s.modified:
2464 2467 subs = set(c.p1().substate.keys())
2465 2468 subs.update(c.substate.keys())
2466 2469
2467 2470 for path in submatches(subs):
2468 2471 if c.p1().substate.get(path) != c.substate.get(path):
2469 2472 return True
2470 2473
2471 2474 if s.removed:
2472 2475 return any(submatches(c.p1().substate.keys()))
2473 2476
2474 2477 return False
2475 2478
2476 2479 return subset.filter(matches, condrepr=(b'<subrepo %r>', pat))
2477 2480
2478 2481
2479 2482 def _mapbynodefunc(repo, s, f):
2480 2483 """(repo, smartset, [node] -> [node]) -> smartset
2481 2484
2482 2485 Helper method to map a smartset to another smartset given a function only
2483 2486 talking about nodes. Handles converting between rev numbers and nodes, and
2484 2487 filtering.
2485 2488 """
2486 2489 cl = repo.unfiltered().changelog
2487 2490 torev = cl.index.get_rev
2488 2491 tonode = cl.node
2489 2492 result = {torev(n) for n in f(tonode(r) for r in s)}
2490 2493 result.discard(None)
2491 2494 return smartset.baseset(result - repo.changelog.filteredrevs)
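# Illustrative sketch, not part of revset.py: _mapbynodefunc() above converts
# revs to nodes, applies a node-level function, converts the results back to
# revs, and drops anything unknown locally (the real helper also removes
# filtered revisions). A standalone model with plain dict lookups:
def _mapbynode_sketch(revs, torev, tonode, f):
    """Map `revs` through a node-level function `f`, dropping unknown nodes.

    >>> tonode = {0: 'n0', 1: 'n1'}
    >>> torev = {'n0': 0, 'n1': 1}
    >>> succ = lambda nodes: ['n1', 'nX']  # 'nX' is not known locally
    >>> sorted(_mapbynode_sketch([0], torev.get, tonode.get, succ))
    [1]
    """
    result = {torev(n) for n in f(tonode(r) for r in revs)}
    result.discard(None)
    return result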
2492 2495
2493 2496
2494 2497 @predicate(b'successors(set)', safe=True)
2495 2498 def successors(repo, subset, x):
2496 2499 """All successors for set, including the given set themselves.
2497 2500 (EXPERIMENTAL)"""
2498 2501 s = getset(repo, fullreposet(repo), x)
2499 2502 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
2500 2503 d = _mapbynodefunc(repo, s, f)
2501 2504 return subset & d
2502 2505
2503 2506
2504 2507 def _substringmatcher(pattern, casesensitive=True):
2505 2508 kind, pattern, matcher = stringutil.stringmatcher(
2506 2509 pattern, casesensitive=casesensitive
2507 2510 )
2508 2511 if kind == b'literal':
2509 2512 if not casesensitive:
2510 2513 pattern = encoding.lower(pattern)
2511 2514 matcher = lambda s: pattern in encoding.lower(s)
2512 2515 else:
2513 2516 matcher = lambda s: pattern in s
2514 2517 return kind, pattern, matcher
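# Illustrative sketch, not part of revset.py: for literal patterns,
# _substringmatcher() above reduces to a plain substring test, lowercasing
# both sides when the match is case-insensitive. A standalone model:
def _substring_sketch(pattern, casesensitive=True):
    """Return a predicate testing whether `pattern` occurs in a string.

    >>> m = _substring_sketch('Bob', casesensitive=False)
    >>> m('committed by BOB')
    True
    """
    if not casesensitive:
        pattern = pattern.lower()
        return lambda s: pattern in s.lower()
    return lambda s: pattern in s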
2515 2518
2516 2519
2517 2520 @predicate(b'tag([name])', safe=True)
2518 2521 def tag(repo, subset, x):
2519 2522 """The specified tag by name, or all tagged revisions if no name is given.
2520 2523
2521 2524 Pattern matching is supported for `name`. See
2522 2525 :hg:`help revisions.patterns`.
2523 2526 """
2524 2527 # i18n: "tag" is a keyword
2525 2528 args = getargs(x, 0, 1, _(b"tag takes one or no arguments"))
2526 2529 cl = repo.changelog
2527 2530 if args:
2528 2531 pattern = getstring(
2529 2532 args[0],
2530 2533 # i18n: "tag" is a keyword
2531 2534 _(b'the argument to tag must be a string'),
2532 2535 )
2533 2536 kind, pattern, matcher = stringutil.stringmatcher(pattern)
2534 2537 if kind == b'literal':
2535 2538 # avoid resolving all tags
2536 2539 tn = repo._tagscache.tags.get(pattern, None)
2537 2540 if tn is None:
2538 2541 raise error.RepoLookupError(
2539 2542 _(b"tag '%s' does not exist") % pattern
2540 2543 )
2541 2544 s = {repo[tn].rev()}
2542 2545 else:
2543 2546 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2544 2547 else:
2545 2548 s = {cl.rev(n) for t, n in repo.tagslist() if t != b'tip'}
2546 2549 return subset & s
2547 2550
2548 2551
2549 2552 @predicate(b'tagged', safe=True)
2550 2553 def tagged(repo, subset, x):
2551 2554 return tag(repo, subset, x)
2552 2555
2553 2556
2554 2557 @predicate(b'orphan()', safe=True)
2555 2558 def orphan(repo, subset, x):
2556 2559 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2557 2560 """
2558 2561 # i18n: "orphan" is a keyword
2559 2562 getargs(x, 0, 0, _(b"orphan takes no arguments"))
2560 2563 orphan = obsmod.getrevs(repo, b'orphan')
2561 2564 return subset & orphan
2562 2565
2563 2566
2564 2567 @predicate(b'unstable()', safe=True)
2565 2568 def unstable(repo, subset, x):
2566 2569 """Changesets with instabilities. (EXPERIMENTAL)
2567 2570 """
2568 2571 # i18n: "unstable" is a keyword
2569 2572 getargs(x, 0, 0, b'unstable takes no arguments')
2570 2573 _unstable = set()
2571 2574 _unstable.update(obsmod.getrevs(repo, b'orphan'))
2572 2575 _unstable.update(obsmod.getrevs(repo, b'phasedivergent'))
2573 2576 _unstable.update(obsmod.getrevs(repo, b'contentdivergent'))
2574 2577 return subset & baseset(_unstable)
2575 2578
2576 2579
2577 2580 @predicate(b'user(string)', safe=True, weight=10)
2578 2581 def user(repo, subset, x):
2579 2582 """User name contains string. The match is case-insensitive.
2580 2583
2581 2584 Pattern matching is supported for `string`. See
2582 2585 :hg:`help revisions.patterns`.
2583 2586 """
2584 2587 return author(repo, subset, x)
2585 2588
2586 2589
2587 2590 @predicate(b'wdir()', safe=True, weight=0)
2588 2591 def wdir(repo, subset, x):
2589 2592 """Working directory. (EXPERIMENTAL)"""
2590 2593 # i18n: "wdir" is a keyword
2591 2594 getargs(x, 0, 0, _(b"wdir takes no arguments"))
2592 2595 if node.wdirrev in subset or isinstance(subset, fullreposet):
2593 2596 return baseset([node.wdirrev])
2594 2597 return baseset()
2595 2598
2596 2599
2597 2600 def _orderedlist(repo, subset, x):
2598 2601 s = getstring(x, b"internal error")
2599 2602 if not s:
2600 2603 return baseset()
2601 2604 # remove duplicates here. it's difficult for the caller to deduplicate sets
2602 2605 # because different symbols can point to the same rev.
2603 2606 cl = repo.changelog
2604 2607 ls = []
2605 2608 seen = set()
2606 2609 for t in s.split(b'\0'):
2607 2610 try:
2608 2611 # fast path for integer revision
2609 2612 r = int(t)
2610 2613 if (b'%d' % r) != t or r not in cl:
2611 2614 raise ValueError
2612 2615 revs = [r]
2613 2616 except ValueError:
2614 2617 revs = stringset(repo, subset, t, defineorder)
2615 2618
2616 2619 for r in revs:
2617 2620 if r in seen:
2618 2621 continue
2619 2622 if (
2620 2623 r in subset
2621 2624 or r in _virtualrevs
2622 2625 and isinstance(subset, fullreposet)
2623 2626 ):
2624 2627 ls.append(r)
2625 2628 seen.add(r)
2626 2629 return baseset(ls)
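# Illustrative sketch, not part of revset.py: _orderedlist() above keeps only
# the first occurrence of each revision while preserving the input order. The
# core of that deduplication, as a standalone helper:
def _dedupe_sketch(revs):
    """Drop duplicates from `revs`, keeping the first occurrence of each.

    >>> _dedupe_sketch([3, 1, 3, 2, 1])
    [3, 1, 2]
    """
    seen = set()
    out = []
    for r in revs:
        if r not in seen:
            seen.add(r)
            out.append(r)
    return out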
2627 2630
2628 2631
2629 2632 # for internal use
2630 2633 @predicate(b'_list', safe=True, takeorder=True)
2631 2634 def _list(repo, subset, x, order):
2632 2635 if order == followorder:
2633 2636 # slow path to take the subset order
2634 2637 return subset & _orderedlist(repo, fullreposet(repo), x)
2635 2638 else:
2636 2639 return _orderedlist(repo, subset, x)
2637 2640
2638 2641
2639 2642 def _orderedintlist(repo, subset, x):
2640 2643 s = getstring(x, b"internal error")
2641 2644 if not s:
2642 2645 return baseset()
2643 2646 ls = [int(r) for r in s.split(b'\0')]
2644 2647 s = subset
2645 2648 return baseset([r for r in ls if r in s])
2646 2649
2647 2650
2648 2651 # for internal use
2649 2652 @predicate(b'_intlist', safe=True, takeorder=True, weight=0)
2650 2653 def _intlist(repo, subset, x, order):
2651 2654 if order == followorder:
2652 2655 # slow path to take the subset order
2653 2656 return subset & _orderedintlist(repo, fullreposet(repo), x)
2654 2657 else:
2655 2658 return _orderedintlist(repo, subset, x)
2656 2659
2657 2660
2658 2661 def _orderedhexlist(repo, subset, x):
2659 2662 s = getstring(x, b"internal error")
2660 2663 if not s:
2661 2664 return baseset()
2662 2665 cl = repo.changelog
2663 2666 ls = [cl.rev(node.bin(r)) for r in s.split(b'\0')]
2664 2667 s = subset
2665 2668 return baseset([r for r in ls if r in s])
2666 2669
2667 2670
2668 2671 # for internal use
2669 2672 @predicate(b'_hexlist', safe=True, takeorder=True)
2670 2673 def _hexlist(repo, subset, x, order):
2671 2674 if order == followorder:
2672 2675 # slow path to take the subset order
2673 2676 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2674 2677 else:
2675 2678 return _orderedhexlist(repo, subset, x)
2676 2679
2677 2680
2678 2681 methods = {
2679 2682 b"range": rangeset,
2680 2683 b"rangeall": rangeall,
2681 2684 b"rangepre": rangepre,
2682 2685 b"rangepost": rangepost,
2683 2686 b"dagrange": dagrange,
2684 2687 b"string": stringset,
2685 2688 b"symbol": stringset,
2686 2689 b"and": andset,
2687 2690 b"andsmally": andsmallyset,
2688 2691 b"or": orset,
2689 2692 b"not": notset,
2690 2693 b"difference": differenceset,
2691 2694 b"relation": relationset,
2692 2695 b"relsubscript": relsubscriptset,
2693 2696 b"subscript": subscriptset,
2694 2697 b"list": listset,
2695 2698 b"keyvalue": keyvaluepair,
2696 2699 b"func": func,
2697 2700 b"ancestor": ancestorspec,
2698 2701 b"parent": parentspec,
2699 2702 b"parentpost": parentpost,
2700 2703 b"smartset": rawsmartset,
2701 2704 }
2702 2705
2703 2706 relations = {
2704 2707 b"g": generationsrel,
2705 2708 b"generations": generationsrel,
2706 2709 }
2707 2710
2708 2711 subscriptrelations = {
2709 2712 b"g": generationssubrel,
2710 2713 b"generations": generationssubrel,
2711 2714 }
2712 2715
2713 2716
2714 2717 def lookupfn(repo):
2715 2718 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2716 2719
2717 2720
2718 2721 def match(ui, spec, lookup=None):
2719 2722 """Create a matcher for a single revision spec"""
2720 2723 return matchany(ui, [spec], lookup=lookup)
2721 2724
2722 2725
2723 2726 def matchany(ui, specs, lookup=None, localalias=None):
2724 2727 """Create a matcher that will include any revisions matching one of the
2725 2728 given specs
2726 2729
2727 2730 If lookup function is not None, the parser will first attempt to handle
2728 2731 old-style ranges, which may contain operator characters.
2729 2732
2730 2733 If localalias is not None, it is a dict {name: definitionstring}. It takes
2731 2734 precedence over [revsetalias] config section.
2732 2735 """
2733 2736 if not specs:
2734 2737
2735 2738 def mfunc(repo, subset=None):
2736 2739 return baseset()
2737 2740
2738 2741 return mfunc
2739 2742 if not all(specs):
2740 2743 raise error.ParseError(_(b"empty query"))
2741 2744 if len(specs) == 1:
2742 2745 tree = revsetlang.parse(specs[0], lookup)
2743 2746 else:
2744 2747 tree = (
2745 2748 b'or',
2746 2749 (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
2747 2750 )
2748 2751
2749 2752 aliases = []
2750 2753 warn = None
2751 2754 if ui:
2752 2755 aliases.extend(ui.configitems(b'revsetalias'))
2753 2756 warn = ui.warn
2754 2757 if localalias:
2755 2758 aliases.extend(localalias.items())
2756 2759 if aliases:
2757 2760 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2758 2761 tree = revsetlang.foldconcat(tree)
2759 2762 tree = revsetlang.analyze(tree)
2760 2763 tree = revsetlang.optimize(tree)
2761 2764 return makematcher(tree)
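# Illustrative sketch, not part of revset.py: matchany() above parses each
# spec and, when there is more than one, wraps the parsed trees in a single
# ('or', ('list', ...)) node before alias expansion and optimization. A
# standalone model of that tree shaping, with a stand-in parse function:
def _combine_sketch(specs, parse=lambda s: ('symbol', s)):
    """Combine parsed specs into one tree, or-ing them when needed.

    >>> _combine_sketch(['tip'])
    ('symbol', 'tip')
    >>> _combine_sketch(['a', 'b'])
    ('or', ('list', ('symbol', 'a'), ('symbol', 'b')))
    """
    if len(specs) == 1:
        return parse(specs[0])
    return ('or', ('list',) + tuple(parse(s) for s in specs))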
2762 2765
2763 2766
2764 2767 def makematcher(tree):
2765 2768 """Create a matcher from an evaluatable tree"""
2766 2769
2767 2770 def mfunc(repo, subset=None, order=None):
2768 2771 if order is None:
2769 2772 if subset is None:
2770 2773 order = defineorder # 'x'
2771 2774 else:
2772 2775 order = followorder # 'subset & x'
2773 2776 if subset is None:
2774 2777 subset = fullreposet(repo)
2775 2778 return getset(repo, subset, tree, order)
2776 2779
2777 2780 return mfunc
2778 2781
2779 2782
2780 2783 def loadpredicate(ui, extname, registrarobj):
2781 2784 """Load revset predicates from specified registrarobj
2782 2785 """
2783 2786 for name, func in pycompat.iteritems(registrarobj._table):
2784 2787 symbols[name] = func
2785 2788 if func._safe:
2786 2789 safesymbols.add(name)
2787 2790
2788 2791
2789 2792 # load built-in predicates explicitly to setup safesymbols
2790 2793 loadpredicate(None, None, predicate)
2791 2794
2792 2795 # tell hggettext to extract docstrings from these functions:
2793 2796 i18nfunctions = symbols.values()
@@ -1,1464 +1,1464
1 1 $ hg init t
2 2 $ cd t
3 3 $ echo import > port
4 4 $ hg add port
5 5 $ hg commit -m 0 -u spam -d '0 0'
6 6 $ echo export >> port
7 7 $ hg commit -m 1 -u eggs -d '1 0'
8 8 $ echo export > port
9 9 $ echo vaportight >> port
10 10 $ echo 'import/export' >> port
11 11 $ hg commit -m 2 -u spam -d '2 0'
12 12 $ echo 'import/export' >> port
13 13 $ hg commit -m 3 -u eggs -d '3 0'
14 14 $ head -n 3 port > port1
15 15 $ mv port1 port
16 16 $ hg commit -m 4 -u spam -d '4 0'
17 17
18 18 pattern error
19 19
20 20 $ hg grep '**test**'
21 21 grep: invalid match pattern: nothing to repeat* (glob)
22 22 [1]
23 23
24 24 invalid revset syntax
25 25
26 $ hg log -r 'diff()'
27 hg: parse error: diff takes at least 1 argument
26 $ hg log -r 'diffcontains()'
27 hg: parse error: diffcontains takes at least 1 argument
28 28 [255]
29 $ hg log -r 'diff(:)'
30 hg: parse error: diff requires a string pattern
29 $ hg log -r 'diffcontains(:)'
30 hg: parse error: diffcontains requires a string pattern
31 31 [255]
32 $ hg log -r 'diff("re:**test**")'
32 $ hg log -r 'diffcontains("re:**test**")'
33 33 hg: parse error: invalid regular expression: nothing to repeat* (glob)
34 34 [255]
35 35
36 36 simple
37 37
38 38 $ hg grep -r tip:0 '.*'
39 39 port:4:export
40 40 port:4:vaportight
41 41 port:4:import/export
42 42 port:3:export
43 43 port:3:vaportight
44 44 port:3:import/export
45 45 port:3:import/export
46 46 port:2:export
47 47 port:2:vaportight
48 48 port:2:import/export
49 49 port:1:import
50 50 port:1:export
51 51 port:0:import
52 52 $ hg grep -r tip:0 port port
53 53 port:4:export
54 54 port:4:vaportight
55 55 port:4:import/export
56 56 port:3:export
57 57 port:3:vaportight
58 58 port:3:import/export
59 59 port:3:import/export
60 60 port:2:export
61 61 port:2:vaportight
62 62 port:2:import/export
63 63 port:1:import
64 64 port:1:export
65 65 port:0:import
66 66
67 67 simple from subdirectory
68 68
69 69 $ mkdir dir
70 70 $ cd dir
71 71 $ hg grep -r tip:0 port
72 72 port:4:export
73 73 port:4:vaportight
74 74 port:4:import/export
75 75 port:3:export
76 76 port:3:vaportight
77 77 port:3:import/export
78 78 port:3:import/export
79 79 port:2:export
80 80 port:2:vaportight
81 81 port:2:import/export
82 82 port:1:import
83 83 port:1:export
84 84 port:0:import
85 85 $ hg grep -r tip:0 port --config ui.relative-paths=yes
86 86 ../port:4:export
87 87 ../port:4:vaportight
88 88 ../port:4:import/export
89 89 ../port:3:export
90 90 ../port:3:vaportight
91 91 ../port:3:import/export
92 92 ../port:3:import/export
93 93 ../port:2:export
94 94 ../port:2:vaportight
95 95 ../port:2:import/export
96 96 ../port:1:import
97 97 ../port:1:export
98 98 ../port:0:import
99 99 $ cd ..
100 100
101 101 simple with color
102 102
103 103 $ hg --config extensions.color= grep --config color.mode=ansi \
104 104 > --color=always port port -r tip:0
105 105 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
106 106 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
107 107 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m4\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
108 108 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
109 109 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
110 110 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
111 111 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
112 112 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
113 113 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mva\x1b[0;31;1mport\x1b[0might (esc)
114 114 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m/ex\x1b[0;31;1mport\x1b[0m (esc)
115 115 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
116 116 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0mex\x1b[0;31;1mport\x1b[0m (esc)
117 117 \x1b[0;35mport\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m0\x1b[0m\x1b[0;36m:\x1b[0mim\x1b[0;31;1mport\x1b[0m (esc)
118 118
119 119 simple templated
120 120
121 121 $ hg grep port -r tip:0 \
122 122 > -T '{path}:{rev}:{node|short}:{texts % "{if(matched, text|upper, text)}"}\n'
123 123 port:4:914fa752cdea:exPORT
124 124 port:4:914fa752cdea:vaPORTight
125 125 port:4:914fa752cdea:imPORT/exPORT
126 126 port:3:95040cfd017d:exPORT
127 127 port:3:95040cfd017d:vaPORTight
128 128 port:3:95040cfd017d:imPORT/exPORT
129 129 port:3:95040cfd017d:imPORT/exPORT
130 130 port:2:3b325e3481a1:exPORT
131 131 port:2:3b325e3481a1:vaPORTight
132 132 port:2:3b325e3481a1:imPORT/exPORT
133 133 port:1:8b20f75c1585:imPORT
134 134 port:1:8b20f75c1585:exPORT
135 135 port:0:f31323c92170:imPORT
136 136
137 137 $ hg grep port -r tip:0 -T '{path}:{rev}:{texts}\n'
138 138 port:4:export
139 139 port:4:vaportight
140 140 port:4:import/export
141 141 port:3:export
142 142 port:3:vaportight
143 143 port:3:import/export
144 144 port:3:import/export
145 145 port:2:export
146 146 port:2:vaportight
147 147 port:2:import/export
148 148 port:1:import
149 149 port:1:export
150 150 port:0:import
151 151
152 152 $ hg grep port -r tip:0 -T '{path}:{tags}:{texts}\n'
153 153 port:tip:export
154 154 port:tip:vaportight
155 155 port:tip:import/export
156 156 port::export
157 157 port::vaportight
158 158 port::import/export
159 159 port::import/export
160 160 port::export
161 161 port::vaportight
162 162 port::import/export
163 163 port::import
164 164 port::export
165 165 port::import
166 166
167 167 simple JSON (no "change" field)
168 168
169 169 $ hg grep -r tip:0 -Tjson port
170 170 [
171 171 {
172 172 "date": [4, 0],
173 173 "lineno": 1,
174 174 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
175 175 "path": "port",
176 176 "rev": 4,
177 177 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
178 178 "user": "spam"
179 179 },
180 180 {
181 181 "date": [4, 0],
182 182 "lineno": 2,
183 183 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
184 184 "path": "port",
185 185 "rev": 4,
186 186 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
187 187 "user": "spam"
188 188 },
189 189 {
190 190 "date": [4, 0],
191 191 "lineno": 3,
192 192 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
193 193 "path": "port",
194 194 "rev": 4,
195 195 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
196 196 "user": "spam"
197 197 },
198 198 {
199 199 "date": [3, 0],
200 200 "lineno": 1,
201 201 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
202 202 "path": "port",
203 203 "rev": 3,
204 204 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
205 205 "user": "eggs"
206 206 },
207 207 {
208 208 "date": [3, 0],
209 209 "lineno": 2,
210 210 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
211 211 "path": "port",
212 212 "rev": 3,
213 213 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
214 214 "user": "eggs"
215 215 },
216 216 {
217 217 "date": [3, 0],
218 218 "lineno": 3,
219 219 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
220 220 "path": "port",
221 221 "rev": 3,
222 222 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
223 223 "user": "eggs"
224 224 },
225 225 {
226 226 "date": [3, 0],
227 227 "lineno": 4,
228 228 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
229 229 "path": "port",
230 230 "rev": 3,
231 231 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
232 232 "user": "eggs"
233 233 },
234 234 {
235 235 "date": [2, 0],
236 236 "lineno": 1,
237 237 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
238 238 "path": "port",
239 239 "rev": 2,
240 240 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
241 241 "user": "spam"
242 242 },
243 243 {
244 244 "date": [2, 0],
245 245 "lineno": 2,
246 246 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
247 247 "path": "port",
248 248 "rev": 2,
249 249 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
250 250 "user": "spam"
251 251 },
252 252 {
253 253 "date": [2, 0],
254 254 "lineno": 3,
255 255 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
256 256 "path": "port",
257 257 "rev": 2,
258 258 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
259 259 "user": "spam"
260 260 },
261 261 {
262 262 "date": [1, 0],
263 263 "lineno": 1,
264 264 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
265 265 "path": "port",
266 266 "rev": 1,
267 267 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
268 268 "user": "eggs"
269 269 },
270 270 {
271 271 "date": [1, 0],
272 272 "lineno": 2,
273 273 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
274 274 "path": "port",
275 275 "rev": 1,
276 276 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
277 277 "user": "eggs"
278 278 },
279 279 {
280 280 "date": [0, 0],
281 281 "lineno": 1,
282 282 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
283 283 "path": "port",
284 284 "rev": 0,
285 285 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
286 286 "user": "spam"
287 287 }
288 288 ]
289 289
290 290 simple JSON without matching lines
291 291
292 292 $ hg grep -r tip:0 -Tjson -l port
293 293 [
294 294 {
295 295 "date": [4, 0],
296 296 "lineno": 1,
297 297 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
298 298 "path": "port",
299 299 "rev": 4,
300 300 "user": "spam"
301 301 },
302 302 {
303 303 "date": [3, 0],
304 304 "lineno": 1,
305 305 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
306 306 "path": "port",
307 307 "rev": 3,
308 308 "user": "eggs"
309 309 },
310 310 {
311 311 "date": [2, 0],
312 312 "lineno": 1,
313 313 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
314 314 "path": "port",
315 315 "rev": 2,
316 316 "user": "spam"
317 317 },
318 318 {
319 319 "date": [1, 0],
320 320 "lineno": 1,
321 321 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
322 322 "path": "port",
323 323 "rev": 1,
324 324 "user": "eggs"
325 325 },
326 326 {
327 327 "date": [0, 0],
328 328 "lineno": 1,
329 329 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
330 330 "path": "port",
331 331 "rev": 0,
332 332 "user": "spam"
333 333 }
334 334 ]
335 335
336 336 diff of each revision for reference
337 337
338 338 $ hg log -p -T'== rev: {rev} ==\n'
339 339 == rev: 4 ==
340 340 diff -r 95040cfd017d -r 914fa752cdea port
341 341 --- a/port Thu Jan 01 00:00:03 1970 +0000
342 342 +++ b/port Thu Jan 01 00:00:04 1970 +0000
343 343 @@ -1,4 +1,3 @@
344 344 export
345 345 vaportight
346 346 import/export
347 347 -import/export
348 348
349 349 == rev: 3 ==
350 350 diff -r 3b325e3481a1 -r 95040cfd017d port
351 351 --- a/port Thu Jan 01 00:00:02 1970 +0000
352 352 +++ b/port Thu Jan 01 00:00:03 1970 +0000
353 353 @@ -1,3 +1,4 @@
354 354 export
355 355 vaportight
356 356 import/export
357 357 +import/export
358 358
359 359 == rev: 2 ==
360 360 diff -r 8b20f75c1585 -r 3b325e3481a1 port
361 361 --- a/port Thu Jan 01 00:00:01 1970 +0000
362 362 +++ b/port Thu Jan 01 00:00:02 1970 +0000
363 363 @@ -1,2 +1,3 @@
364 364 -import
365 365 export
366 366 +vaportight
367 367 +import/export
368 368
369 369 == rev: 1 ==
370 370 diff -r f31323c92170 -r 8b20f75c1585 port
371 371 --- a/port Thu Jan 01 00:00:00 1970 +0000
372 372 +++ b/port Thu Jan 01 00:00:01 1970 +0000
373 373 @@ -1,1 +1,2 @@
374 374 import
375 375 +export
376 376
377 377 == rev: 0 ==
378 378 diff -r 000000000000 -r f31323c92170 port
379 379 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
380 380 +++ b/port Thu Jan 01 00:00:00 1970 +0000
381 381 @@ -0,0 +1,1 @@
382 382 +import
383 383
384 384
385 385 all
386 386
387 387 $ hg grep --traceback --all -nu port port
388 388 port:4:4:-:spam:import/export
389 389 port:3:4:+:eggs:import/export
390 390 port:2:1:-:spam:import
391 391 port:2:2:+:spam:vaportight
392 392 port:2:3:+:spam:import/export
393 393 port:1:2:+:eggs:export
394 394 port:0:1:+:spam:import
395 395
396 396 all JSON
397 397
398 398 $ hg grep --all -Tjson port port
399 399 [
400 400 {
401 401 "change": "-",
402 402 "date": [4, 0],
403 403 "lineno": 4,
404 404 "node": "914fa752cdea87777ac1a8d5c858b0c736218f6c",
405 405 "path": "port",
406 406 "rev": 4,
407 407 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
408 408 "user": "spam"
409 409 },
410 410 {
411 411 "change": "+",
412 412 "date": [3, 0],
413 413 "lineno": 4,
414 414 "node": "95040cfd017d658c536071c6290230a613c4c2a6",
415 415 "path": "port",
416 416 "rev": 3,
417 417 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
418 418 "user": "eggs"
419 419 },
420 420 {
421 421 "change": "-",
422 422 "date": [2, 0],
423 423 "lineno": 1,
424 424 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
425 425 "path": "port",
426 426 "rev": 2,
427 427 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
428 428 "user": "spam"
429 429 },
430 430 {
431 431 "change": "+",
432 432 "date": [2, 0],
433 433 "lineno": 2,
434 434 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
435 435 "path": "port",
436 436 "rev": 2,
437 437 "texts": [{"matched": false, "text": "va"}, {"matched": true, "text": "port"}, {"matched": false, "text": "ight"}],
438 438 "user": "spam"
439 439 },
440 440 {
441 441 "change": "+",
442 442 "date": [2, 0],
443 443 "lineno": 3,
444 444 "node": "3b325e3481a1f07435d81dfdbfa434d9a0245b47",
445 445 "path": "port",
446 446 "rev": 2,
447 447 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}, {"matched": false, "text": "/ex"}, {"matched": true, "text": "port"}],
448 448 "user": "spam"
449 449 },
450 450 {
451 451 "change": "+",
452 452 "date": [1, 0],
453 453 "lineno": 2,
454 454 "node": "8b20f75c158513ff5ac80bd0e5219bfb6f0eb587",
455 455 "path": "port",
456 456 "rev": 1,
457 457 "texts": [{"matched": false, "text": "ex"}, {"matched": true, "text": "port"}],
458 458 "user": "eggs"
459 459 },
460 460 {
461 461 "change": "+",
462 462 "date": [0, 0],
463 463 "lineno": 1,
464 464 "node": "f31323c9217050ba245ee8b537c713ec2e8ab226",
465 465 "path": "port",
466 466 "rev": 0,
467 467 "texts": [{"matched": false, "text": "im"}, {"matched": true, "text": "port"}],
468 468 "user": "spam"
469 469 }
470 470 ]
471 471
472 472 other
473 473
474 474 $ hg grep -r tip:0 -l port port
475 475 port:4
476 476 port:3
477 477 port:2
478 478 port:1
479 479 port:0
480 480 $ hg grep -r tip:0 import port
481 481 port:4:import/export
482 482 port:3:import/export
483 483 port:3:import/export
484 484 port:2:import/export
485 485 port:1:import
486 486 port:0:import
487 487
488 488 $ hg cp port port2
489 489 $ hg commit -m 4 -u spam -d '5 0'
490 490
491 491 follow
492 492
493 493 $ hg grep -r tip:0 --traceback -f 'import\n\Z' port2
494 494 [1]
495 495 $ echo deport >> port2
496 496 $ hg commit -m 5 -u eggs -d '6 0'
497 497 $ hg grep -f --all -nu port port2
498 498 port2:6:4:+:eggs:deport
499 499 port:4:4:-:spam:import/export
500 500 port:3:4:+:eggs:import/export
501 501 port:2:1:-:spam:import
502 502 port:2:2:+:spam:vaportight
503 503 port:2:3:+:spam:import/export
504 504 port:1:2:+:eggs:export
505 505 port:0:1:+:spam:import
506 506
507 507 $ hg up -q null
508 508 $ hg grep -r 'reverse(:.)' -f port
509 509 port:0:import
510 510
511 511 Test wdir
512 512 (at least, this shouldn't crash)
513 513
514 514 $ hg up -q
515 515 $ echo wport >> port2
516 516 $ hg stat
517 517 M port2
518 518 $ hg grep -r 'wdir()' port
519 519 port:2147483647:export
520 520 port:2147483647:vaportight
521 521 port:2147483647:import/export
522 522 port2:2147483647:export
523 523 port2:2147483647:vaportight
524 524 port2:2147483647:import/export
525 525 port2:2147483647:deport
526 526 port2:2147483647:wport
527 527
528 528 $ cd ..
529 529 $ hg init t2
530 530 $ cd t2
531 531 $ hg grep -r tip:0 foobar foo
532 532 [1]
533 533 $ hg grep -r tip:0 foobar
534 534 [1]
535 535 $ echo blue >> color
536 536 $ echo black >> color
537 537 $ hg add color
538 538 $ hg ci -m 0
539 539 $ echo orange >> color
540 540 $ hg ci -m 1
541 541 $ echo black > color
542 542 $ hg ci -m 2
543 543 $ echo orange >> color
544 544 $ echo blue >> color
545 545 $ hg ci -m 3
546 546 $ hg grep -r tip:0 orange
547 547 color:3:orange
548 548 color:1:orange
549 549 $ hg grep --all orange
550 550 color:3:+:orange
551 551 color:2:-:orange
552 552 color:1:+:orange
553 553 $ hg grep --diff orange --color=debug
554 554 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.inserted grep.change|+][grep.sep|:][grep.match|orange]
555 555 [grep.filename|color][grep.sep|:][grep.rev|2][grep.sep|:][grep.deleted grep.change|-][grep.sep|:][grep.match|orange]
556 556 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.inserted grep.change|+][grep.sep|:][grep.match|orange]
557 557
558 558 $ hg grep --diff orange --color=yes
559 559 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m3\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32;1m+\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
560 560 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m2\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1m-\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
561 561 \x1b[0;35mcolor\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;34m1\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;32;1m+\x1b[0m\x1b[0;36m:\x1b[0m\x1b[0;31;1morange\x1b[0m (esc)
562 562
563 563 $ hg grep --diff orange
564 564 color:3:+:orange
565 565 color:2:-:orange
566 566 color:1:+:orange
567 567
568 568 revset predicate for "grep --diff"
569 569
570 $ hg log -qr 'diff("re:^bl...$")'
570 $ hg log -qr 'diffcontains("re:^bl...$")'
571 571 0:203191eb5e21
572 $ hg log -qr 'diff("orange")'
572 $ hg log -qr 'diffcontains("orange")'
573 573 1:7c585a21e0d1
574 574 2:11bd8bc8d653
575 575 3:e0116d3829f8
576 $ hg log -qr '2:0 & diff("orange")'
576 $ hg log -qr '2:0 & diffcontains("orange")'
577 577 2:11bd8bc8d653
578 578 1:7c585a21e0d1
579 579
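As a rough, non-normative illustration of what the diffcontains() tests above exercise, a similar selection can be approximated from outside Mercurial by scanning each revision's diff text. The helper below is hypothetical (it is not Mercurial's implementation and is coarser than the built-in predicate, since raw `hg diff -c` output also contains headers and context lines); it assumes `hg` is on PATH and that it runs inside a repository.

    # Hypothetical approximation (not Mercurial's code) of selecting
    # revisions whose diff mentions a pattern, similar in spirit to
    # diffcontains(pattern) as tested above.
    import re
    import subprocess

    def revs_with_diff_containing(pattern, revs):
        """Yield revision numbers whose changeset diff matches `pattern`."""
        rx = re.compile(pattern)
        for rev in revs:
            diff = subprocess.run(
                ["hg", "diff", "-c", str(rev)],
                capture_output=True, text=True, check=True,
            ).stdout
            if rx.search(diff):
                yield rev

    # Roughly comparable to `hg log -qr 'diffcontains("orange")'` over 0..3:
    # print(list(revs_with_diff_containing("orange", range(4))))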
580 580 test substring match: '^' should only match at the beginning
581 581
582 582 $ hg grep -r tip:0 '^.' --config extensions.color= --color debug
583 583 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lack
584 584 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|o]range
585 585 [grep.filename|color][grep.sep|:][grep.rev|3][grep.sep|:][grep.match|b]lue
586 586 [grep.filename|color][grep.sep|:][grep.rev|2][grep.sep|:][grep.match|b]lack
587 587 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lue
588 588 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|b]lack
589 589 [grep.filename|color][grep.sep|:][grep.rev|1][grep.sep|:][grep.match|o]range
590 590 [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lue
591 591 [grep.filename|color][grep.sep|:][grep.rev|0][grep.sep|:][grep.match|b]lack
592 592
593 593 match in last "line" without newline
594 594
595 595 $ "$PYTHON" -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();'
596 596 $ hg ci -Amnoeol
597 597 adding noeol
598 598 $ hg grep -r tip:0 loop
599 599 noeol:4:no infinite loop
600 600
601 601 $ cd ..
602 602
603 603 Issue685: traceback in grep -r after rename
604 604
605 605 Got a traceback when using grep on a single
606 606 revision with renamed files.
607 607
608 608 $ hg init issue685
609 609 $ cd issue685
610 610 $ echo octarine > color
611 611 $ hg ci -Amcolor
612 612 adding color
613 613 $ hg rename color colour
614 614 $ hg ci -Am rename
615 615 $ hg grep -r tip:0 octarine
616 616 colour:1:octarine
617 617 color:0:octarine
618 618
619 619 Used to crash here
620 620
621 621 $ hg grep -r 1 octarine
622 622 colour:1:octarine
623 623 $ cd ..
624 624
625 625
626 626 Issue337: test that grep follows parent-child relationships instead
627 627 of just using revision numbers.
628 628
629 629 $ hg init issue337
630 630 $ cd issue337
631 631
632 632 $ echo white > color
633 633 $ hg commit -A -m "0 white"
634 634 adding color
635 635
636 636 $ echo red > color
637 637 $ hg commit -A -m "1 red"
638 638
639 639 $ hg update 0
640 640 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
641 641 $ echo black > color
642 642 $ hg commit -A -m "2 black"
643 643 created new head
644 644
645 645 $ hg update --clean 1
646 646 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
647 647 $ echo blue > color
648 648 $ hg commit -A -m "3 blue"
649 649
650 650 $ hg grep --all red
651 651 color:3:-:red
652 652 color:1:+:red
653 653
654 654 $ hg grep --diff red
655 655 color:3:-:red
656 656 color:1:+:red
657 657
658 658 Issue3885: test that changing revision order does not alter the
659 659 revisions printed, just their order.
660 660
661 661 $ hg grep --all red -r "all()"
662 662 color:1:+:red
663 663 color:3:-:red
664 664
665 665 $ hg grep --all red -r "reverse(all())"
666 666 color:3:-:red
667 667 color:1:+:red
668 668
669 669 $ hg grep --diff red -r "all()"
670 670 color:1:+:red
671 671 color:3:-:red
672 672
673 673 $ hg grep --diff red -r "reverse(all())"
674 674 color:3:-:red
675 675 color:1:+:red
676 676
677 677 $ cd ..
678 678
679 679 $ hg init a
680 680 $ cd a
681 681 $ cp "$TESTDIR/binfile.bin" .
682 682 $ hg add binfile.bin
683 683 $ hg ci -m 'add binfile.bin'
684 684 $ hg grep "MaCam" --all
685 685 binfile.bin:0:+: Binary file matches
686 686
687 687 $ hg grep "MaCam" --diff
688 688 binfile.bin:0:+: Binary file matches
689 689
690 690 $ cd ..
691 691
692 692 A moved line may not be collected by "grep --diff" since it first filters
693 693 the contents to be diffed by the pattern (i.e.
694 694 "diff <(grep pat a) <(grep pat b)", not "diff a b | grep pat").
695 695 This is much faster than generating a full diff per revision.
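The effect of filtering before diffing can be sketched, purely as an illustration and not as Mercurial's actual code, with Python's difflib:

    # Illustration only: why a line that merely moved can be missed when the
    # contents are filtered by the pattern *before* diffing.
    import difflib
    import re

    old = ["foo\n", "bar\n", "baz\n"]
    new = ["bar\n", "baz\n", "foo\n"]   # "foo" only moved, content unchanged
    pat = re.compile(r"foo")

    def changed_matching_lines(a, b):
        # keep only the +/- lines of the diff that match the pattern
        return [l for l in difflib.unified_diff(a, b)
                if l[:1] in "+-" and pat.search(l[1:])]

    # "diff a b | grep pat": the move is visible as a -foo/+foo pair
    print(changed_matching_lines(old, new))    # ['-foo\n', '+foo\n']

    # "diff <(grep pat a) <(grep pat b)": both sides reduce to ["foo\n"],
    # the filtered diff is empty, so the move is not reported
    fa = [l for l in old if pat.search(l)]
    fb = [l for l in new if pat.search(l)]
    print(changed_matching_lines(fa, fb))      # []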
696 696
697 697 $ hg init moved-line
698 698 $ cd moved-line
699 699 $ cat <<'EOF' > a
700 700 > foo
701 701 > bar
702 702 > baz
703 703 > EOF
704 704 $ hg ci -Am initial
705 705 adding a
706 706 $ cat <<'EOF' > a
707 707 > bar
708 708 > baz
709 709 > foo
710 710 > EOF
711 711 $ hg ci -m reorder
712 712
713 713 $ hg diff -c 1
714 714 diff -r a593cc55e81b -r 69789a3b6e80 a
715 715 --- a/a Thu Jan 01 00:00:00 1970 +0000
716 716 +++ b/a Thu Jan 01 00:00:00 1970 +0000
717 717 @@ -1,3 +1,3 @@
718 718 -foo
719 719 bar
720 720 baz
721 721 +foo
722 722
723 723 can't find the move of "foo" at revision 1:
724 724
725 725 $ hg grep --diff foo -r1
726 726 [1]
727 727
728 728 "bar" isn't moved at revision 1:
729 729
730 730 $ hg grep --diff bar -r1
731 731 [1]
732 732
733 733 $ cd ..
734 734
735 735 Test that the allfiles flag works as intended
736 736
737 737 $ hg init sng
738 738 $ cd sng
739 739 $ echo "unmod" >> um
740 740 $ echo old > old
741 741 $ hg ci -q -A -m "adds unmod to um"
742 742 $ echo "something else" >> new
743 743 $ hg ci -A -m "second commit"
744 744 adding new
745 745 $ hg grep -r "." "unmod"
746 746 um:1:unmod
747 747
748 748 Existing tracked files in the working directory are searched by default
749 749
750 750 $ echo modified >> new
751 751 $ echo 'added' > added; hg add added
752 752 $ echo 'added, missing' > added-missing; hg add added-missing; rm added-missing
753 753 $ echo 'untracked' > untracked
754 754 $ hg rm old
755 755 $ hg grep ''
756 756 added:added
757 757 new:something else
758 758 new:modified
759 759 um:unmod
760 760
761 761 #if symlink
762 762 Grepping a symlink greps its destination
763 763
764 764 $ rm -f added; ln -s symlink-added added
765 765 $ hg grep '' | grep added
766 766 added:symlink-added
767 767
768 768 But we reject symlinks as directory components of a tracked file as
769 769 usual:
770 770
771 771 $ mkdir dir; touch dir/f; hg add dir/f
772 772 $ rm -rf dir; ln -s / dir
773 773 $ hg grep ''
774 774 abort: path 'dir/f' traverses symbolic link 'dir'
775 775 [255]
776 776 #endif
777 777
778 778 But we can search files from some other revision with -rREV
779 779
780 780 $ hg grep -r. mod
781 781 um:1:unmod
782 782
783 783 $ hg grep --diff mod
784 784 um:0:+:unmod
785 785
786 786 $ cd ..
787 787
788 788 Change the default behavior of grep via ui.tweakdefaults, that is, files not
789 789 in the current working directory should not be grepped
790 790
791 791 $ hg init ab
792 792 $ cd ab
793 793 $ cat <<'EOF' >> .hg/hgrc
794 794 > [ui]
795 795 > tweakdefaults = True
796 796 > EOF
797 797 $ echo "some text">>file1
798 798 $ hg add file1
799 799 $ hg commit -m "adds file1"
800 800 $ hg mv file1 file2
801 801
802 802 wdir revision is hidden by default:
803 803
804 804 $ hg grep "some"
805 805 file2:some text
806 806
807 807 but it should be available in the template dict:
808 808
809 809 $ hg grep "some" -Tjson
810 810 [
811 811 {
812 812 "date": [0, 0],
813 813 "lineno": 1,
814 814 "node": "ffffffffffffffffffffffffffffffffffffffff",
815 815 "path": "file2",
816 816 "rev": 2147483647,
817 817 "texts": [{"matched": true, "text": "some"}, {"matched": false, "text": " text"}],
818 818 "user": "test"
819 819 }
820 820 ]
821 821
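As an aside (not part of the test), the -Tjson output shown above is what a consuming script would read. A minimal sketch, assuming `hg` is on PATH and the command runs inside this repository:

    # Minimal sketch of consuming `hg grep -Tjson` output from a script.
    import json
    import subprocess

    out = subprocess.run(
        ["hg", "grep", "some", "-Tjson"],
        capture_output=True, text=True, check=True,
    ).stdout
    for m in json.loads(out):
        # the working-directory revision appears as rev 2147483647 and an
        # all-'f' node, even though the default text output hides it
        print(m["path"], m["rev"], m["lineno"], m["node"])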
822 822 $ cd ..
823 823
824 824 test -rMULTIREV
825 825
826 826 $ cd sng
827 827 $ hg rm um
828 828 $ hg commit -m "deletes um"
829 829 $ hg grep -r "0:2" "unmod"
830 830 um:0:unmod
831 831 um:1:unmod
832 832 $ hg grep -r "0:2" "unmod" um
833 833 um:0:unmod
834 834 um:1:unmod
835 835 $ hg grep -r "0:2" "unmod" "glob:**/um" # Check that patterns also work
836 836 um:0:unmod
837 837 um:1:unmod
838 838 $ cd ..
839 839
840 840 --follow with/without --diff and/or paths
841 841 -----------------------------------------
842 842
843 843 For each test case, we compare the history traversal of "hg log",
844 844 "hg grep --diff", and "hg grep" (--all-files).
845 845
846 846 "hg grep --diff" should traverse the log in the same way as "hg log".
847 847 "hg grep" (--all-files) is slightly different in that it also reports
848 848 matches in files that a revision leaves unmodified.
849 849
850 850 $ hg init follow
851 851 $ cd follow
852 852
853 853 $ cat <<'EOF' >> .hg/hgrc
854 854 > [ui]
855 855 > logtemplate = '{rev}: {join(files % "{status} {path}", ", ")}\n'
856 856 > EOF
857 857
858 858 $ for f in add0 add0-mod1 add0-rm1 add0-mod2 add0-rm2 add0-mod3 add0-mod4 add0-rm4; do
859 859 > echo data0 >> $f
860 860 > done
861 861 $ hg ci -qAm0
862 862
863 863 $ hg cp add0 add0-cp1
864 864 $ hg cp add0 add0-cp1-mod1
865 865 $ hg cp add0 add0-cp1-mod1-rm3
866 866 $ hg rm add0-rm1
867 867 $ for f in *mod1*; do
868 868 > echo data1 >> $f
869 869 > done
870 870 $ hg ci -qAm1
871 871
872 872 $ hg update -q 0
873 873 $ hg cp add0 add0-cp2
874 874 $ hg cp add0 add0-cp2-mod2
875 875 $ hg rm add0-rm2
876 876 $ for f in *mod2*; do
877 877 > echo data2 >> $f
878 878 > done
879 879 $ hg ci -qAm2
880 880
881 881 $ hg update -q 1
882 882 $ hg cp add0-cp1 add0-cp1-cp3
883 883 $ hg cp add0-cp1-mod1 add0-cp1-mod1-cp3-mod3
884 884 $ hg rm add0-cp1-mod1-rm3
885 885 $ for f in *mod3*; do
886 886 > echo data3 >> $f
887 887 > done
888 888 $ hg ci -qAm3
889 889
890 890 $ hg cp add0 add0-cp4
891 891 $ hg cp add0 add0-cp4-mod4
892 892 $ hg rm add0-rm4
893 893 $ for f in *mod4*; do
894 894 > echo data4 >> $f
895 895 > done
896 896
897 897 $ hg log -Gr':wdir()'
898 898 o 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
899 899 |
900 900 @ 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
901 901 |
902 902 | o 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
903 903 | |
904 904 o | 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
905 905 |/
906 906 o 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
907 907
908 908
909 909 follow revision history from wdir parent:
910 910
911 911 $ hg log -f
912 912 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
913 913 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
914 914 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
915 915
916 916 $ hg grep --diff -f data
917 917 add0-cp1-mod1-cp3-mod3:3:+:data3
918 918 add0-mod3:3:+:data3
919 919 add0-cp1-mod1:1:+:data1
920 920 add0-cp1-mod1-rm3:1:+:data1
921 921 add0-mod1:1:+:data1
922 922 add0:0:+:data0
923 923 add0-mod1:0:+:data0
924 924 add0-mod2:0:+:data0
925 925 add0-mod3:0:+:data0
926 926 add0-mod4:0:+:data0
927 927 add0-rm1:0:+:data0
928 928 add0-rm2:0:+:data0
929 929 add0-rm4:0:+:data0
930 930
931 931 $ hg grep -f data
932 932 add0:3:data0
933 933 add0-cp1:3:data0
934 934 add0-cp1-cp3:3:data0
935 935 add0-cp1-mod1:3:data0
936 936 add0-cp1-mod1:3:data1
937 937 add0-cp1-mod1-cp3-mod3:3:data0
938 938 add0-cp1-mod1-cp3-mod3:3:data1
939 939 add0-cp1-mod1-cp3-mod3:3:data3
940 940 add0-mod1:3:data0
941 941 add0-mod1:3:data1
942 942 add0-mod2:3:data0
943 943 add0-mod3:3:data0
944 944 add0-mod3:3:data3
945 945 add0-mod4:3:data0
946 946 add0-rm2:3:data0
947 947 add0-rm4:3:data0
948 948 add0:1:data0
949 949 add0-cp1:1:data0
950 950 add0-cp1-mod1:1:data0
951 951 add0-cp1-mod1:1:data1
952 952 add0-cp1-mod1-rm3:1:data0
953 953 add0-cp1-mod1-rm3:1:data1
954 954 add0-mod1:1:data0
955 955 add0-mod1:1:data1
956 956 add0-mod2:1:data0
957 957 add0-mod3:1:data0
958 958 add0-mod4:1:data0
959 959 add0-rm2:1:data0
960 960 add0-rm4:1:data0
961 961 add0:0:data0
962 962 add0-mod1:0:data0
963 963 add0-mod2:0:data0
964 964 add0-mod3:0:data0
965 965 add0-mod4:0:data0
966 966 add0-rm1:0:data0
967 967 add0-rm2:0:data0
968 968 add0-rm4:0:data0
969 969
970 970 follow revision history from specified revision:
971 971
972 972 $ hg log -fr2
973 973 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
974 974 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
975 975
976 976 $ hg grep --diff -fr2 data
977 977 add0-cp2-mod2:2:+:data2
978 978 add0-mod2:2:+:data2
979 979 add0:0:+:data0
980 980 add0-mod1:0:+:data0
981 981 add0-mod2:0:+:data0
982 982 add0-mod3:0:+:data0
983 983 add0-mod4:0:+:data0
984 984 add0-rm1:0:+:data0
985 985 add0-rm2:0:+:data0
986 986 add0-rm4:0:+:data0
987 987
988 988 $ hg grep -fr2 data
989 989 add0:2:data0
990 990 add0-cp2:2:data0
991 991 add0-cp2-mod2:2:data0
992 992 add0-cp2-mod2:2:data2
993 993 add0-mod1:2:data0
994 994 add0-mod2:2:data0
995 995 add0-mod2:2:data2
996 996 add0-mod3:2:data0
997 997 add0-mod4:2:data0
998 998 add0-rm1:2:data0
999 999 add0-rm4:2:data0
1000 1000 add0:0:data0
1001 1001 add0-mod1:0:data0
1002 1002 add0-mod2:0:data0
1003 1003 add0-mod3:0:data0
1004 1004 add0-mod4:0:data0
1005 1005 add0-rm1:0:data0
1006 1006 add0-rm2:0:data0
1007 1007 add0-rm4:0:data0
1008 1008
1009 1009 follow revision history from wdir:
1010 1010
1011 1011 $ hg log -fr'wdir()'
1012 1012 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1013 1013 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1014 1014 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1015 1015 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1016 1016
1017 1017 BROKEN: should not abort because of removed file
1018 1018 $ hg grep --diff -fr'wdir()' data
1019 1019 add0-cp4-mod4:2147483647:+:data4
1020 1020 add0-mod4:2147483647:+:data4
1021 1021 add0-rm4:2147483647:-:abort: add0-rm4@None: not found in manifest!
1022 1022 [255]
1023 1023
1024 1024 $ hg grep -fr'wdir()' data
1025 1025 add0:2147483647:data0
1026 1026 add0-cp1:2147483647:data0
1027 1027 add0-cp1-cp3:2147483647:data0
1028 1028 add0-cp1-mod1:2147483647:data0
1029 1029 add0-cp1-mod1:2147483647:data1
1030 1030 add0-cp1-mod1-cp3-mod3:2147483647:data0
1031 1031 add0-cp1-mod1-cp3-mod3:2147483647:data1
1032 1032 add0-cp1-mod1-cp3-mod3:2147483647:data3
1033 1033 add0-cp4:2147483647:data0
1034 1034 add0-cp4-mod4:2147483647:data0
1035 1035 add0-cp4-mod4:2147483647:data4
1036 1036 add0-mod1:2147483647:data0
1037 1037 add0-mod1:2147483647:data1
1038 1038 add0-mod2:2147483647:data0
1039 1039 add0-mod3:2147483647:data0
1040 1040 add0-mod3:2147483647:data3
1041 1041 add0-mod4:2147483647:data0
1042 1042 add0-mod4:2147483647:data4
1043 1043 add0-rm2:2147483647:data0
1044 1044 add0:3:data0
1045 1045 add0-cp1:3:data0
1046 1046 add0-cp1-cp3:3:data0
1047 1047 add0-cp1-mod1:3:data0
1048 1048 add0-cp1-mod1:3:data1
1049 1049 add0-cp1-mod1-cp3-mod3:3:data0
1050 1050 add0-cp1-mod1-cp3-mod3:3:data1
1051 1051 add0-cp1-mod1-cp3-mod3:3:data3
1052 1052 add0-mod1:3:data0
1053 1053 add0-mod1:3:data1
1054 1054 add0-mod2:3:data0
1055 1055 add0-mod3:3:data0
1056 1056 add0-mod3:3:data3
1057 1057 add0-mod4:3:data0
1058 1058 add0-rm2:3:data0
1059 1059 add0-rm4:3:data0
1060 1060 add0:1:data0
1061 1061 add0-cp1:1:data0
1062 1062 add0-cp1-mod1:1:data0
1063 1063 add0-cp1-mod1:1:data1
1064 1064 add0-cp1-mod1-rm3:1:data0
1065 1065 add0-cp1-mod1-rm3:1:data1
1066 1066 add0-mod1:1:data0
1067 1067 add0-mod1:1:data1
1068 1068 add0-mod2:1:data0
1069 1069 add0-mod3:1:data0
1070 1070 add0-mod4:1:data0
1071 1071 add0-rm2:1:data0
1072 1072 add0-rm4:1:data0
1073 1073 add0:0:data0
1074 1074 add0-mod1:0:data0
1075 1075 add0-mod2:0:data0
1076 1076 add0-mod3:0:data0
1077 1077 add0-mod4:0:data0
1078 1078 add0-rm1:0:data0
1079 1079 add0-rm2:0:data0
1080 1080 add0-rm4:0:data0
1081 1081
1082 1082 follow revision history from multiple revisions:
1083 1083
1084 1084 $ hg log -fr'1+2'
1085 1085 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1086 1086 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1087 1087 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1088 1088
1089 1089 $ hg grep --diff -fr'1+2' data
1090 1090 add0-cp2-mod2:2:+:data2
1091 1091 add0-mod2:2:+:data2
1092 1092 add0-cp1-mod1:1:+:data1
1093 1093 add0-cp1-mod1-rm3:1:+:data1
1094 1094 add0-mod1:1:+:data1
1095 1095 add0:0:+:data0
1096 1096 add0-mod1:0:+:data0
1097 1097 add0-mod2:0:+:data0
1098 1098 add0-mod3:0:+:data0
1099 1099 add0-mod4:0:+:data0
1100 1100 add0-rm1:0:+:data0
1101 1101 add0-rm2:0:+:data0
1102 1102 add0-rm4:0:+:data0
1103 1103
1104 1104 $ hg grep -fr'1+2' data
1105 1105 add0:2:data0
1106 1106 add0-cp2:2:data0
1107 1107 add0-cp2-mod2:2:data0
1108 1108 add0-cp2-mod2:2:data2
1109 1109 add0-mod1:2:data0
1110 1110 add0-mod2:2:data0
1111 1111 add0-mod2:2:data2
1112 1112 add0-mod3:2:data0
1113 1113 add0-mod4:2:data0
1114 1114 add0-rm1:2:data0
1115 1115 add0-rm4:2:data0
1116 1116 add0:1:data0
1117 1117 add0-cp1:1:data0
1118 1118 add0-cp1-mod1:1:data0
1119 1119 add0-cp1-mod1:1:data1
1120 1120 add0-cp1-mod1-rm3:1:data0
1121 1121 add0-cp1-mod1-rm3:1:data1
1122 1122 add0-mod1:1:data0
1123 1123 add0-mod1:1:data1
1124 1124 add0-mod2:1:data0
1125 1125 add0-mod3:1:data0
1126 1126 add0-mod4:1:data0
1127 1127 add0-rm2:1:data0
1128 1128 add0-rm4:1:data0
1129 1129 add0:0:data0
1130 1130 add0-mod1:0:data0
1131 1131 add0-mod2:0:data0
1132 1132 add0-mod3:0:data0
1133 1133 add0-mod4:0:data0
1134 1134 add0-rm1:0:data0
1135 1135 add0-rm2:0:data0
1136 1136 add0-rm4:0:data0
1137 1137
1138 1138 follow file history from wdir parent, unmodified in wdir:
1139 1139
1140 1140 $ hg log -f add0-mod3
1141 1141 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1142 1142 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1143 1143
1144 1144 $ hg grep --diff -f data add0-mod3
1145 1145 add0-mod3:3:+:data3
1146 1146 add0-mod3:0:+:data0
1147 1147
1148 1148 $ hg grep -f data add0-mod3
1149 1149 add0-mod3:3:data0
1150 1150 add0-mod3:3:data3
1151 1151 add0-mod3:1:data0
1152 1152 add0-mod3:0:data0
1153 1153
1154 1154 follow file history from wdir parent, modified in wdir:
1155 1155
1156 1156 $ hg log -f add0-mod4
1157 1157 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1158 1158
1159 1159 $ hg grep --diff -f data add0-mod4
1160 1160 add0-mod4:0:+:data0
1161 1161
1162 1162 $ hg grep -f data add0-mod4
1163 1163 add0-mod4:3:data0
1164 1164 add0-mod4:1:data0
1165 1165 add0-mod4:0:data0
1166 1166
1167 1167 follow file history from wdir parent, copied but unmodified:
1168 1168
1169 1169 $ hg log -f add0-cp1-cp3
1170 1170 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1171 1171 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1172 1172 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1173 1173
1174 1174 $ hg grep --diff -f data add0-cp1-cp3
1175 1175 add0:0:+:data0
1176 1176
1177 1177 BROKEN: should follow history across renames
1178 1178 $ hg grep -f data add0-cp1-cp3
1179 1179 add0-cp1-cp3:3:data0
1180 1180
1181 1181 follow file history from wdir parent, copied and modified:
1182 1182
1183 1183 $ hg log -f add0-cp1-mod1-cp3-mod3
1184 1184 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1185 1185 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1186 1186 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1187 1187
1188 1188 $ hg grep --diff -f data add0-cp1-mod1-cp3-mod3
1189 1189 add0-cp1-mod1-cp3-mod3:3:+:data3
1190 1190 add0-cp1-mod1:1:+:data1
1191 1191 add0:0:+:data0
1192 1192
1193 1193 BROKEN: should follow history across renames
1194 1194 $ hg grep -f data add0-cp1-mod1-cp3-mod3
1195 1195 add0-cp1-mod1-cp3-mod3:3:data0
1196 1196 add0-cp1-mod1-cp3-mod3:3:data1
1197 1197 add0-cp1-mod1-cp3-mod3:3:data3
1198 1198
1199 1199 follow file history from wdir parent, copied in wdir:
1200 1200
1201 1201 $ hg log -f add0-cp4
1202 1202 abort: cannot follow nonexistent file: "add0-cp4"
1203 1203 [255]
1204 1204
1205 1205 $ hg grep --diff -f data add0-cp4
1206 1206 abort: cannot follow nonexistent file: "add0-cp4"
1207 1207 [255]
1208 1208
1209 1209 BROKEN: maybe better to abort
1210 1210 $ hg grep -f data add0-cp4
1211 1211 [1]
1212 1212
1213 1213 follow file history from wdir parent, removed:
1214 1214
1215 1215 $ hg log -f add0-cp1-mod1-rm3
1216 1216 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
1217 1217 [255]
1218 1218
1219 1219 $ hg grep --diff -f data add0-cp1-mod1-rm3
1220 1220 abort: cannot follow file not in parent revision: "add0-cp1-mod1-rm3"
1221 1221 [255]
1222 1222
1223 1223 BROKEN: maybe better to abort
1224 1224 $ hg grep -f data add0-cp1-mod1-rm3
1225 1225 add0-cp1-mod1-rm3:1:data0
1226 1226 add0-cp1-mod1-rm3:1:data1
1227 1227
1228 1228 follow file history from wdir parent (explicit), removed:
1229 1229
1230 1230 $ hg log -fr. add0-cp1-mod1-rm3
1231 1231 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
1232 1232 [255]
1233 1233
1234 1234 $ hg grep --diff -fr. data add0-cp1-mod1-rm3
1235 1235 abort: cannot follow file not in any of the specified revisions: "add0-cp1-mod1-rm3"
1236 1236 [255]
1237 1237
1238 1238 BROKEN: should abort
1239 1239 $ hg grep -fr. data add0-cp1-mod1-rm3
1240 1240 add0-cp1-mod1-rm3:1:data0
1241 1241 add0-cp1-mod1-rm3:1:data1
1242 1242
1243 1243 follow file history from wdir parent, removed in wdir:
1244 1244
1245 1245 $ hg log -f add0-rm4
1246 1246 abort: cannot follow file not in parent revision: "add0-rm4"
1247 1247 [255]
1248 1248
1249 1249 $ hg grep --diff -f data add0-rm4
1250 1250 abort: cannot follow file not in parent revision: "add0-rm4"
1251 1251 [255]
1252 1252
1253 1253 BROKEN: should abort
1254 1254 $ hg grep -f data add0-rm4
1255 1255 add0-rm4:3:data0
1256 1256 add0-rm4:1:data0
1257 1257 add0-rm4:0:data0
1258 1258
1259 1259 follow file history from wdir parent (explicit), removed in wdir:
1260 1260
1261 1261 $ hg log -fr. add0-rm4
1262 1262 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1263 1263
1264 1264 $ hg grep --diff -fr. data add0-rm4
1265 1265 add0-rm4:0:+:data0
1266 1266
1267 1267 $ hg grep -fr. data add0-rm4
1268 1268 add0-rm4:3:data0
1269 1269 add0-rm4:1:data0
1270 1270 add0-rm4:0:data0
1271 1271
1272 1272 follow file history from wdir parent, multiple files:
1273 1273
1274 1274 $ hg log -f add0-mod3 add0-cp1-mod1
1275 1275 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1276 1276 1: A add0-cp1, A add0-cp1-mod1, A add0-cp1-mod1-rm3, M add0-mod1, R add0-rm1
1277 1277 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1278 1278
1279 1279 $ hg grep --diff -f data add0-mod3 add0-cp1-mod1
1280 1280 add0-mod3:3:+:data3
1281 1281 add0-cp1-mod1:1:+:data1
1282 1282 add0:0:+:data0
1283 1283 add0-mod3:0:+:data0
1284 1284
1285 1285 BROKEN: should follow history across renames
1286 1286 $ hg grep -f data add0-mod3 add0-cp1-mod1
1287 1287 add0-cp1-mod1:3:data0
1288 1288 add0-cp1-mod1:3:data1
1289 1289 add0-mod3:3:data0
1290 1290 add0-mod3:3:data3
1291 1291 add0-cp1-mod1:1:data0
1292 1292 add0-cp1-mod1:1:data1
1293 1293 add0-mod3:1:data0
1294 1294 add0-mod3:0:data0
1295 1295
1296 1296 follow file history from specified revision, modified:
1297 1297
1298 1298 $ hg log -fr2 add0-mod2
1299 1299 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1300 1300 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1301 1301
1302 1302 $ hg grep --diff -fr2 data add0-mod2
1303 1303 add0-mod2:2:+:data2
1304 1304 add0-mod2:0:+:data0
1305 1305
1306 1306 $ hg grep -fr2 data add0-mod2
1307 1307 add0-mod2:2:data0
1308 1308 add0-mod2:2:data2
1309 1309 add0-mod2:0:data0
1310 1310
1311 1311 follow file history from specified revision, copied but unmodified:
1312 1312
1313 1313 $ hg log -fr2 add0-cp2
1314 1314 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1315 1315 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1316 1316
1317 1317 $ hg grep --diff -fr2 data add0-cp2
1318 1318 add0:0:+:data0
1319 1319
1320 1320 BROKEN: should follow history across renames
1321 1321 $ hg grep -fr2 data add0-cp2
1322 1322 add0-cp2:2:data0
1323 1323
1324 1324 follow file history from specified revision, copied and modified:
1325 1325
1326 1326 $ hg log -fr2 add0-cp2-mod2
1327 1327 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1328 1328 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1329 1329
1330 1330 $ hg grep --diff -fr2 data add0-cp2-mod2
1331 1331 add0-cp2-mod2:2:+:data2
1332 1332 add0:0:+:data0
1333 1333
1334 1334 BROKEN: should follow history across renames
1335 1335 $ hg grep -fr2 data add0-cp2-mod2
1336 1336 add0-cp2-mod2:2:data0
1337 1337 add0-cp2-mod2:2:data2
1338 1338
1339 1339 follow file history from specified revision, removed:
1340 1340
1341 1341 $ hg log -fr2 add0-rm2
1342 1342 abort: cannot follow file not in any of the specified revisions: "add0-rm2"
1343 1343 [255]
1344 1344
1345 1345 $ hg grep --diff -fr2 data add0-rm2
1346 1346 abort: cannot follow file not in any of the specified revisions: "add0-rm2"
1347 1347 [255]
1348 1348
1349 1349 BROKEN: should abort
1350 1350 $ hg grep -fr2 data add0-rm2
1351 1351 add0-rm2:0:data0
1352 1352
1353 1353 follow file history from specified revision, multiple files:
1354 1354
1355 1355 $ hg log -fr2 add0-cp2 add0-mod2
1356 1356 2: A add0-cp2, A add0-cp2-mod2, M add0-mod2, R add0-rm2
1357 1357 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1358 1358
1359 1359 $ hg grep --diff -fr2 data add0-cp2 add0-mod2
1360 1360 add0-mod2:2:+:data2
1361 1361 add0:0:+:data0
1362 1362 add0-mod2:0:+:data0
1363 1363
1364 1364 BROKEN: should follow history across renames
1365 1365 $ hg grep -fr2 data add0-cp2 add0-mod2
1366 1366 add0-cp2:2:data0
1367 1367 add0-mod2:2:data0
1368 1368 add0-mod2:2:data2
1369 1369 add0-mod2:0:data0
1370 1370
1371 1371 follow file history from wdir, unmodified:
1372 1372
1373 1373 $ hg log -fr'wdir()' add0-mod3
1374 1374 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1375 1375 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1376 1376 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1377 1377
1378 1378 $ hg grep --diff -fr'wdir()' data add0-mod3
1379 1379 add0-mod3:3:+:data3
1380 1380 add0-mod3:0:+:data0
1381 1381
1382 1382 $ hg grep -fr'wdir()' data add0-mod3
1383 1383 add0-mod3:2147483647:data0
1384 1384 add0-mod3:2147483647:data3
1385 1385 add0-mod3:3:data0
1386 1386 add0-mod3:3:data3
1387 1387 add0-mod3:1:data0
1388 1388 add0-mod3:0:data0
1389 1389
1390 1390 follow file history from wdir, modified:
1391 1391
1392 1392 $ hg log -fr'wdir()' add0-mod4
1393 1393 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1394 1394 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1395 1395
1396 1396 $ hg grep --diff -fr'wdir()' data add0-mod4
1397 1397 add0-mod4:2147483647:+:data4
1398 1398 add0-mod4:0:+:data0
1399 1399
1400 1400 $ hg grep -fr'wdir()' data add0-mod4
1401 1401 add0-mod4:2147483647:data0
1402 1402 add0-mod4:2147483647:data4
1403 1403 add0-mod4:3:data0
1404 1404 add0-mod4:1:data0
1405 1405 add0-mod4:0:data0
1406 1406
1407 1407 follow file history from wdir, copied but unmodified:
1408 1408
1409 1409 $ hg log -fr'wdir()' add0-cp4
1410 1410 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1411 1411 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1412 1412
1413 1413 $ hg grep --diff -fr'wdir()' data add0-cp4
1414 1414 add0:0:+:data0
1415 1415
1416 1416 BROKEN: should follow history across renames
1417 1417 $ hg grep -fr'wdir()' data add0-cp4
1418 1418 add0-cp4:2147483647:data0
1419 1419
1420 1420 follow file history from wdir, copied and modified:
1421 1421
1422 1422 $ hg log -fr'wdir()' add0-cp4-mod4
1423 1423 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1424 1424 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1425 1425
1426 1426 $ hg grep --diff -fr'wdir()' data add0-cp4-mod4
1427 1427 add0-cp4-mod4:2147483647:+:data4
1428 1428 add0:0:+:data0
1429 1429
1430 1430 BROKEN: should follow history across renames
1431 1431 $ hg grep -fr'wdir()' data add0-cp4-mod4
1432 1432 add0-cp4-mod4:2147483647:data0
1433 1433 add0-cp4-mod4:2147483647:data4
1434 1434
1435 1435 follow file history from wdir, multiple files:
1436 1436
1437 1437 $ hg log -fr'wdir()' add0-cp4 add0-mod4 add0-mod3
1438 1438 2147483647: A add0-cp4, A add0-cp4-mod4, M add0-mod4, R add0-rm4
1439 1439 3: A add0-cp1-cp3, A add0-cp1-mod1-cp3-mod3, R add0-cp1-mod1-rm3, M add0-mod3
1440 1440 0: A add0, A add0-mod1, A add0-mod2, A add0-mod3, A add0-mod4, A add0-rm1, A add0-rm2, A add0-rm4
1441 1441
1442 1442 $ hg grep --diff -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
1443 1443 add0-mod4:2147483647:+:data4
1444 1444 add0-mod3:3:+:data3
1445 1445 add0:0:+:data0
1446 1446 add0-mod3:0:+:data0
1447 1447 add0-mod4:0:+:data0
1448 1448
1449 1449 BROKEN: should follow history across renames
1450 1450 $ hg grep -fr'wdir()' data add0-cp4 add0-mod4 add0-mod3
1451 1451 add0-cp4:2147483647:data0
1452 1452 add0-mod3:2147483647:data0
1453 1453 add0-mod3:2147483647:data3
1454 1454 add0-mod4:2147483647:data0
1455 1455 add0-mod4:2147483647:data4
1456 1456 add0-mod3:3:data0
1457 1457 add0-mod3:3:data3
1458 1458 add0-mod4:3:data0
1459 1459 add0-mod3:1:data0
1460 1460 add0-mod4:1:data0
1461 1461 add0-mod3:0:data0
1462 1462 add0-mod4:0:data0
1463 1463
1464 1464 $ cd ..