revset: extract a helper to parse integer range...
Yuya Nishihara
r41702:59638c6f default
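This changeset re-exports the new revsetlang.getintrange() helper (see the added alias near the top of the hunk) and switches followlines() over to it, replacing the open-coded getrange() + getinteger() pair. Below is a minimal sketch of what the helper has to do to preserve the old followlines() behaviour; the real definition lives in revsetlang.py, which is outside this hunk, so treat the exact shape as an assumption. It relies on the getrange/getinteger aliases visible at the top of the diff.

    # Hypothetical sketch only -- the authoritative implementation is
    # revsetlang.getintrange(), which is not shown in this diff.
    def getintrange(x, err1, err2):
        # parse the tree node as a 'fromline:toline' range ...
        a, b = getrange(x, err1)
        # ... and coerce both bounds to integers
        return getinteger(a, err2), getinteger(b, err2)

The followlines() hunk further down then unpacks the returned tuple straight into util.processlinerange().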
@@ -1,2385 +1,2386 @@
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import re
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 dagop,
15 15 destutil,
16 16 diffutil,
17 17 encoding,
18 18 error,
19 19 hbisect,
20 20 match as matchmod,
21 21 node,
22 22 obsolete as obsmod,
23 23 obsutil,
24 24 pathutil,
25 25 phases,
26 26 pycompat,
27 27 registrar,
28 28 repoview,
29 29 revsetlang,
30 30 scmutil,
31 31 smartset,
32 32 stack as stackmod,
33 33 util,
34 34 )
35 35 from .utils import (
36 36 dateutil,
37 37 stringutil,
38 38 )
39 39
40 40 # helpers for processing parsed tree
41 41 getsymbol = revsetlang.getsymbol
42 42 getstring = revsetlang.getstring
43 43 getinteger = revsetlang.getinteger
44 44 getboolean = revsetlang.getboolean
45 45 getlist = revsetlang.getlist
46 46 getrange = revsetlang.getrange
47 getintrange = revsetlang.getintrange
47 48 getargs = revsetlang.getargs
48 49 getargsdict = revsetlang.getargsdict
49 50
50 51 baseset = smartset.baseset
51 52 generatorset = smartset.generatorset
52 53 spanset = smartset.spanset
53 54 fullreposet = smartset.fullreposet
54 55
55 56 # Constants for ordering requirement, used in getset():
56 57 #
57 58 # If 'define', any nested functions and operations MAY change the ordering of
57 58 # the entries in the set (but if it changes the ordering, it MUST ALWAYS change
59 60 # it). If 'follow', any nested functions and operations MUST take the ordering
60 61 # specified by the first operand to the '&' operator.
61 62 #
62 63 # For instance,
63 64 #
64 65 # X & (Y | Z)
65 66 # ^ ^^^^^^^
66 67 # | follow
67 68 # define
68 69 #
69 70 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
70 71 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
71 72 #
72 73 # 'any' means the order doesn't matter. For instance,
73 74 #
74 75 # (X & !Y) | ancestors(Z)
75 76 # ^ ^
76 77 # any any
77 78 #
78 79 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
79 80 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
80 81 # since 'ancestors' does not care about the order of its argument.
81 82 #
82 83 # Currently, most revsets do not care about the order, so 'define' is
83 84 # equivalent to 'follow' for them, and the resulting order is based on the
84 85 # 'subset' parameter passed down to them:
85 86 #
86 87 # m = revset.match(...)
87 88 # m(repo, subset, order=defineorder)
88 89 # ^^^^^^
89 90 # For most revsets, 'define' means using the order this subset provides
90 91 #
91 92 # There are a few revsets that always redefine the order if 'define' is
92 93 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
93 94 anyorder = 'any' # don't care about the order; could even be random-shuffled
94 95 defineorder = 'define' # ALWAYS redefine, or ALWAYS follow the current order
95 96 followorder = 'follow' # MUST follow the current order
96 97
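# A rough sketch of how a binary operator threads these constants through
# getset() -- compare andset() further down.  For 'X & (Y | Z)':
#
#   xs = getset(repo, subset, x, order)        # left side: 'define' may reorder
#   return getset(repo, xs, y_or_z, followorder)
#                                              # right side: must keep xs's order
#
# With order=anyorder, both operands are evaluated with anyorder instead.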
97 98 # helpers
98 99
99 100 def getset(repo, subset, x, order=defineorder):
100 101 if not x:
101 102 raise error.ParseError(_("missing argument"))
102 103 return methods[x[0]](repo, subset, *x[1:], order=order)
103 104
104 105 def _getrevsource(repo, r):
105 106 extra = repo[r].extra()
106 107 for label in ('source', 'transplant_source', 'rebase_source'):
107 108 if label in extra:
108 109 try:
109 110 return repo[extra[label]].rev()
110 111 except error.RepoLookupError:
111 112 pass
112 113 return None
113 114
114 115 def _sortedb(xs):
115 116 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
116 117
117 118 # operator methods
118 119
119 120 def stringset(repo, subset, x, order):
120 121 if not x:
121 122 raise error.ParseError(_("empty string is not a valid revision"))
122 123 x = scmutil.intrev(scmutil.revsymbol(repo, x))
123 124 if (x in subset
124 125 or x == node.nullrev and isinstance(subset, fullreposet)):
125 126 return baseset([x])
126 127 return baseset()
127 128
128 129 def rawsmartset(repo, subset, x, order):
129 130 """argument is already a smartset, use that directly"""
130 131 if order == followorder:
131 132 return subset & x
132 133 else:
133 134 return x & subset
134 135
135 136 def rangeset(repo, subset, x, y, order):
136 137 m = getset(repo, fullreposet(repo), x)
137 138 n = getset(repo, fullreposet(repo), y)
138 139
139 140 if not m or not n:
140 141 return baseset()
141 142 return _makerangeset(repo, subset, m.first(), n.last(), order)
142 143
143 144 def rangeall(repo, subset, x, order):
144 145 assert x is None
145 146 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
146 147
147 148 def rangepre(repo, subset, y, order):
148 149 # ':y' can't be rewritten to '0:y' since '0' may be hidden
149 150 n = getset(repo, fullreposet(repo), y)
150 151 if not n:
151 152 return baseset()
152 153 return _makerangeset(repo, subset, 0, n.last(), order)
153 154
154 155 def rangepost(repo, subset, x, order):
155 156 m = getset(repo, fullreposet(repo), x)
156 157 if not m:
157 158 return baseset()
158 159 return _makerangeset(repo, subset, m.first(), repo.changelog.tiprev(),
159 160 order)
160 161
161 162 def _makerangeset(repo, subset, m, n, order):
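    # m and n are the first/last revisions of the range expression; a reversed
    # range such as '5:2' takes the m > n branch below and yields a descending
    # spanset, and the wdirrev branches splice the virtual working-directory
    # revision onto either end of the span.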
162 163 if m == n:
163 164 r = baseset([m])
164 165 elif n == node.wdirrev:
165 166 r = spanset(repo, m, len(repo)) + baseset([n])
166 167 elif m == node.wdirrev:
167 168 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
168 169 elif m < n:
169 170 r = spanset(repo, m, n + 1)
170 171 else:
171 172 r = spanset(repo, m, n - 1)
172 173
173 174 if order == defineorder:
174 175 return r & subset
175 176 else:
176 177 # carrying the sorting over when possible would be more efficient
177 178 return subset & r
178 179
179 180 def dagrange(repo, subset, x, y, order):
180 181 r = fullreposet(repo)
181 182 xs = dagop.reachableroots(repo, getset(repo, r, x), getset(repo, r, y),
182 183 includepath=True)
183 184 return subset & xs
184 185
185 186 def andset(repo, subset, x, y, order):
186 187 if order == anyorder:
187 188 yorder = anyorder
188 189 else:
189 190 yorder = followorder
190 191 return getset(repo, getset(repo, subset, x, order), y, yorder)
191 192
192 193 def andsmallyset(repo, subset, x, y, order):
193 194 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
194 195 if order == anyorder:
195 196 yorder = anyorder
196 197 else:
197 198 yorder = followorder
198 199 return getset(repo, getset(repo, subset, y, yorder), x, order)
199 200
200 201 def differenceset(repo, subset, x, y, order):
201 202 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
202 203
203 204 def _orsetlist(repo, subset, xs, order):
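    # Recursively halve the operand list so that the '+' operations below
    # combine the results as a balanced tree rather than one deep,
    # left-leaning chain.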
204 205 assert xs
205 206 if len(xs) == 1:
206 207 return getset(repo, subset, xs[0], order)
207 208 p = len(xs) // 2
208 209 a = _orsetlist(repo, subset, xs[:p], order)
209 210 b = _orsetlist(repo, subset, xs[p:], order)
210 211 return a + b
211 212
212 213 def orset(repo, subset, x, order):
213 214 xs = getlist(x)
214 215 if not xs:
215 216 return baseset()
216 217 if order == followorder:
217 218 # slow path to take the subset order
218 219 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
219 220 else:
220 221 return _orsetlist(repo, subset, xs, order)
221 222
222 223 def notset(repo, subset, x, order):
223 224 return subset - getset(repo, subset, x, anyorder)
224 225
225 226 def relationset(repo, subset, x, y, order):
226 227 raise error.ParseError(_("can't use a relation in this context"))
227 228
228 229 def _splitrange(a, b):
229 230 """Split range with bounds a and b into two ranges at 0 and return two
230 231 tuples of numbers for use as startdepth and stopdepth arguments of
231 232 revancestors and revdescendants.
232 233
233 234 >>> _splitrange(-10, -5) # [-10:-5]
234 235 ((5, 11), (None, None))
235 236 >>> _splitrange(5, 10) # [5:10]
236 237 ((None, None), (5, 11))
237 238 >>> _splitrange(-10, 10) # [-10:10]
238 239 ((0, 11), (0, 11))
239 240 >>> _splitrange(-10, 0) # [-10:0]
240 241 ((0, 11), (None, None))
241 242 >>> _splitrange(0, 10) # [0:10]
242 243 ((None, None), (0, 11))
243 244 >>> _splitrange(0, 0) # [0:0]
244 245 ((0, 1), (None, None))
245 246 >>> _splitrange(1, -1) # [1:-1]
246 247 ((None, None), (None, None))
247 248 """
248 249 ancdepths = (None, None)
249 250 descdepths = (None, None)
250 251 if a == b == 0:
251 252 ancdepths = (0, 1)
252 253 if a < 0:
253 254 ancdepths = (-min(b, 0), -a + 1)
254 255 if b > 0:
255 256 descdepths = (max(a, 0), b + 1)
256 257 return ancdepths, descdepths
257 258
258 259 def generationsrel(repo, subset, x, rel, a, b, order):
259 260 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
260 261 # descendants() predicates
261 262 if a is None:
262 263 a = -(dagop.maxlogdepth - 1)
263 264 if b is None:
264 265 b = +(dagop.maxlogdepth - 1)
265 266
266 267 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
267 268
268 269 if ancstart is None and descstart is None:
269 270 return baseset()
270 271
271 272 revs = getset(repo, fullreposet(repo), x)
272 273 if not revs:
273 274 return baseset()
274 275
275 276 if ancstart is not None and descstart is not None:
276 277 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
277 278 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
278 279 elif ancstart is not None:
279 280 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
280 281 elif descstart is not None:
281 282 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
282 283
283 284 return subset & s
284 285
285 286 def relsubscriptset(repo, subset, x, y, z, order):
286 287 # this is a pretty basic implementation of the 'x#y[z]' operator, still
287 288 # experimental so undocumented. see the wiki for further ideas.
288 289 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
289 290 rel = getsymbol(y)
290 291 try:
291 292 a, b = getrange(z, '')
292 293 except error.ParseError:
293 294 a = getinteger(z, _("relation subscript must be an integer"))
294 295 b = a
295 296 else:
296 297 def getbound(i):
297 298 if i is None:
298 299 return None
299 300 msg = _("relation subscript bounds must be integers")
300 301 return getinteger(i, msg)
301 302 a, b = [getbound(i) for i in (a, b)]
302 303
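    # At this point (using a hypothetical relation name 'rel'):
    #   'x#rel[-2:0]' has been parsed as a=-2, b=0   (range subscript)
    #   'x#rel[3]'    has been parsed as a=b=3       (single-integer subscript)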
303 304 if rel in subscriptrelations:
304 305 return subscriptrelations[rel](repo, subset, x, rel, a, b, order)
305 306
306 307 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
307 308 raise error.UnknownIdentifier(rel, relnames)
308 309
309 310 def subscriptset(repo, subset, x, y, order):
310 311 raise error.ParseError(_("can't use a subscript in this context"))
311 312
312 313 def listset(repo, subset, *xs, **opts):
313 314 raise error.ParseError(_("can't use a list in this context"),
314 315 hint=_('see \'hg help "revsets.x or y"\''))
315 316
316 317 def keyvaluepair(repo, subset, k, v, order):
317 318 raise error.ParseError(_("can't use a key-value pair in this context"))
318 319
319 320 def func(repo, subset, a, b, order):
320 321 f = getsymbol(a)
321 322 if f in symbols:
322 323 func = symbols[f]
323 324 if getattr(func, '_takeorder', False):
324 325 return func(repo, subset, b, order)
325 326 return func(repo, subset, b)
326 327
327 328 keep = lambda fn: getattr(fn, '__doc__', None) is not None
328 329
329 330 syms = [s for (s, fn) in symbols.items() if keep(fn)]
330 331 raise error.UnknownIdentifier(f, syms)
331 332
332 333 # functions
333 334
334 335 # symbols are callables like:
335 336 # fn(repo, subset, x)
336 337 # with:
337 338 # repo - current repository instance
338 339 # subset - of revisions to be examined
339 340 # x - argument in tree form
340 341 symbols = revsetlang.symbols
341 342
342 343 # symbols which can't be used for a DoS attack for any given input
343 344 # (e.g. those which accept regexes as plain strings shouldn't be included)
344 345 # functions that just return a lot of changesets (like all) don't count here
345 346 safesymbols = set()
346 347
347 348 predicate = registrar.revsetpredicate()
348 349
349 350 @predicate('_destupdate')
350 351 def _destupdate(repo, subset, x):
351 352 # experimental revset for update destination
352 353 args = getargsdict(x, 'limit', 'clean')
353 354 return subset & baseset([destutil.destupdate(repo,
354 355 **pycompat.strkwargs(args))[0]])
355 356
356 357 @predicate('_destmerge')
357 358 def _destmerge(repo, subset, x):
358 359 # experimental revset for merge destination
359 360 sourceset = None
360 361 if x is not None:
361 362 sourceset = getset(repo, fullreposet(repo), x)
362 363 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
363 364
364 365 @predicate('adds(pattern)', safe=True, weight=30)
365 366 def adds(repo, subset, x):
366 367 """Changesets that add a file matching pattern.
367 368
368 369 The pattern without explicit kind like ``glob:`` is expected to be
369 370 relative to the current directory and match against a file or a
370 371 directory.
371 372 """
372 373 # i18n: "adds" is a keyword
373 374 pat = getstring(x, _("adds requires a pattern"))
374 375 return checkstatus(repo, subset, pat, 1)
375 376
376 377 @predicate('ancestor(*changeset)', safe=True, weight=0.5)
377 378 def ancestor(repo, subset, x):
378 379 """A greatest common ancestor of the changesets.
379 380
380 381 Accepts 0 or more changesets.
381 382 Will return empty list when passed no args.
382 383 Greatest common ancestor of a single changeset is that changeset.
383 384 """
384 385 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
385 386 try:
386 387 anc = repo[next(reviter)]
387 388 except StopIteration:
388 389 return baseset()
389 390 for r in reviter:
390 391 anc = anc.ancestor(repo[r])
391 392
392 393 r = scmutil.intrev(anc)
393 394 if r in subset:
394 395 return baseset([r])
395 396 return baseset()
396 397
397 398 def _ancestors(repo, subset, x, followfirst=False, startdepth=None,
398 399 stopdepth=None):
399 400 heads = getset(repo, fullreposet(repo), x)
400 401 if not heads:
401 402 return baseset()
402 403 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
403 404 return subset & s
404 405
405 406 @predicate('ancestors(set[, depth])', safe=True)
406 407 def ancestors(repo, subset, x):
407 408 """Changesets that are ancestors of changesets in set, including the
408 409 given changesets themselves.
409 410
410 411 If depth is specified, the result only includes changesets up to
411 412 the specified generation.
412 413 """
413 414 # startdepth is for internal use only until we can decide the UI
414 415 args = getargsdict(x, 'ancestors', 'set depth startdepth')
415 416 if 'set' not in args:
416 417 # i18n: "ancestors" is a keyword
417 418 raise error.ParseError(_('ancestors takes at least 1 argument'))
418 419 startdepth = stopdepth = None
419 420 if 'startdepth' in args:
420 421 n = getinteger(args['startdepth'],
421 422 "ancestors expects an integer startdepth")
422 423 if n < 0:
423 424 raise error.ParseError("negative startdepth")
424 425 startdepth = n
425 426 if 'depth' in args:
426 427 # i18n: "ancestors" is a keyword
427 428 n = getinteger(args['depth'], _("ancestors expects an integer depth"))
428 429 if n < 0:
429 430 raise error.ParseError(_("negative depth"))
430 431 stopdepth = n + 1
431 432 return _ancestors(repo, subset, args['set'],
432 433 startdepth=startdepth, stopdepth=stopdepth)
433 434
434 435 @predicate('_firstancestors', safe=True)
435 436 def _firstancestors(repo, subset, x):
436 437 # ``_firstancestors(set)``
437 438 # Like ``ancestors(set)`` but follows only the first parents.
438 439 return _ancestors(repo, subset, x, followfirst=True)
439 440
440 441 def _childrenspec(repo, subset, x, n, order):
441 442 """Changesets that are the Nth child of a changeset
442 443 in set.
443 444 """
444 445 cs = set()
445 446 for r in getset(repo, fullreposet(repo), x):
446 447 for i in range(n):
447 448 c = repo[r].children()
448 449 if len(c) == 0:
449 450 break
450 451 if len(c) > 1:
451 452 raise error.RepoLookupError(
452 453 _("revision in set has more than one child"))
453 454 r = c[0].rev()
454 455 else:
455 456 cs.add(r)
456 457 return subset & cs
457 458
458 459 def ancestorspec(repo, subset, x, n, order):
459 460 """``set~n``
460 461 Changesets that are the Nth ancestor (first parents only) of a changeset
461 462 in set.
462 463 """
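    # e.g. 'foo~2' selects the grandparents of 'foo', following first parents
    # only; a negative n is delegated to _childrenspec() above and walks
    # toward descendants instead.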
463 464 n = getinteger(n, _("~ expects a number"))
464 465 if n < 0:
465 466 # children lookup
466 467 return _childrenspec(repo, subset, x, -n, order)
467 468 ps = set()
468 469 cl = repo.changelog
469 470 for r in getset(repo, fullreposet(repo), x):
470 471 for i in range(n):
471 472 try:
472 473 r = cl.parentrevs(r)[0]
473 474 except error.WdirUnsupported:
474 475 r = repo[r].p1().rev()
475 476 ps.add(r)
476 477 return subset & ps
477 478
478 479 @predicate('author(string)', safe=True, weight=10)
479 480 def author(repo, subset, x):
480 481 """Alias for ``user(string)``.
481 482 """
482 483 # i18n: "author" is a keyword
483 484 n = getstring(x, _("author requires a string"))
484 485 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
485 486 return subset.filter(lambda x: matcher(repo[x].user()),
486 487 condrepr=('<user %r>', n))
487 488
488 489 @predicate('bisect(string)', safe=True)
489 490 def bisect(repo, subset, x):
490 491 """Changesets marked in the specified bisect status:
491 492
492 493 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
493 494 - ``goods``, ``bads`` : csets topologically good/bad
494 495 - ``range`` : csets taking part in the bisection
495 496 - ``pruned`` : csets that are goods, bads or skipped
496 497 - ``untested`` : csets whose fate is yet unknown
497 498 - ``ignored`` : csets ignored due to DAG topology
498 499 - ``current`` : the cset currently being bisected
499 500 """
500 501 # i18n: "bisect" is a keyword
501 502 status = getstring(x, _("bisect requires a string")).lower()
502 503 state = set(hbisect.get(repo, status))
503 504 return subset & state
504 505
505 506 # Backward-compatibility
506 507 # - no help entry so that we do not advertise it any more
507 508 @predicate('bisected', safe=True)
508 509 def bisected(repo, subset, x):
509 510 return bisect(repo, subset, x)
510 511
511 512 @predicate('bookmark([name])', safe=True)
512 513 def bookmark(repo, subset, x):
513 514 """The named bookmark or all bookmarks.
514 515
515 516 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
516 517 """
517 518 # i18n: "bookmark" is a keyword
518 519 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
519 520 if args:
520 521 bm = getstring(args[0],
521 522 # i18n: "bookmark" is a keyword
522 523 _('the argument to bookmark must be a string'))
523 524 kind, pattern, matcher = stringutil.stringmatcher(bm)
524 525 bms = set()
525 526 if kind == 'literal':
526 527 if bm == pattern:
527 528 pattern = repo._bookmarks.expandname(pattern)
528 529 bmrev = repo._bookmarks.get(pattern, None)
529 530 if not bmrev:
530 531 raise error.RepoLookupError(_("bookmark '%s' does not exist")
531 532 % pattern)
532 533 bms.add(repo[bmrev].rev())
533 534 else:
534 535 matchrevs = set()
535 536 for name, bmrev in repo._bookmarks.iteritems():
536 537 if matcher(name):
537 538 matchrevs.add(bmrev)
538 539 for bmrev in matchrevs:
539 540 bms.add(repo[bmrev].rev())
540 541 else:
541 542 bms = {repo[r].rev() for r in repo._bookmarks.values()}
542 543 bms -= {node.nullrev}
543 544 return subset & bms
544 545
545 546 @predicate('branch(string or set)', safe=True, weight=10)
546 547 def branch(repo, subset, x):
547 548 """
548 549 All changesets belonging to the given branch or the branches of the given
549 550 changesets.
550 551
551 552 Pattern matching is supported for `string`. See
552 553 :hg:`help revisions.patterns`.
553 554 """
554 555 getbi = repo.revbranchcache().branchinfo
555 556 def getbranch(r):
556 557 try:
557 558 return getbi(r)[0]
558 559 except error.WdirUnsupported:
559 560 return repo[r].branch()
560 561
561 562 try:
562 563 b = getstring(x, '')
563 564 except error.ParseError:
564 565 # not a string, but another revspec, e.g. tip()
565 566 pass
566 567 else:
567 568 kind, pattern, matcher = stringutil.stringmatcher(b)
568 569 if kind == 'literal':
569 570 # note: falls through to the revspec case if no branch with
570 571 # this name exists and pattern kind is not specified explicitly
571 572 if pattern in repo.branchmap():
572 573 return subset.filter(lambda r: matcher(getbranch(r)),
573 574 condrepr=('<branch %r>', b))
574 575 if b.startswith('literal:'):
575 576 raise error.RepoLookupError(_("branch '%s' does not exist")
576 577 % pattern)
577 578 else:
578 579 return subset.filter(lambda r: matcher(getbranch(r)),
579 580 condrepr=('<branch %r>', b))
580 581
581 582 s = getset(repo, fullreposet(repo), x)
582 583 b = set()
583 584 for r in s:
584 585 b.add(getbranch(r))
585 586 c = s.__contains__
586 587 return subset.filter(lambda r: c(r) or getbranch(r) in b,
587 588 condrepr=lambda: '<branch %r>' % _sortedb(b))
588 589
589 590 @predicate('phasedivergent()', safe=True)
590 591 def phasedivergent(repo, subset, x):
591 592 """Mutable changesets marked as successors of public changesets.
592 593
593 594 Only non-public and non-obsolete changesets can be `phasedivergent`.
594 595 (EXPERIMENTAL)
595 596 """
596 597 # i18n: "phasedivergent" is a keyword
597 598 getargs(x, 0, 0, _("phasedivergent takes no arguments"))
598 599 phasedivergent = obsmod.getrevs(repo, 'phasedivergent')
599 600 return subset & phasedivergent
600 601
601 602 @predicate('bundle()', safe=True)
602 603 def bundle(repo, subset, x):
603 604 """Changesets in the bundle.
604 605
605 606 Bundle must be specified by the -R option."""
606 607
607 608 try:
608 609 bundlerevs = repo.changelog.bundlerevs
609 610 except AttributeError:
610 611 raise error.Abort(_("no bundle provided - specify with -R"))
611 612 return subset & bundlerevs
612 613
613 614 def checkstatus(repo, subset, pat, field):
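    # 'field' is an index into the status tuple returned by repo.status()
    # below: 0 = modified, 1 = added, 2 = removed.  modifies() passes 0 and
    # adds() passes 1.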
614 615 hasset = matchmod.patkind(pat) == 'set'
615 616
616 617 mcache = [None]
617 618 def matches(x):
618 619 c = repo[x]
619 620 if not mcache[0] or hasset:
620 621 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
621 622 m = mcache[0]
622 623 fname = None
623 624 if not m.anypats() and len(m.files()) == 1:
624 625 fname = m.files()[0]
625 626 if fname is not None:
626 627 if fname not in c.files():
627 628 return False
628 629 else:
629 630 for f in c.files():
630 631 if m(f):
631 632 break
632 633 else:
633 634 return False
634 635 files = repo.status(c.p1().node(), c.node())[field]
635 636 if fname is not None:
636 637 if fname in files:
637 638 return True
638 639 else:
639 640 for f in files:
640 641 if m(f):
641 642 return True
642 643
643 644 return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat))
644 645
645 646 def _children(repo, subset, parentset):
646 647 if not parentset:
647 648 return baseset()
648 649 cs = set()
649 650 pr = repo.changelog.parentrevs
650 651 minrev = parentset.min()
651 652 nullrev = node.nullrev
652 653 for r in subset:
653 654 if r <= minrev:
654 655 continue
655 656 p1, p2 = pr(r)
656 657 if p1 in parentset:
657 658 cs.add(r)
658 659 if p2 != nullrev and p2 in parentset:
659 660 cs.add(r)
660 661 return baseset(cs)
661 662
662 663 @predicate('children(set)', safe=True)
663 664 def children(repo, subset, x):
664 665 """Child changesets of changesets in set.
665 666 """
666 667 s = getset(repo, fullreposet(repo), x)
667 668 cs = _children(repo, subset, s)
668 669 return subset & cs
669 670
670 671 @predicate('closed()', safe=True, weight=10)
671 672 def closed(repo, subset, x):
672 673 """Changeset is closed.
673 674 """
674 675 # i18n: "closed" is a keyword
675 676 getargs(x, 0, 0, _("closed takes no arguments"))
676 677 return subset.filter(lambda r: repo[r].closesbranch(),
677 678 condrepr='<branch closed>')
678 679
679 680 # for internal use
680 681 @predicate('_commonancestorheads(set)', safe=True)
681 682 def _commonancestorheads(repo, subset, x):
682 683 # This is an internal method for quickly calculating "heads(::x and
683 684 # ::y)"
684 685
685 686 # These greatest common ancestors are the same ones that the consensus bid
686 687 # merge will find.
687 688 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
688 689
689 690 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
690 691 return subset & baseset(ancs)
691 692
692 693 @predicate('commonancestors(set)', safe=True)
693 694 def commonancestors(repo, subset, x):
694 695 """Changesets that are ancestors of every changeset in set.
695 696 """
696 697 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
697 698 if not startrevs:
698 699 return baseset()
699 700 for r in startrevs:
700 701 subset &= dagop.revancestors(repo, baseset([r]))
701 702 return subset
702 703
703 704 @predicate('contains(pattern)', weight=100)
704 705 def contains(repo, subset, x):
705 706 """The revision's manifest contains a file matching pattern (but might not
706 707 modify it). See :hg:`help patterns` for information about file patterns.
707 708
708 709 The pattern without explicit kind like ``glob:`` is expected to be
709 710 relative to the current directory and match against a file exactly
710 711 for efficiency.
711 712 """
712 713 # i18n: "contains" is a keyword
713 714 pat = getstring(x, _("contains requires a pattern"))
714 715
715 716 def matches(x):
716 717 if not matchmod.patkind(pat):
717 718 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
718 719 if pats in repo[x]:
719 720 return True
720 721 else:
721 722 c = repo[x]
722 723 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
723 724 for f in c.manifest():
724 725 if m(f):
725 726 return True
726 727 return False
727 728
728 729 return subset.filter(matches, condrepr=('<contains %r>', pat))
729 730
730 731 @predicate('converted([id])', safe=True)
731 732 def converted(repo, subset, x):
732 733 """Changesets converted from the given identifier in the old repository if
733 734 present, or all converted changesets if no identifier is specified.
734 735 """
735 736
736 737 # There is exactly no chance of resolving the revision, so do a simple
737 738 # string compare and hope for the best
738 739
739 740 rev = None
740 741 # i18n: "converted" is a keyword
741 742 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
742 743 if l:
743 744 # i18n: "converted" is a keyword
744 745 rev = getstring(l[0], _('converted requires a revision'))
745 746
746 747 def _matchvalue(r):
747 748 source = repo[r].extra().get('convert_revision', None)
748 749 return source is not None and (rev is None or source.startswith(rev))
749 750
750 751 return subset.filter(lambda r: _matchvalue(r),
751 752 condrepr=('<converted %r>', rev))
752 753
753 754 @predicate('date(interval)', safe=True, weight=10)
754 755 def date(repo, subset, x):
755 756 """Changesets within the interval, see :hg:`help dates`.
756 757 """
757 758 # i18n: "date" is a keyword
758 759 ds = getstring(x, _("date requires a string"))
759 760 dm = dateutil.matchdate(ds)
760 761 return subset.filter(lambda x: dm(repo[x].date()[0]),
761 762 condrepr=('<date %r>', ds))
762 763
763 764 @predicate('desc(string)', safe=True, weight=10)
764 765 def desc(repo, subset, x):
765 766 """Search commit message for string. The match is case-insensitive.
766 767
767 768 Pattern matching is supported for `string`. See
768 769 :hg:`help revisions.patterns`.
769 770 """
770 771 # i18n: "desc" is a keyword
771 772 ds = getstring(x, _("desc requires a string"))
772 773
773 774 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
774 775
775 776 return subset.filter(lambda r: matcher(repo[r].description()),
776 777 condrepr=('<desc %r>', ds))
777 778
778 779 def _descendants(repo, subset, x, followfirst=False, startdepth=None,
779 780 stopdepth=None):
780 781 roots = getset(repo, fullreposet(repo), x)
781 782 if not roots:
782 783 return baseset()
783 784 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
784 785 return subset & s
785 786
786 787 @predicate('descendants(set[, depth])', safe=True)
787 788 def descendants(repo, subset, x):
788 789 """Changesets which are descendants of changesets in set, including the
789 790 given changesets themselves.
790 791
791 792 If depth is specified, the result only includes changesets up to
792 793 the specified generation.
793 794 """
794 795 # startdepth is for internal use only until we can decide the UI
795 796 args = getargsdict(x, 'descendants', 'set depth startdepth')
796 797 if 'set' not in args:
797 798 # i18n: "descendants" is a keyword
798 799 raise error.ParseError(_('descendants takes at least 1 argument'))
799 800 startdepth = stopdepth = None
800 801 if 'startdepth' in args:
801 802 n = getinteger(args['startdepth'],
802 803 "descendants expects an integer startdepth")
803 804 if n < 0:
804 805 raise error.ParseError("negative startdepth")
805 806 startdepth = n
806 807 if 'depth' in args:
807 808 # i18n: "descendants" is a keyword
808 809 n = getinteger(args['depth'], _("descendants expects an integer depth"))
809 810 if n < 0:
810 811 raise error.ParseError(_("negative depth"))
811 812 stopdepth = n + 1
812 813 return _descendants(repo, subset, args['set'],
813 814 startdepth=startdepth, stopdepth=stopdepth)
814 815
815 816 @predicate('_firstdescendants', safe=True)
816 817 def _firstdescendants(repo, subset, x):
817 818 # ``_firstdescendants(set)``
818 819 # Like ``descendants(set)`` but follows only the first parents.
819 820 return _descendants(repo, subset, x, followfirst=True)
820 821
821 822 @predicate('destination([set])', safe=True, weight=10)
822 823 def destination(repo, subset, x):
823 824 """Changesets that were created by a graft, transplant or rebase operation,
824 825 with the given revisions specified as the source. Omitting the optional set
825 826 is the same as passing all().
826 827 """
827 828 if x is not None:
828 829 sources = getset(repo, fullreposet(repo), x)
829 830 else:
830 831 sources = fullreposet(repo)
831 832
832 833 dests = set()
833 834
834 835 # subset contains all of the possible destinations that can be returned, so
835 836 # iterate over them and see if their source(s) were provided in the arg set.
836 837 # Even if the immediate src of r is not in the arg set, src's source (or
837 838 # further back) may be. Scanning back further than the immediate src allows
838 839 # transitive transplants and rebases to yield the same results as transitive
839 840 # grafts.
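    #
    # For example (hypothetical history): if A was grafted to B and B was then
    # rebased to C, destination(A) selects both B and C, because C's lineage
    # walks back through B to the original source A.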
840 841 for r in subset:
841 842 src = _getrevsource(repo, r)
842 843 lineage = None
843 844
844 845 while src is not None:
845 846 if lineage is None:
846 847 lineage = list()
847 848
848 849 lineage.append(r)
849 850
850 851 # The visited lineage is a match if the current source is in the arg
851 852 # set. Since every candidate dest is visited by way of iterating
852 853 # subset, any dests further back in the lineage will be tested by a
853 854 # different iteration over subset. Likewise, if the src was already
854 855 # selected, the current lineage can be selected without going back
855 856 # further.
856 857 if src in sources or src in dests:
857 858 dests.update(lineage)
858 859 break
859 860
860 861 r = src
861 862 src = _getrevsource(repo, r)
862 863
863 864 return subset.filter(dests.__contains__,
864 865 condrepr=lambda: '<destination %r>' % _sortedb(dests))
865 866
866 867 @predicate('contentdivergent()', safe=True)
867 868 def contentdivergent(repo, subset, x):
868 869 """
869 870 Final successors of changesets with an alternative set of final
870 871 successors. (EXPERIMENTAL)
871 872 """
872 873 # i18n: "contentdivergent" is a keyword
873 874 getargs(x, 0, 0, _("contentdivergent takes no arguments"))
874 875 contentdivergent = obsmod.getrevs(repo, 'contentdivergent')
875 876 return subset & contentdivergent
876 877
877 878 @predicate('extdata(source)', safe=False, weight=100)
878 879 def extdata(repo, subset, x):
879 880 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
880 881 # i18n: "extdata" is a keyword
881 882 args = getargsdict(x, 'extdata', 'source')
882 883 source = getstring(args.get('source'),
883 884 # i18n: "extdata" is a keyword
884 885 _('extdata takes at least 1 string argument'))
885 886 data = scmutil.extdatasource(repo, source)
886 887 return subset & baseset(data)
887 888
888 889 @predicate('extinct()', safe=True)
889 890 def extinct(repo, subset, x):
890 891 """Obsolete changesets with obsolete descendants only.
891 892 """
892 893 # i18n: "extinct" is a keyword
893 894 getargs(x, 0, 0, _("extinct takes no arguments"))
894 895 extincts = obsmod.getrevs(repo, 'extinct')
895 896 return subset & extincts
896 897
897 898 @predicate('extra(label, [value])', safe=True)
898 899 def extra(repo, subset, x):
899 900 """Changesets with the given label in the extra metadata, with the given
900 901 optional value.
901 902
902 903 Pattern matching is supported for `value`. See
903 904 :hg:`help revisions.patterns`.
904 905 """
905 906 args = getargsdict(x, 'extra', 'label value')
906 907 if 'label' not in args:
907 908 # i18n: "extra" is a keyword
908 909 raise error.ParseError(_('extra takes at least 1 argument'))
909 910 # i18n: "extra" is a keyword
910 911 label = getstring(args['label'], _('first argument to extra must be '
911 912 'a string'))
912 913 value = None
913 914
914 915 if 'value' in args:
915 916 # i18n: "extra" is a keyword
916 917 value = getstring(args['value'], _('second argument to extra must be '
917 918 'a string'))
918 919 kind, value, matcher = stringutil.stringmatcher(value)
919 920
920 921 def _matchvalue(r):
921 922 extra = repo[r].extra()
922 923 return label in extra and (value is None or matcher(extra[label]))
923 924
924 925 return subset.filter(lambda r: _matchvalue(r),
925 926 condrepr=('<extra[%r] %r>', label, value))
926 927
927 928 @predicate('filelog(pattern)', safe=True)
928 929 def filelog(repo, subset, x):
929 930 """Changesets connected to the specified filelog.
930 931
931 932 For performance reasons, visits only revisions mentioned in the file-level
932 933 filelog, rather than filtering through all changesets (much faster, but
933 934 doesn't include deletes or duplicate changes). For a slower, more accurate
934 935 result, use ``file()``.
935 936
936 937 The pattern without explicit kind like ``glob:`` is expected to be
937 938 relative to the current directory and match against a file exactly
938 939 for efficiency.
939 940
940 941 If some linkrev points to revisions filtered by the current repoview, we'll
941 942 work around it to return a non-filtered value.
942 943 """
943 944
944 945 # i18n: "filelog" is a keyword
945 946 pat = getstring(x, _("filelog requires a pattern"))
946 947 s = set()
947 948 cl = repo.changelog
948 949
949 950 if not matchmod.patkind(pat):
950 951 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
951 952 files = [f]
952 953 else:
953 954 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
954 955 files = (f for f in repo[None] if m(f))
955 956
956 957 for f in files:
957 958 fl = repo.file(f)
958 959 known = {}
959 960 scanpos = 0
960 961 for fr in list(fl):
961 962 fn = fl.node(fr)
962 963 if fn in known:
963 964 s.add(known[fn])
964 965 continue
965 966
966 967 lr = fl.linkrev(fr)
967 968 if lr in cl:
968 969 s.add(lr)
969 970 elif scanpos is not None:
970 971 # lowest matching changeset is filtered, scan further
971 972 # ahead in changelog
972 973 start = max(lr, scanpos) + 1
973 974 scanpos = None
974 975 for r in cl.revs(start):
975 976 # minimize parsing of non-matching entries
976 977 if f in cl.revision(r) and f in cl.readfiles(r):
977 978 try:
978 979 # try to use manifest delta fastpath
979 980 n = repo[r].filenode(f)
980 981 if n not in known:
981 982 if n == fn:
982 983 s.add(r)
983 984 scanpos = r
984 985 break
985 986 else:
986 987 known[n] = r
987 988 except error.ManifestLookupError:
988 989 # deletion in changelog
989 990 continue
990 991
991 992 return subset & s
992 993
993 994 @predicate('first(set, [n])', safe=True, takeorder=True, weight=0)
994 995 def first(repo, subset, x, order):
995 996 """An alias for limit().
996 997 """
997 998 return limit(repo, subset, x, order)
998 999
999 1000 def _follow(repo, subset, x, name, followfirst=False):
1000 1001 args = getargsdict(x, name, 'file startrev')
1001 1002 revs = None
1002 1003 if 'startrev' in args:
1003 1004 revs = getset(repo, fullreposet(repo), args['startrev'])
1004 1005 if 'file' in args:
1005 1006 x = getstring(args['file'], _("%s expected a pattern") % name)
1006 1007 if revs is None:
1007 1008 revs = [None]
1008 1009 fctxs = []
1009 1010 for r in revs:
1010 1011 ctx = mctx = repo[r]
1011 1012 if r is None:
1012 1013 ctx = repo['.']
1013 1014 m = matchmod.match(repo.root, repo.getcwd(), [x],
1014 1015 ctx=mctx, default='path')
1015 1016 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
1016 1017 s = dagop.filerevancestors(fctxs, followfirst)
1017 1018 else:
1018 1019 if revs is None:
1019 1020 revs = baseset([repo['.'].rev()])
1020 1021 s = dagop.revancestors(repo, revs, followfirst)
1021 1022
1022 1023 return subset & s
1023 1024
1024 1025 @predicate('follow([file[, startrev]])', safe=True)
1025 1026 def follow(repo, subset, x):
1026 1027 """
1027 1028 An alias for ``::.`` (ancestors of the working directory's first parent).
1028 1029 If file pattern is specified, the histories of files matching given
1029 1030 pattern in the revision given by startrev are followed, including copies.
1030 1031 """
1031 1032 return _follow(repo, subset, x, 'follow')
1032 1033
1033 1034 @predicate('_followfirst', safe=True)
1034 1035 def _followfirst(repo, subset, x):
1035 1036 # ``followfirst([file[, startrev]])``
1036 1037 # Like ``follow([file[, startrev]])`` but follows only the first parent
1037 1038 # of every revision or file revision.
1038 1039 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1039 1040
1040 1041 @predicate('followlines(file, fromline:toline[, startrev=., descend=False])',
1041 1042 safe=True)
1042 1043 def followlines(repo, subset, x):
1043 1044 """Changesets modifying `file` in line range ('fromline', 'toline').
1044 1045
1045 1046 Line range corresponds to 'file' content at 'startrev' and should hence be
1046 1047 consistent with file size. If startrev is not specified, working directory's
1047 1048 parent is used.
1048 1049
1049 1050 By default, ancestors of 'startrev' are returned. If 'descend' is True,
1050 1051 descendants of 'startrev' are returned though renames are (currently) not
1051 1052 followed in this direction.
1052 1053 """
1053 1054 args = getargsdict(x, 'followlines', 'file *lines startrev descend')
1054 1055 if len(args['lines']) != 1:
1055 1056 raise error.ParseError(_("followlines requires a line range"))
1056 1057
1057 1058 rev = '.'
1058 1059 if 'startrev' in args:
1059 1060 revs = getset(repo, fullreposet(repo), args['startrev'])
1060 1061 if len(revs) != 1:
1061 1062 raise error.ParseError(
1062 1063 # i18n: "followlines" is a keyword
1063 1064 _("followlines expects exactly one revision"))
1064 1065 rev = revs.last()
1065 1066
1066 1067 pat = getstring(args['file'], _("followlines requires a pattern"))
1067 1068 # i18n: "followlines" is a keyword
1068 1069 msg = _("followlines expects exactly one file")
1069 1070 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1071 fromline, toline = util.processlinerange(
1072 *getintrange(args['lines'][0],
1070 1073 # i18n: "followlines" is a keyword
1071 lr = getrange(args['lines'][0], _("followlines expects a line range"))
1072 fromline, toline = [getinteger(a, _("line range bounds must be integers"))
1073 for a in lr]
1074 fromline, toline = util.processlinerange(fromline, toline)
1074 _("followlines expects a line range"),
1075 _("line range bounds must be integers")))
1075 1076
1076 1077 fctx = repo[rev].filectx(fname)
1077 1078 descend = False
1078 1079 if 'descend' in args:
1079 1080 descend = getboolean(args['descend'],
1080 1081 # i18n: "descend" is a keyword
1081 1082 _("descend argument must be a boolean"))
1082 1083 if descend:
1083 1084 rs = generatorset(
1084 1085 (c.rev() for c, _linerange
1085 1086 in dagop.blockdescendants(fctx, fromline, toline)),
1086 1087 iterasc=True)
1087 1088 else:
1088 1089 rs = generatorset(
1089 1090 (c.rev() for c, _linerange
1090 1091 in dagop.blockancestors(fctx, fromline, toline)),
1091 1092 iterasc=False)
1092 1093 return subset & rs
1093 1094
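# Illustrative followlines() queries (file name and revisions are made up):
#   followlines(foo.py, 10:20)
#       -> ancestors of '.' that touch lines 10-20 of foo.py
#   followlines(foo.py, 10:20, startrev=tip, descend=True)
#       -> descendants of tip touching that range (renames not followed)
# The 'fromline:toline' argument is what getintrange() parses above.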
1094 1095 @predicate('all()', safe=True)
1095 1096 def getall(repo, subset, x):
1096 1097 """All changesets, the same as ``0:tip``.
1097 1098 """
1098 1099 # i18n: "all" is a keyword
1099 1100 getargs(x, 0, 0, _("all takes no arguments"))
1100 1101 return subset & spanset(repo) # drop "null" if any
1101 1102
1102 1103 @predicate('grep(regex)', weight=10)
1103 1104 def grep(repo, subset, x):
1104 1105 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1105 1106 to ensure special escape characters are handled correctly. Unlike
1106 1107 ``keyword(string)``, the match is case-sensitive.
1107 1108 """
1108 1109 try:
1109 1110 # i18n: "grep" is a keyword
1110 1111 gr = re.compile(getstring(x, _("grep requires a string")))
1111 1112 except re.error as e:
1112 1113 raise error.ParseError(
1113 1114 _('invalid match pattern: %s') % stringutil.forcebytestr(e))
1114 1115
1115 1116 def matches(x):
1116 1117 c = repo[x]
1117 1118 for e in c.files() + [c.user(), c.description()]:
1118 1119 if gr.search(e):
1119 1120 return True
1120 1121 return False
1121 1122
1122 1123 return subset.filter(matches, condrepr=('<grep %r>', gr.pattern))
1123 1124
1124 1125 @predicate('_matchfiles', safe=True)
1125 1126 def _matchfiles(repo, subset, x):
1126 1127 # _matchfiles takes a revset list of prefixed arguments:
1127 1128 #
1128 1129 # [p:foo, i:bar, x:baz]
1129 1130 #
1130 1131 # builds a match object from them and filters subset. Allowed
1131 1132 # prefixes are 'p:' for regular patterns, 'i:' for include
1132 1133 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1133 1134 # a revision identifier, or the empty string to reference the
1134 1135 # working directory, from which the match object is
1135 1136 # initialized. Use 'd:' to set the default matching mode, default
1136 1137 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
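    #
    # An illustrative argument list (values are made up):
    #
    #   [p:foo/bar, i:*.py, x:build/**, r:tip, d:relpath]
    #
    # builds a matcher for pattern 'foo/bar', restricted to includes '*.py',
    # minus excludes 'build/**', resolved in the context of revision 'tip',
    # with 'relpath' as the default pattern kind.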
1137 1138
1138 1139 l = getargs(x, 1, -1, "_matchfiles requires at least one argument")
1139 1140 pats, inc, exc = [], [], []
1140 1141 rev, default = None, None
1141 1142 for arg in l:
1142 1143 s = getstring(arg, "_matchfiles requires string arguments")
1143 1144 prefix, value = s[:2], s[2:]
1144 1145 if prefix == 'p:':
1145 1146 pats.append(value)
1146 1147 elif prefix == 'i:':
1147 1148 inc.append(value)
1148 1149 elif prefix == 'x:':
1149 1150 exc.append(value)
1150 1151 elif prefix == 'r:':
1151 1152 if rev is not None:
1152 1153 raise error.ParseError('_matchfiles expected at most one '
1153 1154 'revision')
1154 1155 if value == '': # empty means working directory
1155 1156 rev = node.wdirrev
1156 1157 else:
1157 1158 rev = value
1158 1159 elif prefix == 'd:':
1159 1160 if default is not None:
1160 1161 raise error.ParseError('_matchfiles expected at most one '
1161 1162 'default mode')
1162 1163 default = value
1163 1164 else:
1164 1165 raise error.ParseError('invalid _matchfiles prefix: %s' % prefix)
1165 1166 if not default:
1166 1167 default = 'glob'
1167 1168 hasset = any(matchmod.patkind(p) == 'set' for p in pats + inc + exc)
1168 1169
1169 1170 mcache = [None]
1170 1171
1171 1172 # This directly reads the changelog data as creating changectx for all
1172 1173 # revisions is quite expensive.
1173 1174 getfiles = repo.changelog.readfiles
1174 1175 wdirrev = node.wdirrev
1175 1176 def matches(x):
1176 1177 if x == wdirrev:
1177 1178 files = repo[x].files()
1178 1179 else:
1179 1180 files = getfiles(x)
1180 1181
1181 1182 if not mcache[0] or (hasset and rev is None):
1182 1183 r = x if rev is None else rev
1183 1184 mcache[0] = matchmod.match(repo.root, repo.getcwd(), pats,
1184 1185 include=inc, exclude=exc, ctx=repo[r],
1185 1186 default=default)
1186 1187 m = mcache[0]
1187 1188
1188 1189 for f in files:
1189 1190 if m(f):
1190 1191 return True
1191 1192 return False
1192 1193
1193 1194 return subset.filter(matches,
1194 1195 condrepr=('<matchfiles patterns=%r, include=%r '
1195 1196 'exclude=%r, default=%r, rev=%r>',
1196 1197 pats, inc, exc, default, rev))
1197 1198
1198 1199 @predicate('file(pattern)', safe=True, weight=10)
1199 1200 def hasfile(repo, subset, x):
1200 1201 """Changesets affecting files matched by pattern.
1201 1202
1202 1203 For a faster but less accurate result, consider using ``filelog()``
1203 1204 instead.
1204 1205
1205 1206 This predicate uses ``glob:`` as the default kind of pattern.
1206 1207 """
1207 1208 # i18n: "file" is a keyword
1208 1209 pat = getstring(x, _("file requires a pattern"))
1209 1210 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1210 1211
1211 1212 @predicate('head()', safe=True)
1212 1213 def head(repo, subset, x):
1213 1214 """Changeset is a named branch head.
1214 1215 """
1215 1216 # i18n: "head" is a keyword
1216 1217 getargs(x, 0, 0, _("head takes no arguments"))
1217 1218 hs = set()
1218 1219 cl = repo.changelog
1219 1220 for ls in repo.branchmap().itervalues():
1220 1221 hs.update(cl.rev(h) for h in ls)
1221 1222 return subset & baseset(hs)
1222 1223
1223 1224 @predicate('heads(set)', safe=True, takeorder=True)
1224 1225 def heads(repo, subset, x, order):
1225 1226 """Members of set with no children in set.
1226 1227 """
1227 1228 # argument set should never define order
1228 1229 if order == defineorder:
1229 1230 order = followorder
1230 1231 inputset = getset(repo, fullreposet(repo), x, order=order)
1231 1232 wdirparents = None
1232 1233 if node.wdirrev in inputset:
1233 1234 # a bit slower, but not common so good enough for now
1234 1235 wdirparents = [p.rev() for p in repo[None].parents()]
1235 1236 inputset = set(inputset)
1236 1237 inputset.discard(node.wdirrev)
1237 1238 heads = repo.changelog.headrevs(inputset)
1238 1239 if wdirparents is not None:
1239 1240 heads.difference_update(wdirparents)
1240 1241 heads.add(node.wdirrev)
1241 1242 heads = baseset(heads)
1242 1243 return subset & heads
1243 1244
1244 1245 @predicate('hidden()', safe=True)
1245 1246 def hidden(repo, subset, x):
1246 1247 """Hidden changesets.
1247 1248 """
1248 1249 # i18n: "hidden" is a keyword
1249 1250 getargs(x, 0, 0, _("hidden takes no arguments"))
1250 1251 hiddenrevs = repoview.filterrevs(repo, 'visible')
1251 1252 return subset & hiddenrevs
1252 1253
1253 1254 @predicate('keyword(string)', safe=True, weight=10)
1254 1255 def keyword(repo, subset, x):
1255 1256 """Search commit message, user name, and names of changed files for
1256 1257 string. The match is case-insensitive.
1257 1258
1258 1259 For a regular expression or case sensitive search of these fields, use
1259 1260 ``grep(regex)``.
1260 1261 """
1261 1262 # i18n: "keyword" is a keyword
1262 1263 kw = encoding.lower(getstring(x, _("keyword requires a string")))
1263 1264
1264 1265 def matches(r):
1265 1266 c = repo[r]
1266 1267 return any(kw in encoding.lower(t)
1267 1268 for t in c.files() + [c.user(), c.description()])
1268 1269
1269 1270 return subset.filter(matches, condrepr=('<keyword %r>', kw))
1270 1271
1271 1272 @predicate('limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1272 1273 def limit(repo, subset, x, order):
1273 1274 """First n members of set, defaulting to 1, starting from offset.
1274 1275 """
1275 1276 args = getargsdict(x, 'limit', 'set n offset')
1276 1277 if 'set' not in args:
1277 1278 # i18n: "limit" is a keyword
1278 1279 raise error.ParseError(_("limit requires one to three arguments"))
1279 1280 # i18n: "limit" is a keyword
1280 1281 lim = getinteger(args.get('n'), _("limit expects a number"), default=1)
1281 1282 if lim < 0:
1282 1283 raise error.ParseError(_("negative number to select"))
1283 1284 # i18n: "limit" is a keyword
1284 1285 ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0)
1285 1286 if ofs < 0:
1286 1287 raise error.ParseError(_("negative offset"))
1287 1288 os = getset(repo, fullreposet(repo), args['set'])
1288 1289 ls = os.slice(ofs, ofs + lim)
1289 1290 if order == followorder and lim > 1:
1290 1291 return subset & ls
1291 1292 return ls & subset
1292 1293
1293 1294 @predicate('last(set, [n])', safe=True, takeorder=True)
1294 1295 def last(repo, subset, x, order):
1295 1296 """Last n members of set, defaulting to 1.
1296 1297 """
1297 1298 # i18n: "last" is a keyword
1298 1299 l = getargs(x, 1, 2, _("last requires one or two arguments"))
1299 1300 lim = 1
1300 1301 if len(l) == 2:
1301 1302 # i18n: "last" is a keyword
1302 1303 lim = getinteger(l[1], _("last expects a number"))
1303 1304 if lim < 0:
1304 1305 raise error.ParseError(_("negative number to select"))
1305 1306 os = getset(repo, fullreposet(repo), l[0])
1306 1307 os.reverse()
1307 1308 ls = os.slice(0, lim)
1308 1309 if order == followorder and lim > 1:
1309 1310 return subset & ls
1310 1311 ls.reverse()
1311 1312 return ls & subset
1312 1313
1313 1314 @predicate('max(set)', safe=True)
1314 1315 def maxrev(repo, subset, x):
1315 1316 """Changeset with highest revision number in set.
1316 1317 """
1317 1318 os = getset(repo, fullreposet(repo), x)
1318 1319 try:
1319 1320 m = os.max()
1320 1321 if m in subset:
1321 1322 return baseset([m], datarepr=('<max %r, %r>', subset, os))
1322 1323 except ValueError:
1323 1324 # os.max() throws a ValueError when the collection is empty.
1324 1325 # Same as python's max().
1325 1326 pass
1326 1327 return baseset(datarepr=('<max %r, %r>', subset, os))
1327 1328
1328 1329 @predicate('merge()', safe=True)
1329 1330 def merge(repo, subset, x):
1330 1331 """Changeset is a merge changeset.
1331 1332 """
1332 1333 # i18n: "merge" is a keyword
1333 1334 getargs(x, 0, 0, _("merge takes no arguments"))
1334 1335 cl = repo.changelog
1335 1336 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1,
1336 1337 condrepr='<merge>')
1337 1338
1338 1339 @predicate('branchpoint()', safe=True)
1339 1340 def branchpoint(repo, subset, x):
1340 1341 """Changesets with more than one child.
1341 1342 """
1342 1343 # i18n: "branchpoint" is a keyword
1343 1344 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1344 1345 cl = repo.changelog
1345 1346 if not subset:
1346 1347 return baseset()
1347 1348 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1348 1349 # (and if it is not, it should be.)
1349 1350 baserev = min(subset)
1350 1351 parentscount = [0]*(len(repo) - baserev)
1351 1352 for r in cl.revs(start=baserev + 1):
1352 1353 for p in cl.parentrevs(r):
1353 1354 if p >= baserev:
1354 1355 parentscount[p - baserev] += 1
1355 1356 return subset.filter(lambda r: parentscount[r - baserev] > 1,
1356 1357 condrepr='<branchpoint>')
1357 1358
1358 1359 @predicate('min(set)', safe=True)
1359 1360 def minrev(repo, subset, x):
1360 1361 """Changeset with lowest revision number in set.
1361 1362 """
1362 1363 os = getset(repo, fullreposet(repo), x)
1363 1364 try:
1364 1365 m = os.min()
1365 1366 if m in subset:
1366 1367 return baseset([m], datarepr=('<min %r, %r>', subset, os))
1367 1368 except ValueError:
1368 1369 # os.min() throws a ValueError when the collection is empty.
1369 1370 # Same as python's min().
1370 1371 pass
1371 1372 return baseset(datarepr=('<min %r, %r>', subset, os))
1372 1373
1373 1374 @predicate('modifies(pattern)', safe=True, weight=30)
1374 1375 def modifies(repo, subset, x):
1375 1376 """Changesets modifying files matched by pattern.
1376 1377
1377 1378 The pattern without explicit kind like ``glob:`` is expected to be
1378 1379 relative to the current directory and match against a file or a
1379 1380 directory.
1380 1381 """
1381 1382 # i18n: "modifies" is a keyword
1382 1383 pat = getstring(x, _("modifies requires a pattern"))
1383 1384 return checkstatus(repo, subset, pat, 0)
1384 1385
1385 1386 @predicate('named(namespace)')
1386 1387 def named(repo, subset, x):
1387 1388 """The changesets in a given namespace.
1388 1389
1389 1390 Pattern matching is supported for `namespace`. See
1390 1391 :hg:`help revisions.patterns`.
1391 1392 """
1392 1393 # i18n: "named" is a keyword
1393 1394 args = getargs(x, 1, 1, _('named requires a namespace argument'))
1394 1395
1395 1396 ns = getstring(args[0],
1396 1397 # i18n: "named" is a keyword
1397 1398 _('the argument to named must be a string'))
1398 1399 kind, pattern, matcher = stringutil.stringmatcher(ns)
1399 1400 namespaces = set()
1400 1401 if kind == 'literal':
1401 1402 if pattern not in repo.names:
1402 1403 raise error.RepoLookupError(_("namespace '%s' does not exist")
1403 1404 % ns)
1404 1405 namespaces.add(repo.names[pattern])
1405 1406 else:
1406 1407 for name, ns in repo.names.iteritems():
1407 1408 if matcher(name):
1408 1409 namespaces.add(ns)
1409 1410
1410 1411 names = set()
1411 1412 for ns in namespaces:
1412 1413 for name in ns.listnames(repo):
1413 1414 if name not in ns.deprecated:
1414 1415 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1415 1416
1416 1417 names -= {node.nullrev}
1417 1418 return subset & names
1418 1419
1419 1420 @predicate('id(string)', safe=True)
1420 1421 def node_(repo, subset, x):
1421 1422 """Revision non-ambiguously specified by the given hex string prefix.
1422 1423 """
1423 1424 # i18n: "id" is a keyword
1424 1425 l = getargs(x, 1, 1, _("id requires one argument"))
1425 1426 # i18n: "id" is a keyword
1426 1427 n = getstring(l[0], _("id requires a string"))
1427 1428 if len(n) == 40:
1428 1429 try:
1429 1430 rn = repo.changelog.rev(node.bin(n))
1430 1431 except error.WdirUnsupported:
1431 1432 rn = node.wdirrev
1432 1433 except (LookupError, TypeError):
1433 1434 rn = None
1434 1435 else:
1435 1436 rn = None
1436 1437 try:
1437 1438 pm = scmutil.resolvehexnodeidprefix(repo, n)
1438 1439 if pm is not None:
1439 1440 rn = repo.changelog.rev(pm)
1440 1441 except LookupError:
1441 1442 pass
1442 1443 except error.WdirUnsupported:
1443 1444 rn = node.wdirrev
1444 1445
1445 1446 if rn is None:
1446 1447 return baseset()
1447 1448 result = baseset([rn])
1448 1449 return result & subset
1449 1450
1450 1451 @predicate('none()', safe=True)
1451 1452 def none(repo, subset, x):
1452 1453 """No changesets.
1453 1454 """
1454 1455 # i18n: "none" is a keyword
1455 1456 getargs(x, 0, 0, _("none takes no arguments"))
1456 1457 return baseset()
1457 1458
1458 1459 @predicate('obsolete()', safe=True)
1459 1460 def obsolete(repo, subset, x):
1460 1461 """Mutable changeset with a newer version."""
1461 1462 # i18n: "obsolete" is a keyword
1462 1463 getargs(x, 0, 0, _("obsolete takes no arguments"))
1463 1464 obsoletes = obsmod.getrevs(repo, 'obsolete')
1464 1465 return subset & obsoletes
1465 1466
1466 1467 @predicate('only(set, [set])', safe=True)
1467 1468 def only(repo, subset, x):
1468 1469 """Changesets that are ancestors of the first set that are not ancestors
1469 1470 of any other head in the repo. If a second set is specified, the result
1470 1471 is ancestors of the first set that are not ancestors of the second set
1471 1472 (i.e. ::<set1> - ::<set2>).
1472 1473 """
1473 1474 cl = repo.changelog
1474 1475 # i18n: "only" is a keyword
1475 1476 args = getargs(x, 1, 2, _('only takes one or two arguments'))
1476 1477 include = getset(repo, fullreposet(repo), args[0])
1477 1478 if len(args) == 1:
1478 1479 if not include:
1479 1480 return baseset()
1480 1481
1481 1482 descendants = set(dagop.revdescendants(repo, include, False))
1482 1483 exclude = [rev for rev in cl.headrevs()
1483 1484 if not rev in descendants and not rev in include]
1484 1485 else:
1485 1486 exclude = getset(repo, fullreposet(repo), args[1])
1486 1487
1487 1488 results = set(cl.findmissingrevs(common=exclude, heads=include))
1488 1489 # XXX we should turn this into a baseset instead of a set, smartset may do
1489 1490 # some optimizations from the fact this is a baseset.
1490 1491 return subset & results
1491 1492
1492 1493 @predicate('origin([set])', safe=True)
1493 1494 def origin(repo, subset, x):
1494 1495 """
1495 1496 Changesets that were specified as a source for the grafts, transplants or
1496 1497 rebases that created the given revisions. Omitting the optional set is the
1497 1498 same as passing all(). If a changeset created by these operations is itself
1498 1499 specified as a source for one of these operations, only the source changeset
1499 1500 for the first operation is selected.
1500 1501 """
1501 1502 if x is not None:
1502 1503 dests = getset(repo, fullreposet(repo), x)
1503 1504 else:
1504 1505 dests = fullreposet(repo)
1505 1506
1506 1507 def _firstsrc(rev):
1507 1508 src = _getrevsource(repo, rev)
1508 1509 if src is None:
1509 1510 return None
1510 1511
1511 1512 while True:
1512 1513 prev = _getrevsource(repo, src)
1513 1514
1514 1515 if prev is None:
1515 1516 return src
1516 1517 src = prev
1517 1518
1518 1519 o = {_firstsrc(r) for r in dests}
1519 1520 o -= {None}
1520 1521 # XXX we should turn this into a baseset instead of a set, smartset may do
1521 1522 # some optimizations from the fact this is a baseset.
1522 1523 return subset & o
1523 1524
1524 1525 @predicate('outgoing([path])', safe=False, weight=10)
1525 1526 def outgoing(repo, subset, x):
1526 1527 """Changesets not found in the specified destination repository, or the
1527 1528 default push location.
1528 1529 """
1529 1530 # Avoid cycles.
1530 1531 from . import (
1531 1532 discovery,
1532 1533 hg,
1533 1534 )
1534 1535 # i18n: "outgoing" is a keyword
1535 1536 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1536 1537 # i18n: "outgoing" is a keyword
1537 1538 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1538 1539 if not dest:
1539 1540 # ui.paths.getpath() explicitly tests for None, not just a boolean
1540 1541 dest = None
1541 1542 path = repo.ui.paths.getpath(dest, default=('default-push', 'default'))
1542 1543 if not path:
1543 1544 raise error.Abort(_('default repository not configured!'),
1544 1545 hint=_("see 'hg help config.paths'"))
1545 1546 dest = path.pushloc or path.loc
1546 1547 branches = path.branch, []
1547 1548
1548 1549 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1549 1550 if revs:
1550 1551 revs = [repo.lookup(rev) for rev in revs]
1551 1552 other = hg.peer(repo, {}, dest)
1552 1553 repo.ui.pushbuffer()
1553 1554 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1554 1555 repo.ui.popbuffer()
1555 1556 cl = repo.changelog
1556 1557 o = {cl.rev(r) for r in outgoing.missing}
1557 1558 return subset & o
1558 1559
1559 1560 @predicate('p1([set])', safe=True)
1560 1561 def p1(repo, subset, x):
1561 1562 """First parent of changesets in set, or the working directory.
1562 1563 """
1563 1564 if x is None:
1564 1565 p = repo[x].p1().rev()
1565 1566 if p >= 0:
1566 1567 return subset & baseset([p])
1567 1568 return baseset()
1568 1569
1569 1570 ps = set()
1570 1571 cl = repo.changelog
1571 1572 for r in getset(repo, fullreposet(repo), x):
1572 1573 try:
1573 1574 ps.add(cl.parentrevs(r)[0])
1574 1575 except error.WdirUnsupported:
1575 1576 ps.add(repo[r].p1().rev())
1576 1577 ps -= {node.nullrev}
1577 1578 # XXX we should turn this into a baseset instead of a set, smartset may do
1578 1579 # some optimizations from the fact this is a baseset.
1579 1580 return subset & ps
1580 1581
1581 1582 @predicate('p2([set])', safe=True)
1582 1583 def p2(repo, subset, x):
1583 1584 """Second parent of changesets in set, or the working directory.
1584 1585 """
1585 1586 if x is None:
1586 1587 ps = repo[x].parents()
1587 1588 try:
1588 1589 p = ps[1].rev()
1589 1590 if p >= 0:
1590 1591 return subset & baseset([p])
1591 1592 return baseset()
1592 1593 except IndexError:
1593 1594 return baseset()
1594 1595
1595 1596 ps = set()
1596 1597 cl = repo.changelog
1597 1598 for r in getset(repo, fullreposet(repo), x):
1598 1599 try:
1599 1600 ps.add(cl.parentrevs(r)[1])
1600 1601 except error.WdirUnsupported:
1601 1602 parents = repo[r].parents()
1602 1603 if len(parents) == 2:
1603 1604 ps.add(parents[1].rev())
1604 1605 ps -= {node.nullrev}
1605 1606 # XXX we should turn this into a baseset instead of a set, smartset may do
1606 1607 # some optimizations from the fact this is a baseset.
1607 1608 return subset & ps
1608 1609
1609 1610 def parentpost(repo, subset, x, order):
1610 1611 return p1(repo, subset, x)
1611 1612
1612 1613 @predicate('parents([set])', safe=True)
1613 1614 def parents(repo, subset, x):
1614 1615 """
1615 1616 The set of all parents for all changesets in set, or the working directory.
1616 1617 """
1617 1618 if x is None:
1618 1619 ps = set(p.rev() for p in repo[x].parents())
1619 1620 else:
1620 1621 ps = set()
1621 1622 cl = repo.changelog
1622 1623 up = ps.update
1623 1624 parentrevs = cl.parentrevs
1624 1625 for r in getset(repo, fullreposet(repo), x):
1625 1626 try:
1626 1627 up(parentrevs(r))
1627 1628 except error.WdirUnsupported:
1628 1629 up(p.rev() for p in repo[r].parents())
1629 1630 ps -= {node.nullrev}
1630 1631 return subset & ps
1631 1632
1632 1633 def _phase(repo, subset, *targets):
1633 1634 """helper to select all rev in <targets> phases"""
1634 1635 return repo._phasecache.getrevset(repo, targets, subset)
1635 1636
1636 1637 @predicate('_phase(idx)', safe=True)
1637 1638 def phase(repo, subset, x):
1638 1639 l = getargs(x, 1, 1, ("_phase requires one argument"))
1639 1640 target = getinteger(l[0], ("_phase expects a number"))
1640 1641 return _phase(repo, subset, target)
1641 1642
1642 1643 @predicate('draft()', safe=True)
1643 1644 def draft(repo, subset, x):
1644 1645 """Changeset in draft phase."""
1645 1646 # i18n: "draft" is a keyword
1646 1647 getargs(x, 0, 0, _("draft takes no arguments"))
1647 1648 target = phases.draft
1648 1649 return _phase(repo, subset, target)
1649 1650
1650 1651 @predicate('secret()', safe=True)
1651 1652 def secret(repo, subset, x):
1652 1653 """Changeset in secret phase."""
1653 1654 # i18n: "secret" is a keyword
1654 1655 getargs(x, 0, 0, _("secret takes no arguments"))
1655 1656 target = phases.secret
1656 1657 return _phase(repo, subset, target)
1657 1658
1658 1659 @predicate('stack([revs])', safe=True)
1659 1660 def stack(repo, subset, x):
1660 1661 """Experimental revset for the stack of changesets or working directory
1661 1662 parent. (EXPERIMENTAL)
1662 1663 """
1663 1664 if x is None:
1664 1665 stacks = stackmod.getstack(repo, x)
1665 1666 else:
1666 1667 stacks = smartset.baseset([])
1667 1668 for revision in getset(repo, fullreposet(repo), x):
1668 1669 currentstack = stackmod.getstack(repo, revision)
1669 1670 stacks = stacks + currentstack
1670 1671
1671 1672 return subset & stacks
1672 1673
1673 1674 def parentspec(repo, subset, x, n, order):
1674 1675 """``set^0``
1675 1676 The set.
1676 1677 ``set^1`` (or ``set^``), ``set^2``
1677 1678 First or second parent, respectively, of all changesets in set.
1678 1679 """
1679 1680 try:
1680 1681 n = int(n[1])
1681 1682 if n not in (0, 1, 2):
1682 1683 raise ValueError
1683 1684 except (TypeError, ValueError):
1684 1685 raise error.ParseError(_("^ expects a number 0, 1, or 2"))
1685 1686 ps = set()
1686 1687 cl = repo.changelog
1687 1688 for r in getset(repo, fullreposet(repo), x):
1688 1689 if n == 0:
1689 1690 ps.add(r)
1690 1691 elif n == 1:
1691 1692 try:
1692 1693 ps.add(cl.parentrevs(r)[0])
1693 1694 except error.WdirUnsupported:
1694 1695 ps.add(repo[r].p1().rev())
1695 1696 else:
1696 1697 try:
1697 1698 parents = cl.parentrevs(r)
1698 1699 if parents[1] != node.nullrev:
1699 1700 ps.add(parents[1])
1700 1701 except error.WdirUnsupported:
1701 1702 parents = repo[r].parents()
1702 1703 if len(parents) == 2:
1703 1704 ps.add(parents[1].rev())
1704 1705 return subset & ps
1705 1706
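A short sketch of the ``^`` operator handled above, assuming an existing ``repo`` object; ``tip`` is the built-in tip symbol:

    from mercurial import revset
    # first parent of tip (same as 'tip^1')
    first = list(revset.match(repo.ui, b'tip^')(repo))
    # second parent of tip; empty unless tip is a merge
    second = list(revset.match(repo.ui, b'tip^2')(repo))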
1706 1707 @predicate('present(set)', safe=True, takeorder=True)
1707 1708 def present(repo, subset, x, order):
1708 1709 """An empty set, if any revision in set isn't found; otherwise,
1709 1710 all revisions in set.
1710 1711
1711 1712 If any of the specified revisions is not present in the local repository,
1712 1713 the query is normally aborted. But this predicate allows the query
1713 1714 to continue even in such cases.
1714 1715 """
1715 1716 try:
1716 1717 return getset(repo, subset, x, order)
1717 1718 except error.RepoLookupError:
1718 1719 return baseset()
1719 1720
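A hedged sketch of what ``present()`` buys you, assuming an existing ``repo``; the hex prefix below is hypothetical and assumed not to exist in the repository:

    from mercurial import error, revset
    try:
        # an unknown revision normally aborts the query
        list(revset.match(repo.ui, b'deadbeef0000')(repo))
    except error.RepoLookupError:
        pass
    # wrapped in present(), the same lookup just yields an empty set
    print(list(revset.match(repo.ui, b'present(deadbeef0000)')(repo)))  # []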
1720 1721 # for internal use
1721 1722 @predicate('_notpublic', safe=True)
1722 1723 def _notpublic(repo, subset, x):
1723 1724 getargs(x, 0, 0, "_notpublic takes no arguments")
1724 1725 return _phase(repo, subset, phases.draft, phases.secret)
1725 1726
1726 1727 # for internal use
1727 1728 @predicate('_phaseandancestors(phasename, set)', safe=True)
1728 1729 def _phaseandancestors(repo, subset, x):
1729 1730 # equivalent to (phasename() & ancestors(set)) but more efficient
1730 1731 # phasename could be one of 'draft', 'secret', or '_notpublic'
1731 1732 args = getargs(x, 2, 2, "_phaseandancestors requires two arguments")
1732 1733 phasename = getsymbol(args[0])
1733 1734 s = getset(repo, fullreposet(repo), args[1])
1734 1735
1735 1736 draft = phases.draft
1736 1737 secret = phases.secret
1737 1738 phasenamemap = {
1738 1739 '_notpublic': draft,
1739 1740 'draft': draft, # follow secret's ancestors
1740 1741 'secret': secret,
1741 1742 }
1742 1743 if phasename not in phasenamemap:
1743 1744 raise error.ParseError('%r is not a valid phasename' % phasename)
1744 1745
1745 1746 minimalphase = phasenamemap[phasename]
1746 1747 getphase = repo._phasecache.phase
1747 1748
1748 1749 def cutfunc(rev):
1749 1750 return getphase(repo, rev) < minimalphase
1750 1751
1751 1752 revs = dagop.revancestors(repo, s, cutfunc=cutfunc)
1752 1753
1753 1754 if phasename == 'draft': # need to remove secret changesets
1754 1755 revs = revs.filter(lambda r: getphase(repo, r) == draft)
1755 1756 return subset & revs
1756 1757
1757 1758 @predicate('public()', safe=True)
1758 1759 def public(repo, subset, x):
1759 1760 """Changeset in public phase."""
1760 1761 # i18n: "public" is a keyword
1761 1762 getargs(x, 0, 0, _("public takes no arguments"))
1762 1763 return _phase(repo, subset, phases.public)
1763 1764
1764 1765 @predicate('remote([id [,path]])', safe=False)
1765 1766 def remote(repo, subset, x):
1766 1767 """Local revision that corresponds to the given identifier in a
1767 1768 remote repository, if present. Here, the '.' identifier is a
1768 1769 synonym for the current local branch.
1769 1770 """
1770 1771
1771 1772 from . import hg # avoid start-up nasties
1772 1773 # i18n: "remote" is a keyword
1773 1774 l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments"))
1774 1775
1775 1776 q = '.'
1776 1777 if len(l) > 0:
1777 1778 # i18n: "remote" is a keyword
1778 1779 q = getstring(l[0], _("remote requires a string id"))
1779 1780 if q == '.':
1780 1781 q = repo['.'].branch()
1781 1782
1782 1783 dest = ''
1783 1784 if len(l) > 1:
1784 1785 # i18n: "remote" is a keyword
1785 1786 dest = getstring(l[1], _("remote requires a repository path"))
1786 1787 dest = repo.ui.expandpath(dest or 'default')
1787 1788 dest, branches = hg.parseurl(dest)
1788 1789 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1789 1790 if revs:
1790 1791 revs = [repo.lookup(rev) for rev in revs]
1791 1792 other = hg.peer(repo, {}, dest)
1792 1793 n = other.lookup(q)
1793 1794 if n in repo:
1794 1795 r = repo[n].rev()
1795 1796 if r in subset:
1796 1797 return baseset([r])
1797 1798 return baseset()
1798 1799
1799 1800 @predicate('removes(pattern)', safe=True, weight=30)
1800 1801 def removes(repo, subset, x):
1801 1802 """Changesets which remove files matching pattern.
1802 1803
1803 1804 The pattern without explicit kind like ``glob:`` is expected to be
1804 1805 relative to the current directory and match against a file or a
1805 1806 directory.
1806 1807 """
1807 1808 # i18n: "removes" is a keyword
1808 1809 pat = getstring(x, _("removes requires a pattern"))
1809 1810 return checkstatus(repo, subset, pat, 2)
1810 1811
1811 1812 @predicate('rev(number)', safe=True)
1812 1813 def rev(repo, subset, x):
1813 1814 """Revision with the given numeric identifier.
1814 1815 """
1815 1816 # i18n: "rev" is a keyword
1816 1817 l = getargs(x, 1, 1, _("rev requires one argument"))
1817 1818 try:
1818 1819 # i18n: "rev" is a keyword
1819 1820 l = int(getstring(l[0], _("rev requires a number")))
1820 1821 except (TypeError, ValueError):
1821 1822 # i18n: "rev" is a keyword
1822 1823 raise error.ParseError(_("rev expects a number"))
1823 1824 if l not in repo.changelog and l not in (node.nullrev, node.wdirrev):
1824 1825 return baseset()
1825 1826 return subset & baseset([l])
1826 1827
1827 1828 @predicate('_rev(number)', safe=True)
1828 1829 def _rev(repo, subset, x):
1829 1830 # internal version of "rev(x)" that raise error if "x" is invalid
1830 1831 # i18n: "rev" is a keyword
1831 1832 l = getargs(x, 1, 1, _("rev requires one argument"))
1832 1833 try:
1833 1834 # i18n: "rev" is a keyword
1834 1835 l = int(getstring(l[0], _("rev requires a number")))
1835 1836 except (TypeError, ValueError):
1836 1837 # i18n: "rev" is a keyword
1837 1838 raise error.ParseError(_("rev expects a number"))
1838 1839 repo.changelog.node(l) # check that the rev exists
1839 1840 return subset & baseset([l])
1840 1841
1841 1842 @predicate('revset(set)', safe=True, takeorder=True)
1842 1843 def revsetpredicate(repo, subset, x, order):
1843 1844 """Strictly interpret the content as a revset.
1844 1845
1845 1846 The content of this special predicate will be strictly interpreted as a
1846 1847 revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
1847 1848 without possible ambiguity with a "id(0)" bookmark or tag.
1848 1849 """
1849 1850 return getset(repo, subset, x, order)
1850 1851
1851 1852 @predicate('matching(revision [, field])', safe=True)
1852 1853 def matching(repo, subset, x):
1853 1854 """Changesets in which a given set of fields match the set of fields in the
1854 1855 selected revision or set.
1855 1856
1856 1857 To match more than one field, pass the list of fields to match separated
1857 1858 by spaces (e.g. ``author description``).
1858 1859
1859 1860 Valid fields are most regular revision fields and some special fields.
1860 1861
1861 1862 Regular revision fields are ``description``, ``author``, ``branch``,
1862 1863 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1863 1864 and ``diff``.
1864 1865 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1865 1866 contents of the revision. Two revisions matching their ``diff`` will
1866 1867 also match their ``files``.
1867 1868
1868 1869 Special fields are ``summary`` and ``metadata``:
1869 1870 ``summary`` matches the first line of the description.
1870 1871 ``metadata`` is equivalent to matching ``description user date``
1871 1872 (i.e. it matches the main metadata fields).
1872 1873
1873 1874 ``metadata`` is the default field which is used when no fields are
1874 1875 specified. You can match more than one field at a time.
1875 1876 """
1876 1877 # i18n: "matching" is a keyword
1877 1878 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1878 1879
1879 1880 revs = getset(repo, fullreposet(repo), l[0])
1880 1881
1881 1882 fieldlist = ['metadata']
1882 1883 if len(l) > 1:
1883 1884 fieldlist = getstring(l[1],
1884 1885 # i18n: "matching" is a keyword
1885 1886 _("matching requires a string "
1886 1887 "as its second argument")).split()
1887 1888
1888 1889 # Make sure that there are no repeated fields,
1889 1890 # expand the 'special' 'metadata' field type
1890 1891 # and check the 'files' whenever we check the 'diff'
1891 1892 fields = []
1892 1893 for field in fieldlist:
1893 1894 if field == 'metadata':
1894 1895 fields += ['user', 'description', 'date']
1895 1896 elif field == 'diff':
1896 1897 # a revision matching the diff must also match the files
1897 1898 # since matching the diff is very costly, make sure to
1898 1899 # also match the files first
1899 1900 fields += ['files', 'diff']
1900 1901 else:
1901 1902 if field == 'author':
1902 1903 field = 'user'
1903 1904 fields.append(field)
1904 1905 fields = set(fields)
1905 1906 if 'summary' in fields and 'description' in fields:
1906 1907 # If a revision matches its description it also matches its summary
1907 1908 fields.discard('summary')
1908 1909
1909 1910 # We may want to match more than one field
1910 1911 # Not all fields take the same amount of time to be matched
1911 1912 # Sort the selected fields in order of increasing matching cost
1912 1913 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1913 1914 'files', 'description', 'substate', 'diff']
1914 1915 def fieldkeyfunc(f):
1915 1916 try:
1916 1917 return fieldorder.index(f)
1917 1918 except ValueError:
1918 1919 # assume an unknown field is very costly
1919 1920 return len(fieldorder)
1920 1921 fields = list(fields)
1921 1922 fields.sort(key=fieldkeyfunc)
1922 1923
1923 1924 # Each field will be matched with its own "getfield" function
1924 1925 # which will be added to the getfieldfuncs array of functions
1925 1926 getfieldfuncs = []
1926 1927 _funcs = {
1927 1928 'user': lambda r: repo[r].user(),
1928 1929 'branch': lambda r: repo[r].branch(),
1929 1930 'date': lambda r: repo[r].date(),
1930 1931 'description': lambda r: repo[r].description(),
1931 1932 'files': lambda r: repo[r].files(),
1932 1933 'parents': lambda r: repo[r].parents(),
1933 1934 'phase': lambda r: repo[r].phase(),
1934 1935 'substate': lambda r: repo[r].substate,
1935 1936 'summary': lambda r: repo[r].description().splitlines()[0],
1936 1937 'diff': lambda r: list(repo[r].diff(
1937 1938 opts=diffutil.diffallopts(repo.ui, {'git': True}))),
1938 1939 }
1939 1940 for info in fields:
1940 1941 getfield = _funcs.get(info, None)
1941 1942 if getfield is None:
1942 1943 raise error.ParseError(
1943 1944 # i18n: "matching" is a keyword
1944 1945 _("unexpected field name passed to matching: %s") % info)
1945 1946 getfieldfuncs.append(getfield)
1946 1947 # convert the getfield array of functions into a "getinfo" function
1947 1948 # which returns an array of field values (or a single value if there
1948 1949 # is only one field to match)
1949 1950 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1950 1951
1951 1952 def matches(x):
1952 1953 for rev in revs:
1953 1954 target = getinfo(rev)
1954 1955 match = True
1955 1956 for n, f in enumerate(getfieldfuncs):
1956 1957 if target[n] != f(x):
1957 1958 match = False
1958 1959 if match:
1959 1960 return True
1960 1961 return False
1961 1962
1962 1963 return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs))
1963 1964
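A small usage sketch for ``matching()``, assuming an existing ``repo``:

    from mercurial import revset
    # changesets whose author and branch both match those of tip
    m = revset.match(repo.ui, b'matching(tip, "user branch")')
    print(list(m(repo)))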
1964 1965 @predicate('reverse(set)', safe=True, takeorder=True, weight=0)
1965 1966 def reverse(repo, subset, x, order):
1966 1967 """Reverse order of set.
1967 1968 """
1968 1969 l = getset(repo, subset, x, order)
1969 1970 if order == defineorder:
1970 1971 l.reverse()
1971 1972 return l
1972 1973
1973 1974 @predicate('roots(set)', safe=True)
1974 1975 def roots(repo, subset, x):
1975 1976 """Changesets in set with no parent changeset in set.
1976 1977 """
1977 1978 s = getset(repo, fullreposet(repo), x)
1978 1979 parents = repo.changelog.parentrevs
1979 1980 def filter(r):
1980 1981 for p in parents(r):
1981 1982 if 0 <= p and p in s:
1982 1983 return False
1983 1984 return True
1984 1985 return subset & s.filter(filter, condrepr='<roots>')
1985 1986
1986 1987 _sortkeyfuncs = {
1987 1988 'rev': lambda c: c.rev(),
1988 1989 'branch': lambda c: c.branch(),
1989 1990 'desc': lambda c: c.description(),
1990 1991 'user': lambda c: c.user(),
1991 1992 'author': lambda c: c.user(),
1992 1993 'date': lambda c: c.date()[0],
1993 1994 }
1994 1995
1995 1996 def _getsortargs(x):
1996 1997 """Parse sort options into (set, [(key, reverse)], opts)"""
1997 1998 args = getargsdict(x, 'sort', 'set keys topo.firstbranch')
1998 1999 if 'set' not in args:
1999 2000 # i18n: "sort" is a keyword
2000 2001 raise error.ParseError(_('sort requires one or two arguments'))
2001 2002 keys = "rev"
2002 2003 if 'keys' in args:
2003 2004 # i18n: "sort" is a keyword
2004 2005 keys = getstring(args['keys'], _("sort spec must be a string"))
2005 2006
2006 2007 keyflags = []
2007 2008 for k in keys.split():
2008 2009 fk = k
2009 2010 reverse = (k.startswith('-'))
2010 2011 if reverse:
2011 2012 k = k[1:]
2012 2013 if k not in _sortkeyfuncs and k != 'topo':
2013 2014 raise error.ParseError(
2014 2015 _("unknown sort key %r") % pycompat.bytestr(fk))
2015 2016 keyflags.append((k, reverse))
2016 2017
2017 2018 if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags):
2018 2019 # i18n: "topo" is a keyword
2019 2020 raise error.ParseError(_('topo sort order cannot be combined '
2020 2021 'with other sort keys'))
2021 2022
2022 2023 opts = {}
2023 2024 if 'topo.firstbranch' in args:
2024 2025 if any(k == 'topo' for k, reverse in keyflags):
2025 2026 opts['topo.firstbranch'] = args['topo.firstbranch']
2026 2027 else:
2027 2028 # i18n: "topo" and "topo.firstbranch" are keywords
2028 2029 raise error.ParseError(_('topo.firstbranch can only be used '
2029 2030 'when using the topo sort key'))
2030 2031
2031 2032 return args['set'], keyflags, opts
2032 2033
2033 2034 @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True,
2034 2035 weight=10)
2035 2036 def sort(repo, subset, x, order):
2036 2037 """Sort set by keys. The default sort order is ascending, specify a key
2037 2038 as ``-key`` to sort in descending order.
2038 2039
2039 2040 The keys can be:
2040 2041
2041 2042 - ``rev`` for the revision number,
2042 2043 - ``branch`` for the branch name,
2043 2044 - ``desc`` for the commit message (description),
2044 2045 - ``user`` for user name (``author`` can be used as an alias),
2045 2046 - ``date`` for the commit date
2046 2047 - ``topo`` for a reverse topological sort
2047 2048
2048 2049 The ``topo`` sort order cannot be combined with other sort keys. This sort
2049 2050 takes one optional argument, ``topo.firstbranch``, which takes a revset that
2050 2051 specifies what topological branches to prioritize in the sort.
2051 2052
2052 2053 """
2053 2054 s, keyflags, opts = _getsortargs(x)
2054 2055 revs = getset(repo, subset, s, order)
2055 2056
2056 2057 if not keyflags or order != defineorder:
2057 2058 return revs
2058 2059 if len(keyflags) == 1 and keyflags[0][0] == "rev":
2059 2060 revs.sort(reverse=keyflags[0][1])
2060 2061 return revs
2061 2062 elif keyflags[0][0] == "topo":
2062 2063 firstbranch = ()
2063 2064 if 'topo.firstbranch' in opts:
2064 2065 firstbranch = getset(repo, subset, opts['topo.firstbranch'])
2065 2066 revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
2066 2067 firstbranch),
2067 2068 istopo=True)
2068 2069 if keyflags[0][1]:
2069 2070 revs.reverse()
2070 2071 return revs
2071 2072
2072 2073 # sort() is guaranteed to be stable
2073 2074 ctxs = [repo[r] for r in revs]
2074 2075 for k, reverse in reversed(keyflags):
2075 2076 ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
2076 2077 return baseset([c.rev() for c in ctxs])
2077 2078
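A sketch of a multi-key sort through the matcher API, assuming an existing ``repo``; note the leading ``-`` for descending order:

    from mercurial import revset
    # draft changesets, newest first, ties broken by user name
    m = revset.match(repo.ui, b'sort(draft(), "-date user")')
    for r in m(repo):
        print(r)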
2078 2079 @predicate('subrepo([pattern])')
2079 2080 def subrepo(repo, subset, x):
2080 2081 """Changesets that add, modify or remove the given subrepo. If no subrepo
2081 2082 pattern is named, any subrepo changes are returned.
2082 2083 """
2083 2084 # i18n: "subrepo" is a keyword
2084 2085 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
2085 2086 pat = None
2086 2087 if len(args) != 0:
2087 2088 pat = getstring(args[0], _("subrepo requires a pattern"))
2088 2089
2089 2090 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
2090 2091
2091 2092 def submatches(names):
2092 2093 k, p, m = stringutil.stringmatcher(pat)
2093 2094 for name in names:
2094 2095 if m(name):
2095 2096 yield name
2096 2097
2097 2098 def matches(x):
2098 2099 c = repo[x]
2099 2100 s = repo.status(c.p1().node(), c.node(), match=m)
2100 2101
2101 2102 if pat is None:
2102 2103 return s.added or s.modified or s.removed
2103 2104
2104 2105 if s.added:
2105 2106 return any(submatches(c.substate.keys()))
2106 2107
2107 2108 if s.modified:
2108 2109 subs = set(c.p1().substate.keys())
2109 2110 subs.update(c.substate.keys())
2110 2111
2111 2112 for path in submatches(subs):
2112 2113 if c.p1().substate.get(path) != c.substate.get(path):
2113 2114 return True
2114 2115
2115 2116 if s.removed:
2116 2117 return any(submatches(c.p1().substate.keys()))
2117 2118
2118 2119 return False
2119 2120
2120 2121 return subset.filter(matches, condrepr=('<subrepo %r>', pat))
2121 2122
2122 2123 def _mapbynodefunc(repo, s, f):
2123 2124 """(repo, smartset, [node] -> [node]) -> smartset
2124 2125
2125 2126 Helper method to map a smartset to another smartset given a function only
2126 2127 talking about nodes. Handles converting between rev numbers and nodes, and
2127 2128 filtering.
2128 2129 """
2129 2130 cl = repo.unfiltered().changelog
2130 2131 torev = cl.rev
2131 2132 tonode = cl.node
2132 2133 nodemap = cl.nodemap
2133 2134 result = set(torev(n) for n in f(tonode(r) for r in s) if n in nodemap)
2134 2135 return smartset.baseset(result - repo.changelog.filteredrevs)
2135 2136
2136 2137 @predicate('successors(set)', safe=True)
2137 2138 def successors(repo, subset, x):
2138 2139 """All successors for set, including the given set themselves"""
2139 2140 s = getset(repo, fullreposet(repo), x)
2140 2141 f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
2141 2142 d = _mapbynodefunc(repo, s, f)
2142 2143 return subset & d
2143 2144
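A hedged sketch combining ``successors()`` with ``obsolete()``, assuming a ``repo`` with obsolescence markers enabled:

    from mercurial import revset
    # non-obsolete successors of every obsolete changeset
    m = revset.match(repo.ui, b'successors(obsolete()) - obsolete()')
    print(list(m(repo)))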
2144 2145 def _substringmatcher(pattern, casesensitive=True):
2145 2146 kind, pattern, matcher = stringutil.stringmatcher(
2146 2147 pattern, casesensitive=casesensitive)
2147 2148 if kind == 'literal':
2148 2149 if not casesensitive:
2149 2150 pattern = encoding.lower(pattern)
2150 2151 matcher = lambda s: pattern in encoding.lower(s)
2151 2152 else:
2152 2153 matcher = lambda s: pattern in s
2153 2154 return kind, pattern, matcher
2154 2155
2155 2156 @predicate('tag([name])', safe=True)
2156 2157 def tag(repo, subset, x):
2157 2158 """The specified tag by name, or all tagged revisions if no name is given.
2158 2159
2159 2160 Pattern matching is supported for `name`. See
2160 2161 :hg:`help revisions.patterns`.
2161 2162 """
2162 2163 # i18n: "tag" is a keyword
2163 2164 args = getargs(x, 0, 1, _("tag takes one or no arguments"))
2164 2165 cl = repo.changelog
2165 2166 if args:
2166 2167 pattern = getstring(args[0],
2167 2168 # i18n: "tag" is a keyword
2168 2169 _('the argument to tag must be a string'))
2169 2170 kind, pattern, matcher = stringutil.stringmatcher(pattern)
2170 2171 if kind == 'literal':
2171 2172 # avoid resolving all tags
2172 2173 tn = repo._tagscache.tags.get(pattern, None)
2173 2174 if tn is None:
2174 2175 raise error.RepoLookupError(_("tag '%s' does not exist")
2175 2176 % pattern)
2176 2177 s = {repo[tn].rev()}
2177 2178 else:
2178 2179 s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
2179 2180 else:
2180 2181 s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
2181 2182 return subset & s
2182 2183
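A sketch of the pattern-matching form documented above, assuming an existing ``repo``; the ``release-*`` tag naming scheme is made up for the example:

    from mercurial import revset
    # every revision carrying a tag whose name matches the regular expression
    m = revset.match(repo.ui, b'tag("re:^release-")')
    print(list(m(repo)))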
2183 2184 @predicate('tagged', safe=True)
2184 2185 def tagged(repo, subset, x):
2185 2186 return tag(repo, subset, x)
2186 2187
2187 2188 @predicate('orphan()', safe=True)
2188 2189 def orphan(repo, subset, x):
2189 2190 """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)
2190 2191 """
2191 2192 # i18n: "orphan" is a keyword
2192 2193 getargs(x, 0, 0, _("orphan takes no arguments"))
2193 2194 orphan = obsmod.getrevs(repo, 'orphan')
2194 2195 return subset & orphan
2195 2196
2196 2197
2197 2198 @predicate('user(string)', safe=True, weight=10)
2198 2199 def user(repo, subset, x):
2199 2200 """User name contains string. The match is case-insensitive.
2200 2201
2201 2202 Pattern matching is supported for `string`. See
2202 2203 :hg:`help revisions.patterns`.
2203 2204 """
2204 2205 return author(repo, subset, x)
2205 2206
2206 2207 @predicate('wdir()', safe=True, weight=0)
2207 2208 def wdir(repo, subset, x):
2208 2209 """Working directory. (EXPERIMENTAL)"""
2209 2210 # i18n: "wdir" is a keyword
2210 2211 getargs(x, 0, 0, _("wdir takes no arguments"))
2211 2212 if node.wdirrev in subset or isinstance(subset, fullreposet):
2212 2213 return baseset([node.wdirrev])
2213 2214 return baseset()
2214 2215
2215 2216 def _orderedlist(repo, subset, x):
2216 2217 s = getstring(x, "internal error")
2217 2218 if not s:
2218 2219 return baseset()
2219 2220 # remove duplicates here. it's difficult for caller to deduplicate sets
2220 2221 # because different symbols can point to the same rev.
2221 2222 cl = repo.changelog
2222 2223 ls = []
2223 2224 seen = set()
2224 2225 for t in s.split('\0'):
2225 2226 try:
2226 2227 # fast path for integer revision
2227 2228 r = int(t)
2228 2229 if ('%d' % r) != t or r not in cl:
2229 2230 raise ValueError
2230 2231 revs = [r]
2231 2232 except ValueError:
2232 2233 revs = stringset(repo, subset, t, defineorder)
2233 2234
2234 2235 for r in revs:
2235 2236 if r in seen:
2236 2237 continue
2237 2238 if (r in subset
2238 2239 or r == node.nullrev and isinstance(subset, fullreposet)):
2239 2240 ls.append(r)
2240 2241 seen.add(r)
2241 2242 return baseset(ls)
2242 2243
2243 2244 # for internal use
2244 2245 @predicate('_list', safe=True, takeorder=True)
2245 2246 def _list(repo, subset, x, order):
2246 2247 if order == followorder:
2247 2248 # slow path to take the subset order
2248 2249 return subset & _orderedlist(repo, fullreposet(repo), x)
2249 2250 else:
2250 2251 return _orderedlist(repo, subset, x)
2251 2252
2252 2253 def _orderedintlist(repo, subset, x):
2253 2254 s = getstring(x, "internal error")
2254 2255 if not s:
2255 2256 return baseset()
2256 2257 ls = [int(r) for r in s.split('\0')]
2257 2258 s = subset
2258 2259 return baseset([r for r in ls if r in s])
2259 2260
2260 2261 # for internal use
2261 2262 @predicate('_intlist', safe=True, takeorder=True, weight=0)
2262 2263 def _intlist(repo, subset, x, order):
2263 2264 if order == followorder:
2264 2265 # slow path to take the subset order
2265 2266 return subset & _orderedintlist(repo, fullreposet(repo), x)
2266 2267 else:
2267 2268 return _orderedintlist(repo, subset, x)
2268 2269
2269 2270 def _orderedhexlist(repo, subset, x):
2270 2271 s = getstring(x, "internal error")
2271 2272 if not s:
2272 2273 return baseset()
2273 2274 cl = repo.changelog
2274 2275 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2275 2276 s = subset
2276 2277 return baseset([r for r in ls if r in s])
2277 2278
2278 2279 # for internal use
2279 2280 @predicate('_hexlist', safe=True, takeorder=True)
2280 2281 def _hexlist(repo, subset, x, order):
2281 2282 if order == followorder:
2282 2283 # slow path to take the subset order
2283 2284 return subset & _orderedhexlist(repo, fullreposet(repo), x)
2284 2285 else:
2285 2286 return _orderedhexlist(repo, subset, x)
2286 2287
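These internal ``_list``/``_intlist``/``_hexlist`` predicates are normally generated by ``revsetlang.formatspec()`` rather than written by hand; a small sketch of that round trip (no repository needed):

    from mercurial import revsetlang
    # %ld expands a list of integers into the internal _intlist() form,
    # joining the values with NUL bytes
    spec = revsetlang.formatspec(b'%ld and draft()', [10, 2, 30])
    print(spec)    # -> _intlist('10\x002\x0030') and draft()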
2287 2288 methods = {
2288 2289 "range": rangeset,
2289 2290 "rangeall": rangeall,
2290 2291 "rangepre": rangepre,
2291 2292 "rangepost": rangepost,
2292 2293 "dagrange": dagrange,
2293 2294 "string": stringset,
2294 2295 "symbol": stringset,
2295 2296 "and": andset,
2296 2297 "andsmally": andsmallyset,
2297 2298 "or": orset,
2298 2299 "not": notset,
2299 2300 "difference": differenceset,
2300 2301 "relation": relationset,
2301 2302 "relsubscript": relsubscriptset,
2302 2303 "subscript": subscriptset,
2303 2304 "list": listset,
2304 2305 "keyvalue": keyvaluepair,
2305 2306 "func": func,
2306 2307 "ancestor": ancestorspec,
2307 2308 "parent": parentspec,
2308 2309 "parentpost": parentpost,
2309 2310 "smartset": rawsmartset,
2310 2311 }
2311 2312
2312 2313 subscriptrelations = {
2313 2314 "g": generationsrel,
2314 2315 "generations": generationsrel,
2315 2316 }
2316 2317
2317 2318 def lookupfn(repo):
2318 2319 return lambda symbol: scmutil.isrevsymbol(repo, symbol)
2319 2320
2320 2321 def match(ui, spec, lookup=None):
2321 2322 """Create a matcher for a single revision spec"""
2322 2323 return matchany(ui, [spec], lookup=lookup)
2323 2324
2324 2325 def matchany(ui, specs, lookup=None, localalias=None):
2325 2326 """Create a matcher that will include any revisions matching one of the
2326 2327 given specs
2327 2328
2328 2329 If the lookup function is not None, the parser will first attempt to handle
2329 2330 old-style ranges, which may contain operator characters.
2330 2331
2331 2332 If localalias is not None, it is a dict {name: definitionstring}. It takes
2332 2333 precedence over the [revsetalias] config section.
2333 2334 """
2334 2335 if not specs:
2335 2336 def mfunc(repo, subset=None):
2336 2337 return baseset()
2337 2338 return mfunc
2338 2339 if not all(specs):
2339 2340 raise error.ParseError(_("empty query"))
2340 2341 if len(specs) == 1:
2341 2342 tree = revsetlang.parse(specs[0], lookup)
2342 2343 else:
2343 2344 tree = ('or',
2344 2345 ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs))
2345 2346
2346 2347 aliases = []
2347 2348 warn = None
2348 2349 if ui:
2349 2350 aliases.extend(ui.configitems('revsetalias'))
2350 2351 warn = ui.warn
2351 2352 if localalias:
2352 2353 aliases.extend(localalias.items())
2353 2354 if aliases:
2354 2355 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2355 2356 tree = revsetlang.foldconcat(tree)
2356 2357 tree = revsetlang.analyze(tree)
2357 2358 tree = revsetlang.optimize(tree)
2358 2359 return makematcher(tree)
2359 2360
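A sketch of ``matchany()`` with a local alias, assuming an existing ``repo``; the alias name and the user it expands to are made up for the example:

    from mercurial import revset
    # union of draft() and the locally-defined 'mine' alias
    m = revset.matchany(repo.ui, [b'draft()', b'mine'],
                        localalias={b'mine': b'user("alice")'})
    print(list(m(repo)))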
2360 2361 def makematcher(tree):
2361 2362 """Create a matcher from an evaluatable tree"""
2362 2363 def mfunc(repo, subset=None, order=None):
2363 2364 if order is None:
2364 2365 if subset is None:
2365 2366 order = defineorder # 'x'
2366 2367 else:
2367 2368 order = followorder # 'subset & x'
2368 2369 if subset is None:
2369 2370 subset = fullreposet(repo)
2370 2371 return getset(repo, subset, tree, order)
2371 2372 return mfunc
2372 2373
2373 2374 def loadpredicate(ui, extname, registrarobj):
2374 2375 """Load revset predicates from specified registrarobj
2375 2376 """
2376 2377 for name, func in registrarobj._table.iteritems():
2377 2378 symbols[name] = func
2378 2379 if func._safe:
2379 2380 safesymbols.add(name)
2380 2381
2381 2382 # load built-in predicates explicitly to setup safesymbols
2382 2383 loadpredicate(None, None, predicate)
2383 2384
2384 2385 # tell hggettext to extract docstrings from these functions:
2385 2386 i18nfunctions = symbols.values()
@@ -1,834 +1,843 b''
1 1 # revsetlang.py - parser, tokenizer and utility for revision set language
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import string
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 error,
15 15 node,
16 16 parser,
17 17 pycompat,
18 18 smartset,
19 19 util,
20 20 )
21 21 from .utils import (
22 22 stringutil,
23 23 )
24 24
25 25 elements = {
26 26 # token-type: binding-strength, primary, prefix, infix, suffix
27 27 "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
28 28 "[": (21, None, None, ("subscript", 1, "]"), None),
29 29 "#": (21, None, None, ("relation", 21), None),
30 30 "##": (20, None, None, ("_concat", 20), None),
31 31 "~": (18, None, None, ("ancestor", 18), None),
32 32 "^": (18, None, None, ("parent", 18), "parentpost"),
33 33 "-": (5, None, ("negate", 19), ("minus", 5), None),
34 34 "::": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
35 35 "dagrangepost"),
36 36 "..": (17, "dagrangeall", ("dagrangepre", 17), ("dagrange", 17),
37 37 "dagrangepost"),
38 38 ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
39 39 "not": (10, None, ("not", 10), None, None),
40 40 "!": (10, None, ("not", 10), None, None),
41 41 "and": (5, None, None, ("and", 5), None),
42 42 "&": (5, None, None, ("and", 5), None),
43 43 "%": (5, None, None, ("only", 5), "onlypost"),
44 44 "or": (4, None, None, ("or", 4), None),
45 45 "|": (4, None, None, ("or", 4), None),
46 46 "+": (4, None, None, ("or", 4), None),
47 47 "=": (3, None, None, ("keyvalue", 3), None),
48 48 ",": (2, None, None, ("list", 2), None),
49 49 ")": (0, None, None, None, None),
50 50 "]": (0, None, None, None, None),
51 51 "symbol": (0, "symbol", None, None, None),
52 52 "string": (0, "string", None, None, None),
53 53 "end": (0, None, None, None, None),
54 54 }
55 55
56 56 keywords = {'and', 'or', 'not'}
57 57
58 58 symbols = {}
59 59
60 60 _quoteletters = {'"', "'"}
61 61 _simpleopletters = set(pycompat.iterbytestr("()[]#:=,-|&+!~^%"))
62 62
63 63 # default set of valid characters for the initial letter of symbols
64 64 _syminitletters = set(pycompat.iterbytestr(
65 65 string.ascii_letters.encode('ascii') +
66 66 string.digits.encode('ascii') +
67 67 '._@')) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
68 68
69 69 # default set of valid characters for non-initial letters of symbols
70 70 _symletters = _syminitletters | set(pycompat.iterbytestr('-/'))
71 71
72 72 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
73 73 '''
74 74 Parse a revset statement into a stream of tokens
75 75
76 76 ``syminitletters`` is the set of valid characters for the initial
77 77 letter of symbols.
78 78
79 79 By default, character ``c`` is recognized as valid for initial
80 80 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
81 81
82 82 ``symletters`` is the set of valid characters for non-initial
83 83 letters of symbols.
84 84
85 85 By default, character ``c`` is recognized as valid for non-initial
86 86 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
87 87
88 88 Check that @ is a valid unquoted token character (issue3686):
89 89 >>> list(tokenize(b"@::"))
90 90 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
91 91
92 92 '''
93 93 if not isinstance(program, bytes):
94 94 raise error.ProgrammingError('revset statement must be bytes, got %r'
95 95 % program)
96 96 program = pycompat.bytestr(program)
97 97 if syminitletters is None:
98 98 syminitletters = _syminitletters
99 99 if symletters is None:
100 100 symletters = _symletters
101 101
102 102 if program and lookup:
103 103 # attempt to parse old-style ranges first to deal with
104 104 # things like old-tag which contain query metacharacters
105 105 parts = program.split(':', 1)
106 106 if all(lookup(sym) for sym in parts if sym):
107 107 if parts[0]:
108 108 yield ('symbol', parts[0], 0)
109 109 if len(parts) > 1:
110 110 s = len(parts[0])
111 111 yield (':', None, s)
112 112 if parts[1]:
113 113 yield ('symbol', parts[1], s + 1)
114 114 yield ('end', None, len(program))
115 115 return
116 116
117 117 pos, l = 0, len(program)
118 118 while pos < l:
119 119 c = program[pos]
120 120 if c.isspace(): # skip inter-token whitespace
121 121 pass
122 122 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
123 123 yield ('::', None, pos)
124 124 pos += 1 # skip ahead
125 125 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
126 126 yield ('..', None, pos)
127 127 pos += 1 # skip ahead
128 128 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
129 129 yield ('##', None, pos)
130 130 pos += 1 # skip ahead
131 131 elif c in _simpleopletters: # handle simple operators
132 132 yield (c, None, pos)
133 133 elif (c in _quoteletters or c == 'r' and
134 134 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
135 135 if c == 'r':
136 136 pos += 1
137 137 c = program[pos]
138 138 decode = lambda x: x
139 139 else:
140 140 decode = parser.unescapestr
141 141 pos += 1
142 142 s = pos
143 143 while pos < l: # find closing quote
144 144 d = program[pos]
145 145 if d == '\\': # skip over escaped characters
146 146 pos += 2
147 147 continue
148 148 if d == c:
149 149 yield ('string', decode(program[s:pos]), s)
150 150 break
151 151 pos += 1
152 152 else:
153 153 raise error.ParseError(_("unterminated string"), s)
154 154 # gather up a symbol/keyword
155 155 elif c in syminitletters:
156 156 s = pos
157 157 pos += 1
158 158 while pos < l: # find end of symbol
159 159 d = program[pos]
160 160 if d not in symletters:
161 161 break
162 162 if d == '.' and program[pos - 1] == '.': # special case for ..
163 163 pos -= 1
164 164 break
165 165 pos += 1
166 166 sym = program[s:pos]
167 167 if sym in keywords: # operator keywords
168 168 yield (sym, None, s)
169 169 elif '-' in sym:
170 170 # some jerk gave us foo-bar-baz, try to check if it's a symbol
171 171 if lookup and lookup(sym):
172 172 # looks like a real symbol
173 173 yield ('symbol', sym, s)
174 174 else:
175 175 # looks like an expression
176 176 parts = sym.split('-')
177 177 for p in parts[:-1]:
178 178 if p: # possible consecutive -
179 179 yield ('symbol', p, s)
180 180 s += len(p)
181 181 yield ('-', None, s)
182 182 s += 1
183 183 if parts[-1]: # possible trailing -
184 184 yield ('symbol', parts[-1], s)
185 185 else:
186 186 yield ('symbol', sym, s)
187 187 pos -= 1
188 188 else:
189 189 raise error.ParseError(_("syntax error in revset '%s'") %
190 190 program, pos)
191 191 pos += 1
192 192 yield ('end', None, pos)
193 193
194 194 # helpers
195 195
196 196 _notset = object()
197 197
198 198 def getsymbol(x):
199 199 if x and x[0] == 'symbol':
200 200 return x[1]
201 201 raise error.ParseError(_('not a symbol'))
202 202
203 203 def getstring(x, err):
204 204 if x and (x[0] == 'string' or x[0] == 'symbol'):
205 205 return x[1]
206 206 raise error.ParseError(err)
207 207
208 208 def getinteger(x, err, default=_notset):
209 209 if not x and default is not _notset:
210 210 return default
211 211 try:
212 212 return int(getstring(x, err))
213 213 except ValueError:
214 214 raise error.ParseError(err)
215 215
216 216 def getboolean(x, err):
217 217 value = stringutil.parsebool(getsymbol(x))
218 218 if value is not None:
219 219 return value
220 220 raise error.ParseError(err)
221 221
222 222 def getlist(x):
223 223 if not x:
224 224 return []
225 225 if x[0] == 'list':
226 226 return list(x[1:])
227 227 return [x]
228 228
229 229 def getrange(x, err):
230 230 if not x:
231 231 raise error.ParseError(err)
232 232 op = x[0]
233 233 if op == 'range':
234 234 return x[1], x[2]
235 235 elif op == 'rangepre':
236 236 return None, x[1]
237 237 elif op == 'rangepost':
238 238 return x[1], None
239 239 elif op == 'rangeall':
240 240 return None, None
241 241 raise error.ParseError(err)
242 242
243 def getintrange(x, err1, err2, deffirst=_notset, deflast=_notset):
244 """Get [first, last] integer range (both inclusive) from a parsed tree
245
246 If either side is omitted and no default is provided, a ParseError will
247 be raised.
248 """
249 a, b = getrange(x, err1)
250 return getinteger(a, err2, deffirst), getinteger(b, err2, deflast)
251
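A minimal sketch of the new helper in isolation: parse an ``x:y`` expression and pull out the inclusive integer bounds, with defaults covering the open-ended forms (the error messages here are placeholders):

    from mercurial import revsetlang

    tree = revsetlang.parse(b'1:3')   # ('range', ('symbol', '1'), ('symbol', '3'))
    first, last = revsetlang.getintrange(
        tree, b'expected a range', b'expected an integer')
    # first == 1, last == 3

    # ':3' parses as 'rangepre'; the missing side falls back to deffirst
    tree = revsetlang.parse(b':3')
    first, last = revsetlang.getintrange(
        tree, b'expected a range', b'expected an integer', deffirst=0)
    # first == 0, last == 3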
243 252 def getargs(x, min, max, err):
244 253 l = getlist(x)
245 254 if len(l) < min or (max >= 0 and len(l) > max):
246 255 raise error.ParseError(err)
247 256 return l
248 257
249 258 def getargsdict(x, funcname, keys):
250 259 return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys),
251 260 keyvaluenode='keyvalue', keynode='symbol')
252 261
253 262 # cache of {spec: raw parsed tree} built internally
254 263 _treecache = {}
255 264
256 265 def _cachedtree(spec):
257 266 # thread safe because parse() is reentrant and dict.__setitem__() is atomic
258 267 tree = _treecache.get(spec)
259 268 if tree is None:
260 269 _treecache[spec] = tree = parse(spec)
261 270 return tree
262 271
263 272 def _build(tmplspec, *repls):
264 273 """Create raw parsed tree from a template revset statement
265 274
266 275 >>> _build(b'f(_) and _', (b'string', b'1'), (b'symbol', b'2'))
267 276 ('and', ('func', ('symbol', 'f'), ('string', '1')), ('symbol', '2'))
268 277 """
269 278 template = _cachedtree(tmplspec)
270 279 return parser.buildtree(template, ('symbol', '_'), *repls)
271 280
272 281 def _match(patspec, tree):
273 282 """Test if a tree matches the given pattern statement; return the matches
274 283
275 284 >>> _match(b'f(_)', parse(b'f()'))
276 285 >>> _match(b'f(_)', parse(b'f(1)'))
277 286 [('func', ('symbol', 'f'), ('symbol', '1')), ('symbol', '1')]
278 287 >>> _match(b'f(_)', parse(b'f(1, 2)'))
279 288 """
280 289 pattern = _cachedtree(patspec)
281 290 return parser.matchtree(pattern, tree, ('symbol', '_'),
282 291 {'keyvalue', 'list'})
283 292
284 293 def _matchonly(revs, bases):
285 294 return _match('ancestors(_) and not ancestors(_)', ('and', revs, bases))
286 295
287 296 def _fixops(x):
288 297 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
289 298 handled well by our simple top-down parser"""
290 299 if not isinstance(x, tuple):
291 300 return x
292 301
293 302 op = x[0]
294 303 if op == 'parent':
295 304 # x^:y means (x^) : y, not x ^ (:y)
296 305 # x^: means (x^) :, not x ^ (:)
297 306 post = ('parentpost', x[1])
298 307 if x[2][0] == 'dagrangepre':
299 308 return _fixops(('dagrange', post, x[2][1]))
300 309 elif x[2][0] == 'dagrangeall':
301 310 return _fixops(('dagrangepost', post))
302 311 elif x[2][0] == 'rangepre':
303 312 return _fixops(('range', post, x[2][1]))
304 313 elif x[2][0] == 'rangeall':
305 314 return _fixops(('rangepost', post))
306 315 elif op == 'or':
307 316 # make number of arguments deterministic:
308 317 # x + y + z -> (or x y z) -> (or (list x y z))
309 318 return (op, _fixops(('list',) + x[1:]))
310 319 elif op == 'subscript' and x[1][0] == 'relation':
311 320 # x#y[z] ternary
312 321 return _fixops(('relsubscript', x[1][1], x[1][2], x[2]))
313 322
314 323 return (op,) + tuple(_fixops(y) for y in x[1:])
315 324
316 325 def _analyze(x):
317 326 if x is None:
318 327 return x
319 328
320 329 op = x[0]
321 330 if op == 'minus':
322 331 return _analyze(_build('_ and not _', *x[1:]))
323 332 elif op == 'only':
324 333 return _analyze(_build('only(_, _)', *x[1:]))
325 334 elif op == 'onlypost':
326 335 return _analyze(_build('only(_)', x[1]))
327 336 elif op == 'dagrangeall':
328 337 raise error.ParseError(_("can't use '::' in this context"))
329 338 elif op == 'dagrangepre':
330 339 return _analyze(_build('ancestors(_)', x[1]))
331 340 elif op == 'dagrangepost':
332 341 return _analyze(_build('descendants(_)', x[1]))
333 342 elif op == 'negate':
334 343 s = getstring(x[1], _("can't negate that"))
335 344 return _analyze(('string', '-' + s))
336 345 elif op in ('string', 'symbol', 'smartset'):
337 346 return x
338 347 elif op == 'rangeall':
339 348 return (op, None)
340 349 elif op in {'or', 'not', 'rangepre', 'rangepost', 'parentpost'}:
341 350 return (op, _analyze(x[1]))
342 351 elif op == 'group':
343 352 return _analyze(x[1])
344 353 elif op in {'and', 'dagrange', 'range', 'parent', 'ancestor', 'relation',
345 354 'subscript'}:
346 355 ta = _analyze(x[1])
347 356 tb = _analyze(x[2])
348 357 return (op, ta, tb)
349 358 elif op == 'relsubscript':
350 359 ta = _analyze(x[1])
351 360 tb = _analyze(x[2])
352 361 tc = _analyze(x[3])
353 362 return (op, ta, tb, tc)
354 363 elif op == 'list':
355 364 return (op,) + tuple(_analyze(y) for y in x[1:])
356 365 elif op == 'keyvalue':
357 366 return (op, x[1], _analyze(x[2]))
358 367 elif op == 'func':
359 368 return (op, x[1], _analyze(x[2]))
360 369 raise ValueError('invalid operator %r' % op)
361 370
362 371 def analyze(x):
363 372 """Transform raw parsed tree to evaluatable tree which can be fed to
364 373 optimize() or getset()
365 374
366 375 All pseudo operations should be mapped to real operations or functions
367 376 defined in methods or symbols table respectively.
368 377 """
369 378 return _analyze(x)
370 379
371 380 def _optimize(x):
372 381 if x is None:
373 382 return 0, x
374 383
375 384 op = x[0]
376 385 if op in ('string', 'symbol', 'smartset'):
377 386 return 0.5, x # single revisions are small
378 387 elif op == 'and':
379 388 wa, ta = _optimize(x[1])
380 389 wb, tb = _optimize(x[2])
381 390 w = min(wa, wb)
382 391
383 392 # (draft/secret/_notpublic() & ::x) have a fast path
384 393 m = _match('_() & ancestors(_)', ('and', ta, tb))
385 394 if m and getsymbol(m[1]) in {'draft', 'secret', '_notpublic'}:
386 395 return w, _build('_phaseandancestors(_, _)', m[1], m[2])
387 396
388 397 # (::x and not ::y)/(not ::y and ::x) have a fast path
389 398 m = _matchonly(ta, tb) or _matchonly(tb, ta)
390 399 if m:
391 400 return w, _build('only(_, _)', *m[1:])
392 401
393 402 m = _match('not _', tb)
394 403 if m:
395 404 return wa, ('difference', ta, m[1])
396 405 if wa > wb:
397 406 op = 'andsmally'
398 407 return w, (op, ta, tb)
399 408 elif op == 'or':
400 409 # fast path for machine-generated expression, that is likely to have
401 410 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
402 411 ws, ts, ss = [], [], []
403 412 def flushss():
404 413 if not ss:
405 414 return
406 415 if len(ss) == 1:
407 416 w, t = ss[0]
408 417 else:
409 418 s = '\0'.join(t[1] for w, t in ss)
410 419 y = _build('_list(_)', ('string', s))
411 420 w, t = _optimize(y)
412 421 ws.append(w)
413 422 ts.append(t)
414 423 del ss[:]
415 424 for y in getlist(x[1]):
416 425 w, t = _optimize(y)
417 426 if t is not None and (t[0] == 'string' or t[0] == 'symbol'):
418 427 ss.append((w, t))
419 428 continue
420 429 flushss()
421 430 ws.append(w)
422 431 ts.append(t)
423 432 flushss()
424 433 if len(ts) == 1:
425 434 return ws[0], ts[0] # 'or' operation is fully optimized out
426 435 return max(ws), (op, ('list',) + tuple(ts))
427 436 elif op == 'not':
428 437 # Optimize not public() to _notpublic() because we have a fast version
429 438 if _match('public()', x[1]):
430 439 o = _optimize(_build('_notpublic()'))
431 440 return o[0], o[1]
432 441 else:
433 442 o = _optimize(x[1])
434 443 return o[0], (op, o[1])
435 444 elif op == 'rangeall':
436 445 return 1, x
437 446 elif op in ('rangepre', 'rangepost', 'parentpost'):
438 447 o = _optimize(x[1])
439 448 return o[0], (op, o[1])
440 449 elif op in ('dagrange', 'range'):
441 450 wa, ta = _optimize(x[1])
442 451 wb, tb = _optimize(x[2])
443 452 return wa + wb, (op, ta, tb)
444 453 elif op in ('parent', 'ancestor', 'relation', 'subscript'):
445 454 w, t = _optimize(x[1])
446 455 return w, (op, t, x[2])
447 456 elif op == 'relsubscript':
448 457 w, t = _optimize(x[1])
449 458 return w, (op, t, x[2], x[3])
450 459 elif op == 'list':
451 460 ws, ts = zip(*(_optimize(y) for y in x[1:]))
452 461 return sum(ws), (op,) + ts
453 462 elif op == 'keyvalue':
454 463 w, t = _optimize(x[2])
455 464 return w, (op, x[1], t)
456 465 elif op == 'func':
457 466 f = getsymbol(x[1])
458 467 wa, ta = _optimize(x[2])
459 468 w = getattr(symbols.get(f), '_weight', 1)
460 469 m = _match('commonancestors(_)', ta)
461 470
462 471 # Optimize heads(commonancestors(_)) because we have a fast version
463 472 if f == 'heads' and m:
464 473 return w + wa, _build('_commonancestorheads(_)', m[1])
465 474
466 475 return w + wa, (op, x[1], ta)
467 476 raise ValueError('invalid operator %r' % op)
468 477
469 478 def optimize(tree):
470 479 """Optimize evaluatable tree
471 480
472 481 All pseudo operations should be transformed beforehand.
473 482 """
474 483 _weight, newtree = _optimize(tree)
475 484 return newtree
476 485
477 486 # the set of valid characters for the initial letter of symbols in
478 487 # alias declarations and definitions
479 488 _aliassyminitletters = _syminitletters | {'$'}
480 489
481 490 def _parsewith(spec, lookup=None, syminitletters=None):
482 491 """Generate a parse tree of given spec with given tokenizing options
483 492
484 493 >>> _parsewith(b'foo($1)', syminitletters=_aliassyminitletters)
485 494 ('func', ('symbol', 'foo'), ('symbol', '$1'))
486 495 >>> _parsewith(b'$1')
487 496 Traceback (most recent call last):
488 497 ...
489 498 ParseError: ("syntax error in revset '$1'", 0)
490 499 >>> _parsewith(b'foo bar')
491 500 Traceback (most recent call last):
492 501 ...
493 502 ParseError: ('invalid token', 4)
494 503 """
495 504 if lookup and spec.startswith('revset(') and spec.endswith(')'):
496 505 lookup = None
497 506 p = parser.parser(elements)
498 507 tree, pos = p.parse(tokenize(spec, lookup=lookup,
499 508 syminitletters=syminitletters))
500 509 if pos != len(spec):
501 510 raise error.ParseError(_('invalid token'), pos)
502 511 return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
503 512
504 513 class _aliasrules(parser.basealiasrules):
505 514 """Parsing and expansion rule set of revset aliases"""
506 515 _section = _('revset alias')
507 516
508 517 @staticmethod
509 518 def _parse(spec):
510 519 """Parse alias declaration/definition ``spec``
511 520
512 521 This allows symbol names to use also ``$`` as an initial letter
513 522 (for backward compatibility), and callers of this function should
514 523 examine whether ``$`` is used also for unexpected symbols or not.
515 524 """
516 525 return _parsewith(spec, syminitletters=_aliassyminitletters)
517 526
518 527 @staticmethod
519 528 def _trygetfunc(tree):
520 529 if tree[0] == 'func' and tree[1][0] == 'symbol':
521 530 return tree[1][1], getlist(tree[2])
522 531
523 532 def expandaliases(tree, aliases, warn=None):
524 533 """Expand aliases in a tree, aliases is a list of (name, value) tuples"""
525 534 aliases = _aliasrules.buildmap(aliases)
526 535 tree = _aliasrules.expand(aliases, tree)
527 536 # warn about problematic (but not referred) aliases
528 537 if warn is not None:
529 538 for name, alias in sorted(aliases.iteritems()):
530 539 if alias.error and not alias.warned:
531 540 warn(_('warning: %s\n') % (alias.error))
532 541 alias.warned = True
533 542 return tree
534 543
535 544 def foldconcat(tree):
536 545 """Fold elements to be concatenated by `##`
537 546 """
538 547 if (not isinstance(tree, tuple)
539 548 or tree[0] in ('string', 'symbol', 'smartset')):
540 549 return tree
541 550 if tree[0] == '_concat':
542 551 pending = [tree]
543 552 l = []
544 553 while pending:
545 554 e = pending.pop()
546 555 if e[0] == '_concat':
547 556 pending.extend(reversed(e[1:]))
548 557 elif e[0] in ('string', 'symbol'):
549 558 l.append(e[1])
550 559 else:
551 560 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
552 561 raise error.ParseError(msg)
553 562 return ('string', ''.join(l))
554 563 else:
555 564 return tuple(foldconcat(t) for t in tree)
556 565
557 566 def parse(spec, lookup=None):
558 567 try:
559 568 return _parsewith(spec, lookup=lookup)
560 569 except error.ParseError as inst:
561 570 if len(inst.args) > 1: # has location
562 571 loc = inst.args[1]
563 572 # Remove newlines -- spaces are equivalent whitespace.
564 573 spec = spec.replace('\n', ' ')
565 574 # We want the caret to point to the place in the template that
566 575 # failed to parse, but in a hint we get an open paren at the
567 576 # start. Therefore, we print "loc + 1" spaces (instead of "loc")
568 577 # to line up the caret with the location of the error.
569 578 inst.hint = spec + '\n' + ' ' * (loc + 1) + '^ ' + _('here')
570 579 raise
571 580
572 581 def _quote(s):
573 582 r"""Quote a value in order to make it safe for the revset engine.
574 583
575 584 >>> _quote(b'asdf')
576 585 "'asdf'"
577 586 >>> _quote(b"asdf'\"")
578 587 '\'asdf\\\'"\''
579 588 >>> _quote(b'asdf\'')
580 589 "'asdf\\''"
581 590 >>> _quote(1)
582 591 "'1'"
583 592 """
584 593 return "'%s'" % stringutil.escapestr(pycompat.bytestr(s))
585 594
586 595 def _formatargtype(c, arg):
587 596 if c == 'd':
588 597 return '_rev(%d)' % int(arg)
589 598 elif c == 's':
590 599 return _quote(arg)
591 600 elif c == 'r':
592 601 if not isinstance(arg, bytes):
593 602 raise TypeError
594 603 parse(arg) # make sure syntax errors are confined
595 604 return '(%s)' % arg
596 605 elif c == 'n':
597 606 return _quote(node.hex(arg))
598 607 elif c == 'b':
599 608 try:
600 609 return _quote(arg.branch())
601 610 except AttributeError:
602 611 raise TypeError
603 612 raise error.ParseError(_('unexpected revspec format character %s') % c)
604 613
605 614 def _formatlistexp(s, t):
606 615 l = len(s)
607 616 if l == 0:
608 617 return "_list('')"
609 618 elif l == 1:
610 619 return _formatargtype(t, s[0])
611 620 elif t == 'd':
612 621 return _formatintlist(s)
613 622 elif t == 's':
614 623 return "_list(%s)" % _quote("\0".join(s))
615 624 elif t == 'n':
616 625 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
617 626 elif t == 'b':
618 627 try:
619 628 return "_list('%s')" % "\0".join(a.branch() for a in s)
620 629 except AttributeError:
621 630 raise TypeError
622 631
623 632 m = l // 2
624 633 return '(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
625 634
626 635 def _formatintlist(data):
627 636 try:
628 637 l = len(data)
629 638 if l == 0:
630 639 return "_list('')"
631 640 elif l == 1:
632 641 return _formatargtype('d', data[0])
633 642 return "_intlist('%s')" % "\0".join('%d' % int(a) for a in data)
634 643 except (TypeError, ValueError):
635 644 raise error.ParseError(_('invalid argument for revspec'))
636 645
637 646 def _formatparamexp(args, t):
638 647 return ', '.join(_formatargtype(t, a) for a in args)
639 648
640 649 _formatlistfuncs = {
641 650 'l': _formatlistexp,
642 651 'p': _formatparamexp,
643 652 }
644 653
645 654 def formatspec(expr, *args):
646 655 '''
647 656 This is a convenience function for using revsets internally, and
648 657 escapes arguments appropriately. Aliases are intentionally ignored
649 658 so that intended expression behavior isn't accidentally subverted.
650 659
651 660 Supported arguments:
652 661
653 662 %r = revset expression, parenthesized
653 662 %d = _rev(int(arg)), no quoting
655 664 %s = string(arg), escaped and single-quoted
656 665 %b = arg.branch(), escaped and single-quoted
657 666 %n = hex(arg), single-quoted
658 667 %% = a literal '%'
659 668
660 669 Prefixing the type with 'l' specifies a parenthesized list of that type,
661 670 and 'p' specifies a list of function parameters of that type.
662 671
663 672 >>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()"))
664 673 '(10 or 11):: and ((this()) or (that()))'
665 674 >>> formatspec(b'%d:: and not %d::', 10, 20)
666 675 '_rev(10):: and not _rev(20)::'
667 676 >>> formatspec(b'%ld or %ld', [], [1])
668 677 "_list('') or _rev(1)"
669 678 >>> formatspec(b'keyword(%s)', b'foo\\xe9')
670 679 "keyword('foo\\\\xe9')"
671 680 >>> b = lambda: b'default'
672 681 >>> b.branch = b
673 682 >>> formatspec(b'branch(%b)', b)
674 683 "branch('default')"
675 684 >>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd'])
676 685 "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
677 686 >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
678 687 "sort((:), 'desc', 'user')"
679 688 >>> formatspec(b'%ls', [b'a', b"'"])
680 689 "_list('a\\\\x00\\\\'')"
681 690 '''
682 691 parsed = _parseargs(expr, args)
683 692 ret = []
684 693 for t, arg in parsed:
685 694 if t is None:
686 695 ret.append(arg)
687 696 elif t == 'baseset':
688 697 if isinstance(arg, set):
689 698 arg = sorted(arg)
690 699 ret.append(_formatintlist(list(arg)))
691 700 else:
692 701 raise error.ProgrammingError("unknown revspec item type: %r" % t)
693 702 return b''.join(ret)
694 703
695 704 def spectree(expr, *args):
696 705 """similar to formatspec but return a parsed and optimized tree"""
697 706 parsed = _parseargs(expr, args)
698 707 ret = []
699 708 inputs = []
700 709 for t, arg in parsed:
701 710 if t is None:
702 711 ret.append(arg)
703 712 elif t == 'baseset':
704 713 newtree = ('smartset', smartset.baseset(arg))
705 714 inputs.append(newtree)
706 715 ret.append("$")
707 716 else:
708 717 raise error.ProgrammingError("unknown revspec item type: %r" % t)
709 718 expr = b''.join(ret)
710 719 tree = _parsewith(expr, syminitletters=_aliassyminitletters)
711 720 tree = parser.buildtree(tree, ('symbol', '$'), *inputs)
712 721 tree = foldconcat(tree)
713 722 tree = analyze(tree)
714 723 tree = optimize(tree)
715 724 return tree
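# Hedged usage sketch (not part of the original changeset): with a non-empty
# %ld argument, e.g. spectree(b'%ld and merge()', [1, 2, 3]), the integer list
# is expected to be injected as an in-memory smartset through the '$'
# placeholder rather than serialized into the expression string.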
716 725
717 726 def _parseargs(expr, args):
718 727 """parse the expression and replace all inexpensive args
719 728
719 728 return a list of tuples [(arg-type, arg-value)]
721 730
722 731 Arg-type can be:
723 732 * None: a string ready to be concatenated into a final spec
724 733 * 'baseset': an iterable of revisions
725 734 """
726 735 expr = pycompat.bytestr(expr)
727 736 argiter = iter(args)
728 737 ret = []
729 738 pos = 0
730 739 while pos < len(expr):
731 740 q = expr.find('%', pos)
732 741 if q < 0:
733 742 ret.append((None, expr[pos:]))
734 743 break
735 744 ret.append((None, expr[pos:q]))
736 745 pos = q + 1
737 746 try:
738 747 d = expr[pos]
739 748 except IndexError:
740 749 raise error.ParseError(_('incomplete revspec format character'))
741 750 if d == '%':
742 751 ret.append((None, d))
743 752 pos += 1
744 753 continue
745 754
746 755 try:
747 756 arg = next(argiter)
748 757 except StopIteration:
749 758 raise error.ParseError(_('missing argument for revspec'))
750 759 f = _formatlistfuncs.get(d)
751 760 if f:
752 761 # a list of some type, might be expensive, do not replace
753 762 pos += 1
754 763 islist = (d == 'l')
755 764 try:
756 765 d = expr[pos]
757 766 except IndexError:
758 767 raise error.ParseError(_('incomplete revspec format character'))
759 768 if islist and d == 'd' and arg:
760 769 # we don't create a baseset yet, because it comes with an
761 770 # extra cost. If we are going to serialize it, we had better
762 771 # skip it.
763 772 ret.append(('baseset', arg))
764 773 pos += 1
765 774 continue
766 775 try:
767 776 ret.append((None, f(list(arg), d)))
768 777 except (TypeError, ValueError):
769 778 raise error.ParseError(_('invalid argument for revspec'))
770 779 else:
771 780 # a single entry, not expensive, replace
772 781 try:
773 782 ret.append((None, _formatargtype(d, arg)))
774 783 except (TypeError, ValueError):
775 784 raise error.ParseError(_('invalid argument for revspec'))
776 785 pos += 1
777 786
778 787 try:
779 788 next(argiter)
780 789 raise error.ParseError(_('too many revspec arguments specified'))
781 790 except StopIteration:
782 791 pass
783 792 return ret
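# Illustrative sketch (not part of the original changeset): for an expression
# like b'%d and %ld' with arguments (5, [1, 2]), the result is roughly
#   [(None, '_rev(5)'), (None, ' and '), ('baseset', [1, 2])]
# i.e. cheap scalars are pre-formatted into spec fragments while the %ld list
# is kept as a 'baseset' item for the caller to handle.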
784 793
785 794 def prettyformat(tree):
786 795 return parser.prettyformat(tree, ('string', 'symbol'))
787 796
788 797 def depth(tree):
789 798 if isinstance(tree, tuple):
790 799 return max(map(depth, tree)) + 1
791 800 else:
792 801 return 0
793 802
794 803 def funcsused(tree):
795 804 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
796 805 return set()
797 806 else:
798 807 funcs = set()
799 808 for s in tree[1:]:
800 809 funcs |= funcsused(s)
801 810 if tree[0] == 'func':
802 811 funcs.add(tree[1][1])
803 812 return funcs
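# Illustrative sketch (not part of the original changeset): for a parsed
# expression such as heads(branch(default)), funcsused is expected to return
# the set of revset function names in the tree, here {'heads', 'branch'}.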
804 813
805 814 _hashre = util.re.compile('[0-9a-fA-F]{1,40}$')
806 815
807 816 def _ishashlikesymbol(symbol):
808 817 """returns true if the symbol looks like a hash"""
809 818 return _hashre.match(symbol)
810 819
811 820 def gethashlikesymbols(tree):
812 821 """returns the list of symbols of the tree that look like hashes
813 822
814 823 >>> gethashlikesymbols(parse(b'3::abe3ff'))
815 824 ['3', 'abe3ff']
816 825 >>> gethashlikesymbols(parse(b'precursors(.)'))
817 826 []
818 827 >>> gethashlikesymbols(parse(b'precursors(34)'))
819 828 ['34']
820 829 >>> gethashlikesymbols(parse(b'abe3ffZ'))
821 830 []
822 831 """
823 832 if not tree:
824 833 return []
825 834
826 835 if tree[0] == "symbol":
827 836 if _ishashlikesymbol(tree[1]):
828 837 return [tree[1]]
829 838 elif len(tree) >= 3:
830 839 results = []
831 840 for subtree in tree[1:]:
832 841 results += gethashlikesymbols(subtree)
833 842 return results
834 843 return []