##// END OF EJS Templates
revset: stop serializing node when using "%ln"...
marmoute -
r52469:de5bf3fe default
parent child Browse files
Show More
@@ -1,2874 +1,2891 b''
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import binascii
9 import binascii
10 import functools
10 import functools
11 import random
11 import random
12 import re
12 import re
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import (
15 from .node import (
16 bin,
16 bin,
17 nullrev,
17 nullrev,
18 wdirrev,
18 wdirrev,
19 )
19 )
20 from . import (
20 from . import (
21 dagop,
21 dagop,
22 destutil,
22 destutil,
23 diffutil,
23 diffutil,
24 encoding,
24 encoding,
25 error,
25 error,
26 grep as grepmod,
26 grep as grepmod,
27 hbisect,
27 hbisect,
28 match as matchmod,
28 match as matchmod,
29 obsolete as obsmod,
29 obsolete as obsmod,
30 obsutil,
30 obsutil,
31 pathutil,
31 pathutil,
32 phases,
32 phases,
33 pycompat,
33 pycompat,
34 registrar,
34 registrar,
35 repoview,
35 repoview,
36 revsetlang,
36 revsetlang,
37 scmutil,
37 scmutil,
38 smartset,
38 smartset,
39 stack as stackmod,
39 stack as stackmod,
40 util,
40 util,
41 )
41 )
42 from .utils import (
42 from .utils import (
43 dateutil,
43 dateutil,
44 stringutil,
44 stringutil,
45 urlutil,
45 urlutil,
46 )
46 )
47
47
48 # helpers for processing parsed tree
48 # helpers for processing parsed tree
49 getsymbol = revsetlang.getsymbol
49 getsymbol = revsetlang.getsymbol
50 getstring = revsetlang.getstring
50 getstring = revsetlang.getstring
51 getinteger = revsetlang.getinteger
51 getinteger = revsetlang.getinteger
52 getboolean = revsetlang.getboolean
52 getboolean = revsetlang.getboolean
53 getlist = revsetlang.getlist
53 getlist = revsetlang.getlist
54 getintrange = revsetlang.getintrange
54 getintrange = revsetlang.getintrange
55 getargs = revsetlang.getargs
55 getargs = revsetlang.getargs
56 getargsdict = revsetlang.getargsdict
56 getargsdict = revsetlang.getargsdict
57
57
58 baseset = smartset.baseset
58 baseset = smartset.baseset
59 generatorset = smartset.generatorset
59 generatorset = smartset.generatorset
60 spanset = smartset.spanset
60 spanset = smartset.spanset
61 fullreposet = smartset.fullreposet
61 fullreposet = smartset.fullreposet
62
62
63 # revisions not included in all(), but populated if specified
63 # revisions not included in all(), but populated if specified
64 _virtualrevs = (nullrev, wdirrev)
64 _virtualrevs = (nullrev, wdirrev)
65
65
66 # Constants for ordering requirement, used in getset():
66 # Constants for ordering requirement, used in getset():
67 #
67 #
68 # If 'define', any nested functions and operations MAY change the ordering of
68 # If 'define', any nested functions and operations MAY change the ordering of
69 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
69 # the entries in the set (but if changes the ordering, it MUST ALWAYS change
70 # it). If 'follow', any nested functions and operations MUST take the ordering
70 # it). If 'follow', any nested functions and operations MUST take the ordering
71 # specified by the first operand to the '&' operator.
71 # specified by the first operand to the '&' operator.
72 #
72 #
73 # For instance,
73 # For instance,
74 #
74 #
75 # X & (Y | Z)
75 # X & (Y | Z)
76 # ^ ^^^^^^^
76 # ^ ^^^^^^^
77 # | follow
77 # | follow
78 # define
78 # define
79 #
79 #
80 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
80 # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order
81 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
81 # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't.
82 #
82 #
83 # 'any' means the order doesn't matter. For instance,
83 # 'any' means the order doesn't matter. For instance,
84 #
84 #
85 # (X & !Y) | ancestors(Z)
85 # (X & !Y) | ancestors(Z)
86 # ^ ^
86 # ^ ^
87 # any any
87 # any any
88 #
88 #
89 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
89 # For 'X & !Y', 'X' decides the order and 'Y' is subtracted from 'X', so the
90 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
90 # order of 'Y' does not matter. For 'ancestors(Z)', Z's order does not matter
91 # since 'ancestors' does not care about the order of its argument.
91 # since 'ancestors' does not care about the order of its argument.
92 #
92 #
93 # Currently, most revsets do not care about the order, so 'define' is
93 # Currently, most revsets do not care about the order, so 'define' is
94 # equivalent to 'follow' for them, and the resulting order is based on the
94 # equivalent to 'follow' for them, and the resulting order is based on the
95 # 'subset' parameter passed down to them:
95 # 'subset' parameter passed down to them:
96 #
96 #
97 # m = revset.match(...)
97 # m = revset.match(...)
98 # m(repo, subset, order=defineorder)
98 # m(repo, subset, order=defineorder)
99 # ^^^^^^
99 # ^^^^^^
100 # For most revsets, 'define' means using the order this subset provides
100 # For most revsets, 'define' means using the order this subset provides
101 #
101 #
102 # There are a few revsets that always redefine the order if 'define' is
102 # There are a few revsets that always redefine the order if 'define' is
103 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
103 # specified: 'sort(X)', 'reverse(X)', 'x:y'.
104 anyorder = b'any' # don't care the order, could be even random-shuffled
104 anyorder = b'any' # don't care the order, could be even random-shuffled
105 defineorder = b'define' # ALWAYS redefine, or ALWAYS follow the current order
105 defineorder = b'define' # ALWAYS redefine, or ALWAYS follow the current order
106 followorder = b'follow' # MUST follow the current order
106 followorder = b'follow' # MUST follow the current order
107
107
108 # helpers
108 # helpers
109
109
110
110
111 def getset(repo, subset, x, order=defineorder):
111 def getset(repo, subset, x, order=defineorder):
112 if not x:
112 if not x:
113 raise error.ParseError(_(b"missing argument"))
113 raise error.ParseError(_(b"missing argument"))
114 return methods[x[0]](repo, subset, *x[1:], order=order)
114 return methods[x[0]](repo, subset, *x[1:], order=order)
115
115
116
116
117 def _getrevsource(repo, r):
117 def _getrevsource(repo, r):
118 extra = repo[r].extra()
118 extra = repo[r].extra()
119 for label in (b'source', b'transplant_source', b'rebase_source'):
119 for label in (b'source', b'transplant_source', b'rebase_source'):
120 if label in extra:
120 if label in extra:
121 try:
121 try:
122 return repo[extra[label]].rev()
122 return repo[extra[label]].rev()
123 except error.RepoLookupError:
123 except error.RepoLookupError:
124 pass
124 pass
125 return None
125 return None
126
126
127
127
128 def _sortedb(xs):
128 def _sortedb(xs):
129 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
129 return sorted(pycompat.rapply(pycompat.maybebytestr, xs))
130
130
131
131
132 # operator methods
132 # operator methods
133
133
134
134
135 def stringset(repo, subset, x, order):
135 def stringset(repo, subset, x, order):
136 if not x:
136 if not x:
137 raise error.ParseError(_(b"empty string is not a valid revision"))
137 raise error.ParseError(_(b"empty string is not a valid revision"))
138 x = scmutil.intrev(scmutil.revsymbol(repo, x))
138 x = scmutil.intrev(scmutil.revsymbol(repo, x))
139 if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
139 if x in subset or x in _virtualrevs and isinstance(subset, fullreposet):
140 return baseset([x])
140 return baseset([x])
141 return baseset()
141 return baseset()
142
142
143
143
144 def rawsmartset(repo, subset, x, order):
144 def rawsmartset(repo, subset, x, order):
145 """argument is already a smartset, use that directly"""
145 """argument is already a smartset, use that directly"""
146 if order == followorder:
146 if order == followorder:
147 return subset & x
147 return subset & x
148 else:
148 else:
149 return x & subset
149 return x & subset
150
150
151
151
152 def raw_node_set(repo, subset, x, order):
153 """argument is a list of nodeid, resolve and use them"""
154 nodes = _ordered_node_set(repo, x)
155 if order == followorder:
156 return subset & nodes
157 else:
158 return nodes & subset
159
160
161 def _ordered_node_set(repo, nodes):
162 if not nodes:
163 return baseset()
164 to_rev = repo.changelog.index.rev
165 return baseset([to_rev(r) for r in nodes])
166
167
152 def rangeset(repo, subset, x, y, order):
168 def rangeset(repo, subset, x, y, order):
153 m = getset(repo, fullreposet(repo), x)
169 m = getset(repo, fullreposet(repo), x)
154 n = getset(repo, fullreposet(repo), y)
170 n = getset(repo, fullreposet(repo), y)
155
171
156 if not m or not n:
172 if not m or not n:
157 return baseset()
173 return baseset()
158 return _makerangeset(repo, subset, m.first(), n.last(), order)
174 return _makerangeset(repo, subset, m.first(), n.last(), order)
159
175
160
176
161 def rangeall(repo, subset, x, order):
177 def rangeall(repo, subset, x, order):
162 assert x is None
178 assert x is None
163 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
179 return _makerangeset(repo, subset, 0, repo.changelog.tiprev(), order)
164
180
165
181
166 def rangepre(repo, subset, y, order):
182 def rangepre(repo, subset, y, order):
167 # ':y' can't be rewritten to '0:y' since '0' may be hidden
183 # ':y' can't be rewritten to '0:y' since '0' may be hidden
168 n = getset(repo, fullreposet(repo), y)
184 n = getset(repo, fullreposet(repo), y)
169 if not n:
185 if not n:
170 return baseset()
186 return baseset()
171 return _makerangeset(repo, subset, 0, n.last(), order)
187 return _makerangeset(repo, subset, 0, n.last(), order)
172
188
173
189
174 def rangepost(repo, subset, x, order):
190 def rangepost(repo, subset, x, order):
175 m = getset(repo, fullreposet(repo), x)
191 m = getset(repo, fullreposet(repo), x)
176 if not m:
192 if not m:
177 return baseset()
193 return baseset()
178 return _makerangeset(
194 return _makerangeset(
179 repo, subset, m.first(), repo.changelog.tiprev(), order
195 repo, subset, m.first(), repo.changelog.tiprev(), order
180 )
196 )
181
197
182
198
183 def _makerangeset(repo, subset, m, n, order):
199 def _makerangeset(repo, subset, m, n, order):
184 if m == n:
200 if m == n:
185 r = baseset([m])
201 r = baseset([m])
186 elif n == wdirrev:
202 elif n == wdirrev:
187 r = spanset(repo, m, len(repo)) + baseset([n])
203 r = spanset(repo, m, len(repo)) + baseset([n])
188 elif m == wdirrev:
204 elif m == wdirrev:
189 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
205 r = baseset([m]) + spanset(repo, repo.changelog.tiprev(), n - 1)
190 elif m < n:
206 elif m < n:
191 r = spanset(repo, m, n + 1)
207 r = spanset(repo, m, n + 1)
192 else:
208 else:
193 r = spanset(repo, m, n - 1)
209 r = spanset(repo, m, n - 1)
194
210
195 if order == defineorder:
211 if order == defineorder:
196 return r & subset
212 return r & subset
197 else:
213 else:
198 # carrying the sorting over when possible would be more efficient
214 # carrying the sorting over when possible would be more efficient
199 return subset & r
215 return subset & r
200
216
201
217
202 def dagrange(repo, subset, x, y, order):
218 def dagrange(repo, subset, x, y, order):
203 r = fullreposet(repo)
219 r = fullreposet(repo)
204 xs = dagop.reachableroots(
220 xs = dagop.reachableroots(
205 repo, getset(repo, r, x), getset(repo, r, y), includepath=True
221 repo, getset(repo, r, x), getset(repo, r, y), includepath=True
206 )
222 )
207 return subset & xs
223 return subset & xs
208
224
209
225
210 def andset(repo, subset, x, y, order):
226 def andset(repo, subset, x, y, order):
211 if order == anyorder:
227 if order == anyorder:
212 yorder = anyorder
228 yorder = anyorder
213 else:
229 else:
214 yorder = followorder
230 yorder = followorder
215 return getset(repo, getset(repo, subset, x, order), y, yorder)
231 return getset(repo, getset(repo, subset, x, order), y, yorder)
216
232
217
233
218 def andsmallyset(repo, subset, x, y, order):
234 def andsmallyset(repo, subset, x, y, order):
219 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
235 # 'andsmally(x, y)' is equivalent to 'and(x, y)', but faster when y is small
220 if order == anyorder:
236 if order == anyorder:
221 yorder = anyorder
237 yorder = anyorder
222 else:
238 else:
223 yorder = followorder
239 yorder = followorder
224 return getset(repo, getset(repo, subset, y, yorder), x, order)
240 return getset(repo, getset(repo, subset, y, yorder), x, order)
225
241
226
242
227 def differenceset(repo, subset, x, y, order):
243 def differenceset(repo, subset, x, y, order):
228 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
244 return getset(repo, subset, x, order) - getset(repo, subset, y, anyorder)
229
245
230
246
231 def _orsetlist(repo, subset, xs, order):
247 def _orsetlist(repo, subset, xs, order):
232 assert xs
248 assert xs
233 if len(xs) == 1:
249 if len(xs) == 1:
234 return getset(repo, subset, xs[0], order)
250 return getset(repo, subset, xs[0], order)
235 p = len(xs) // 2
251 p = len(xs) // 2
236 a = _orsetlist(repo, subset, xs[:p], order)
252 a = _orsetlist(repo, subset, xs[:p], order)
237 b = _orsetlist(repo, subset, xs[p:], order)
253 b = _orsetlist(repo, subset, xs[p:], order)
238 return a + b
254 return a + b
239
255
240
256
241 def orset(repo, subset, x, order):
257 def orset(repo, subset, x, order):
242 xs = getlist(x)
258 xs = getlist(x)
243 if not xs:
259 if not xs:
244 return baseset()
260 return baseset()
245 if order == followorder:
261 if order == followorder:
246 # slow path to take the subset order
262 # slow path to take the subset order
247 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
263 return subset & _orsetlist(repo, fullreposet(repo), xs, anyorder)
248 else:
264 else:
249 return _orsetlist(repo, subset, xs, order)
265 return _orsetlist(repo, subset, xs, order)
250
266
251
267
252 def notset(repo, subset, x, order):
268 def notset(repo, subset, x, order):
253 return subset - getset(repo, subset, x, anyorder)
269 return subset - getset(repo, subset, x, anyorder)
254
270
255
271
256 def relationset(repo, subset, x, y, order):
272 def relationset(repo, subset, x, y, order):
257 # this is pretty basic implementation of 'x#y' operator, still
273 # this is pretty basic implementation of 'x#y' operator, still
258 # experimental so undocumented. see the wiki for further ideas.
274 # experimental so undocumented. see the wiki for further ideas.
259 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
275 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
260 rel = getsymbol(y)
276 rel = getsymbol(y)
261 if rel in relations:
277 if rel in relations:
262 return relations[rel](repo, subset, x, rel, order)
278 return relations[rel](repo, subset, x, rel, order)
263
279
264 relnames = [r for r in relations.keys() if len(r) > 1]
280 relnames = [r for r in relations.keys() if len(r) > 1]
265 raise error.UnknownIdentifier(rel, relnames)
281 raise error.UnknownIdentifier(rel, relnames)
266
282
267
283
268 def _splitrange(a, b):
284 def _splitrange(a, b):
269 """Split range with bounds a and b into two ranges at 0 and return two
285 """Split range with bounds a and b into two ranges at 0 and return two
270 tuples of numbers for use as startdepth and stopdepth arguments of
286 tuples of numbers for use as startdepth and stopdepth arguments of
271 revancestors and revdescendants.
287 revancestors and revdescendants.
272
288
273 >>> _splitrange(-10, -5) # [-10:-5]
289 >>> _splitrange(-10, -5) # [-10:-5]
274 ((5, 11), (None, None))
290 ((5, 11), (None, None))
275 >>> _splitrange(5, 10) # [5:10]
291 >>> _splitrange(5, 10) # [5:10]
276 ((None, None), (5, 11))
292 ((None, None), (5, 11))
277 >>> _splitrange(-10, 10) # [-10:10]
293 >>> _splitrange(-10, 10) # [-10:10]
278 ((0, 11), (0, 11))
294 ((0, 11), (0, 11))
279 >>> _splitrange(-10, 0) # [-10:0]
295 >>> _splitrange(-10, 0) # [-10:0]
280 ((0, 11), (None, None))
296 ((0, 11), (None, None))
281 >>> _splitrange(0, 10) # [0:10]
297 >>> _splitrange(0, 10) # [0:10]
282 ((None, None), (0, 11))
298 ((None, None), (0, 11))
283 >>> _splitrange(0, 0) # [0:0]
299 >>> _splitrange(0, 0) # [0:0]
284 ((0, 1), (None, None))
300 ((0, 1), (None, None))
285 >>> _splitrange(1, -1) # [1:-1]
301 >>> _splitrange(1, -1) # [1:-1]
286 ((None, None), (None, None))
302 ((None, None), (None, None))
287 """
303 """
288 ancdepths = (None, None)
304 ancdepths = (None, None)
289 descdepths = (None, None)
305 descdepths = (None, None)
290 if a == b == 0:
306 if a == b == 0:
291 ancdepths = (0, 1)
307 ancdepths = (0, 1)
292 if a < 0:
308 if a < 0:
293 ancdepths = (-min(b, 0), -a + 1)
309 ancdepths = (-min(b, 0), -a + 1)
294 if b > 0:
310 if b > 0:
295 descdepths = (max(a, 0), b + 1)
311 descdepths = (max(a, 0), b + 1)
296 return ancdepths, descdepths
312 return ancdepths, descdepths
297
313
298
314
299 def generationsrel(repo, subset, x, rel, order):
315 def generationsrel(repo, subset, x, rel, order):
300 z = (b'rangeall', None)
316 z = (b'rangeall', None)
301 return generationssubrel(repo, subset, x, rel, z, order)
317 return generationssubrel(repo, subset, x, rel, z, order)
302
318
303
319
304 def generationssubrel(repo, subset, x, rel, z, order):
320 def generationssubrel(repo, subset, x, rel, z, order):
305 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
321 # TODO: rewrite tests, and drop startdepth argument from ancestors() and
306 # descendants() predicates
322 # descendants() predicates
307 a, b = getintrange(
323 a, b = getintrange(
308 z,
324 z,
309 _(b'relation subscript must be an integer or a range'),
325 _(b'relation subscript must be an integer or a range'),
310 _(b'relation subscript bounds must be integers'),
326 _(b'relation subscript bounds must be integers'),
311 deffirst=-(dagop.maxlogdepth - 1),
327 deffirst=-(dagop.maxlogdepth - 1),
312 deflast=+(dagop.maxlogdepth - 1),
328 deflast=+(dagop.maxlogdepth - 1),
313 )
329 )
314 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
330 (ancstart, ancstop), (descstart, descstop) = _splitrange(a, b)
315
331
316 if ancstart is None and descstart is None:
332 if ancstart is None and descstart is None:
317 return baseset()
333 return baseset()
318
334
319 revs = getset(repo, fullreposet(repo), x)
335 revs = getset(repo, fullreposet(repo), x)
320 if not revs:
336 if not revs:
321 return baseset()
337 return baseset()
322
338
323 if ancstart is not None and descstart is not None:
339 if ancstart is not None and descstart is not None:
324 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
340 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
325 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
341 s += dagop.revdescendants(repo, revs, False, descstart, descstop)
326 elif ancstart is not None:
342 elif ancstart is not None:
327 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
343 s = dagop.revancestors(repo, revs, False, ancstart, ancstop)
328 elif descstart is not None:
344 elif descstart is not None:
329 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
345 s = dagop.revdescendants(repo, revs, False, descstart, descstop)
330
346
331 return subset & s
347 return subset & s
332
348
333
349
334 def relsubscriptset(repo, subset, x, y, z, order):
350 def relsubscriptset(repo, subset, x, y, z, order):
335 # this is pretty basic implementation of 'x#y[z]' operator, still
351 # this is pretty basic implementation of 'x#y[z]' operator, still
336 # experimental so undocumented. see the wiki for further ideas.
352 # experimental so undocumented. see the wiki for further ideas.
337 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
353 # https://www.mercurial-scm.org/wiki/RevsetOperatorPlan
338 rel = getsymbol(y)
354 rel = getsymbol(y)
339 if rel in subscriptrelations:
355 if rel in subscriptrelations:
340 return subscriptrelations[rel](repo, subset, x, rel, z, order)
356 return subscriptrelations[rel](repo, subset, x, rel, z, order)
341
357
342 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
358 relnames = [r for r in subscriptrelations.keys() if len(r) > 1]
343 raise error.UnknownIdentifier(rel, relnames)
359 raise error.UnknownIdentifier(rel, relnames)
344
360
345
361
346 def subscriptset(repo, subset, x, y, order):
362 def subscriptset(repo, subset, x, y, order):
347 raise error.ParseError(_(b"can't use a subscript in this context"))
363 raise error.ParseError(_(b"can't use a subscript in this context"))
348
364
349
365
350 def listset(repo, subset, *xs, **opts):
366 def listset(repo, subset, *xs, **opts):
351 raise error.ParseError(
367 raise error.ParseError(
352 _(b"can't use a list in this context"),
368 _(b"can't use a list in this context"),
353 hint=_(b'see \'hg help "revsets.x or y"\''),
369 hint=_(b'see \'hg help "revsets.x or y"\''),
354 )
370 )
355
371
356
372
357 def keyvaluepair(repo, subset, k, v, order):
373 def keyvaluepair(repo, subset, k, v, order):
358 raise error.ParseError(_(b"can't use a key-value pair in this context"))
374 raise error.ParseError(_(b"can't use a key-value pair in this context"))
359
375
360
376
361 def func(repo, subset, a, b, order):
377 def func(repo, subset, a, b, order):
362 f = getsymbol(a)
378 f = getsymbol(a)
363 if f in symbols:
379 if f in symbols:
364 func = symbols[f]
380 func = symbols[f]
365 if getattr(func, '_takeorder', False):
381 if getattr(func, '_takeorder', False):
366 return func(repo, subset, b, order)
382 return func(repo, subset, b, order)
367 return func(repo, subset, b)
383 return func(repo, subset, b)
368
384
369 keep = lambda fn: getattr(fn, '__doc__', None) is not None
385 keep = lambda fn: getattr(fn, '__doc__', None) is not None
370
386
371 syms = [s for (s, fn) in symbols.items() if keep(fn)]
387 syms = [s for (s, fn) in symbols.items() if keep(fn)]
372 raise error.UnknownIdentifier(f, syms)
388 raise error.UnknownIdentifier(f, syms)
373
389
374
390
375 # functions
391 # functions
376
392
377 # symbols are callables like:
393 # symbols are callables like:
378 # fn(repo, subset, x)
394 # fn(repo, subset, x)
379 # with:
395 # with:
380 # repo - current repository instance
396 # repo - current repository instance
381 # subset - of revisions to be examined
397 # subset - of revisions to be examined
382 # x - argument in tree form
398 # x - argument in tree form
383 symbols = revsetlang.symbols
399 symbols = revsetlang.symbols
384
400
385 # symbols which can't be used for a DoS attack for any given input
401 # symbols which can't be used for a DoS attack for any given input
386 # (e.g. those which accept regexes as plain strings shouldn't be included)
402 # (e.g. those which accept regexes as plain strings shouldn't be included)
387 # functions that just return a lot of changesets (like all) don't count here
403 # functions that just return a lot of changesets (like all) don't count here
388 safesymbols = set()
404 safesymbols = set()
389
405
390 predicate = registrar.revsetpredicate()
406 predicate = registrar.revsetpredicate()
391
407
392
408
393 @predicate(b'_destupdate')
409 @predicate(b'_destupdate')
394 def _destupdate(repo, subset, x):
410 def _destupdate(repo, subset, x):
395 # experimental revset for update destination
411 # experimental revset for update destination
396 args = getargsdict(x, b'limit', b'clean')
412 args = getargsdict(x, b'limit', b'clean')
397 return subset & baseset(
413 return subset & baseset(
398 [destutil.destupdate(repo, **pycompat.strkwargs(args))[0]]
414 [destutil.destupdate(repo, **pycompat.strkwargs(args))[0]]
399 )
415 )
400
416
401
417
402 @predicate(b'_destmerge')
418 @predicate(b'_destmerge')
403 def _destmerge(repo, subset, x):
419 def _destmerge(repo, subset, x):
404 # experimental revset for merge destination
420 # experimental revset for merge destination
405 sourceset = None
421 sourceset = None
406 if x is not None:
422 if x is not None:
407 sourceset = getset(repo, fullreposet(repo), x)
423 sourceset = getset(repo, fullreposet(repo), x)
408 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
424 return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)])
409
425
410
426
411 @predicate(b'adds(pattern)', safe=True, weight=30)
427 @predicate(b'adds(pattern)', safe=True, weight=30)
412 def adds(repo, subset, x):
428 def adds(repo, subset, x):
413 """Changesets that add a file matching pattern.
429 """Changesets that add a file matching pattern.
414
430
415 The pattern without explicit kind like ``glob:`` is expected to be
431 The pattern without explicit kind like ``glob:`` is expected to be
416 relative to the current directory and match against a file or a
432 relative to the current directory and match against a file or a
417 directory.
433 directory.
418 """
434 """
419 # i18n: "adds" is a keyword
435 # i18n: "adds" is a keyword
420 pat = getstring(x, _(b"adds requires a pattern"))
436 pat = getstring(x, _(b"adds requires a pattern"))
421 return checkstatus(repo, subset, pat, 'added')
437 return checkstatus(repo, subset, pat, 'added')
422
438
423
439
424 @predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
440 @predicate(b'ancestor(*changeset)', safe=True, weight=0.5)
425 def ancestor(repo, subset, x):
441 def ancestor(repo, subset, x):
426 """A greatest common ancestor of the changesets.
442 """A greatest common ancestor of the changesets.
427
443
428 Accepts 0 or more changesets.
444 Accepts 0 or more changesets.
429 Will return empty list when passed no args.
445 Will return empty list when passed no args.
430 Greatest common ancestor of a single changeset is that changeset.
446 Greatest common ancestor of a single changeset is that changeset.
431 """
447 """
432 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
448 reviter = iter(orset(repo, fullreposet(repo), x, order=anyorder))
433 try:
449 try:
434 anc = repo[next(reviter)]
450 anc = repo[next(reviter)]
435 except StopIteration:
451 except StopIteration:
436 return baseset()
452 return baseset()
437 for r in reviter:
453 for r in reviter:
438 anc = anc.ancestor(repo[r])
454 anc = anc.ancestor(repo[r])
439
455
440 r = scmutil.intrev(anc)
456 r = scmutil.intrev(anc)
441 if r in subset:
457 if r in subset:
442 return baseset([r])
458 return baseset([r])
443 return baseset()
459 return baseset()
444
460
445
461
446 def _ancestors(
462 def _ancestors(
447 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
463 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
448 ):
464 ):
449 heads = getset(repo, fullreposet(repo), x)
465 heads = getset(repo, fullreposet(repo), x)
450 if not heads:
466 if not heads:
451 return baseset()
467 return baseset()
452 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
468 s = dagop.revancestors(repo, heads, followfirst, startdepth, stopdepth)
453 return subset & s
469 return subset & s
454
470
455
471
456 @predicate(b'ancestors(set[, depth])', safe=True)
472 @predicate(b'ancestors(set[, depth])', safe=True)
457 def ancestors(repo, subset, x):
473 def ancestors(repo, subset, x):
458 """Changesets that are ancestors of changesets in set, including the
474 """Changesets that are ancestors of changesets in set, including the
459 given changesets themselves.
475 given changesets themselves.
460
476
461 If depth is specified, the result only includes changesets up to
477 If depth is specified, the result only includes changesets up to
462 the specified generation.
478 the specified generation.
463 """
479 """
464 # startdepth is for internal use only until we can decide the UI
480 # startdepth is for internal use only until we can decide the UI
465 args = getargsdict(x, b'ancestors', b'set depth startdepth')
481 args = getargsdict(x, b'ancestors', b'set depth startdepth')
466 if b'set' not in args:
482 if b'set' not in args:
467 # i18n: "ancestors" is a keyword
483 # i18n: "ancestors" is a keyword
468 raise error.ParseError(_(b'ancestors takes at least 1 argument'))
484 raise error.ParseError(_(b'ancestors takes at least 1 argument'))
469 startdepth = stopdepth = None
485 startdepth = stopdepth = None
470 if b'startdepth' in args:
486 if b'startdepth' in args:
471 n = getinteger(
487 n = getinteger(
472 args[b'startdepth'], b"ancestors expects an integer startdepth"
488 args[b'startdepth'], b"ancestors expects an integer startdepth"
473 )
489 )
474 if n < 0:
490 if n < 0:
475 raise error.ParseError(b"negative startdepth")
491 raise error.ParseError(b"negative startdepth")
476 startdepth = n
492 startdepth = n
477 if b'depth' in args:
493 if b'depth' in args:
478 # i18n: "ancestors" is a keyword
494 # i18n: "ancestors" is a keyword
479 n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
495 n = getinteger(args[b'depth'], _(b"ancestors expects an integer depth"))
480 if n < 0:
496 if n < 0:
481 raise error.ParseError(_(b"negative depth"))
497 raise error.ParseError(_(b"negative depth"))
482 stopdepth = n + 1
498 stopdepth = n + 1
483 return _ancestors(
499 return _ancestors(
484 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
500 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
485 )
501 )
486
502
487
503
488 @predicate(b'_firstancestors', safe=True)
504 @predicate(b'_firstancestors', safe=True)
489 def _firstancestors(repo, subset, x):
505 def _firstancestors(repo, subset, x):
490 # ``_firstancestors(set)``
506 # ``_firstancestors(set)``
491 # Like ``ancestors(set)`` but follows only the first parents.
507 # Like ``ancestors(set)`` but follows only the first parents.
492 return _ancestors(repo, subset, x, followfirst=True)
508 return _ancestors(repo, subset, x, followfirst=True)
493
509
494
510
495 def _childrenspec(repo, subset, x, n, order):
511 def _childrenspec(repo, subset, x, n, order):
496 """Changesets that are the Nth child of a changeset
512 """Changesets that are the Nth child of a changeset
497 in set.
513 in set.
498 """
514 """
499 cs = set()
515 cs = set()
500 for r in getset(repo, fullreposet(repo), x):
516 for r in getset(repo, fullreposet(repo), x):
501 for i in range(n):
517 for i in range(n):
502 c = repo[r].children()
518 c = repo[r].children()
503 if len(c) == 0:
519 if len(c) == 0:
504 break
520 break
505 if len(c) > 1:
521 if len(c) > 1:
506 raise error.RepoLookupError(
522 raise error.RepoLookupError(
507 _(b"revision in set has more than one child")
523 _(b"revision in set has more than one child")
508 )
524 )
509 r = c[0].rev()
525 r = c[0].rev()
510 else:
526 else:
511 cs.add(r)
527 cs.add(r)
512 return subset & cs
528 return subset & cs
513
529
514
530
515 def ancestorspec(repo, subset, x, n, order):
531 def ancestorspec(repo, subset, x, n, order):
516 """``set~n``
532 """``set~n``
517 Changesets that are the Nth ancestor (first parents only) of a changeset
533 Changesets that are the Nth ancestor (first parents only) of a changeset
518 in set.
534 in set.
519 """
535 """
520 n = getinteger(n, _(b"~ expects a number"))
536 n = getinteger(n, _(b"~ expects a number"))
521 if n < 0:
537 if n < 0:
522 # children lookup
538 # children lookup
523 return _childrenspec(repo, subset, x, -n, order)
539 return _childrenspec(repo, subset, x, -n, order)
524 ps = set()
540 ps = set()
525 cl = repo.changelog
541 cl = repo.changelog
526 for r in getset(repo, fullreposet(repo), x):
542 for r in getset(repo, fullreposet(repo), x):
527 for i in range(n):
543 for i in range(n):
528 try:
544 try:
529 r = cl.parentrevs(r)[0]
545 r = cl.parentrevs(r)[0]
530 except error.WdirUnsupported:
546 except error.WdirUnsupported:
531 r = repo[r].p1().rev()
547 r = repo[r].p1().rev()
532 ps.add(r)
548 ps.add(r)
533 return subset & ps
549 return subset & ps
534
550
535
551
536 @predicate(b'author(string)', safe=True, weight=10)
552 @predicate(b'author(string)', safe=True, weight=10)
537 def author(repo, subset, x):
553 def author(repo, subset, x):
538 """Alias for ``user(string)``."""
554 """Alias for ``user(string)``."""
539 # i18n: "author" is a keyword
555 # i18n: "author" is a keyword
540 n = getstring(x, _(b"author requires a string"))
556 n = getstring(x, _(b"author requires a string"))
541 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
557 kind, pattern, matcher = _substringmatcher(n, casesensitive=False)
542 return subset.filter(
558 return subset.filter(
543 lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n)
559 lambda x: matcher(repo[x].user()), condrepr=(b'<user %r>', n)
544 )
560 )
545
561
546
562
547 @predicate(b'bisect(string)', safe=True)
563 @predicate(b'bisect(string)', safe=True)
548 def bisect(repo, subset, x):
564 def bisect(repo, subset, x):
549 """Changesets marked in the specified bisect status:
565 """Changesets marked in the specified bisect status:
550
566
551 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
567 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
552 - ``goods``, ``bads`` : csets topologically good/bad
568 - ``goods``, ``bads`` : csets topologically good/bad
553 - ``range`` : csets taking part in the bisection
569 - ``range`` : csets taking part in the bisection
554 - ``pruned`` : csets that are goods, bads or skipped
570 - ``pruned`` : csets that are goods, bads or skipped
555 - ``untested`` : csets whose fate is yet unknown
571 - ``untested`` : csets whose fate is yet unknown
556 - ``ignored`` : csets ignored due to DAG topology
572 - ``ignored`` : csets ignored due to DAG topology
557 - ``current`` : the cset currently being bisected
573 - ``current`` : the cset currently being bisected
558 """
574 """
559 # i18n: "bisect" is a keyword
575 # i18n: "bisect" is a keyword
560 status = getstring(x, _(b"bisect requires a string")).lower()
576 status = getstring(x, _(b"bisect requires a string")).lower()
561 state = set(hbisect.get(repo, status))
577 state = set(hbisect.get(repo, status))
562 return subset & state
578 return subset & state
563
579
564
580
565 # Backward-compatibility
581 # Backward-compatibility
566 # - no help entry so that we do not advertise it any more
582 # - no help entry so that we do not advertise it any more
567 @predicate(b'bisected', safe=True)
583 @predicate(b'bisected', safe=True)
568 def bisected(repo, subset, x):
584 def bisected(repo, subset, x):
569 return bisect(repo, subset, x)
585 return bisect(repo, subset, x)
570
586
571
587
572 @predicate(b'bookmark([name])', safe=True)
588 @predicate(b'bookmark([name])', safe=True)
573 def bookmark(repo, subset, x):
589 def bookmark(repo, subset, x):
574 """The named bookmark or all bookmarks.
590 """The named bookmark or all bookmarks.
575
591
576 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
592 Pattern matching is supported for `name`. See :hg:`help revisions.patterns`.
577 """
593 """
578 # i18n: "bookmark" is a keyword
594 # i18n: "bookmark" is a keyword
579 args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
595 args = getargs(x, 0, 1, _(b'bookmark takes one or no arguments'))
580 if args:
596 if args:
581 bm = getstring(
597 bm = getstring(
582 args[0],
598 args[0],
583 # i18n: "bookmark" is a keyword
599 # i18n: "bookmark" is a keyword
584 _(b'the argument to bookmark must be a string'),
600 _(b'the argument to bookmark must be a string'),
585 )
601 )
586 kind, pattern, matcher = stringutil.stringmatcher(bm)
602 kind, pattern, matcher = stringutil.stringmatcher(bm)
587 bms = set()
603 bms = set()
588 if kind == b'literal':
604 if kind == b'literal':
589 if bm == pattern:
605 if bm == pattern:
590 pattern = repo._bookmarks.expandname(pattern)
606 pattern = repo._bookmarks.expandname(pattern)
591 bmrev = repo._bookmarks.get(pattern, None)
607 bmrev = repo._bookmarks.get(pattern, None)
592 if not bmrev:
608 if not bmrev:
593 raise error.RepoLookupError(
609 raise error.RepoLookupError(
594 _(b"bookmark '%s' does not exist") % pattern
610 _(b"bookmark '%s' does not exist") % pattern
595 )
611 )
596 bms.add(repo[bmrev].rev())
612 bms.add(repo[bmrev].rev())
597 else:
613 else:
598 matchrevs = set()
614 matchrevs = set()
599 for name, bmrev in repo._bookmarks.items():
615 for name, bmrev in repo._bookmarks.items():
600 if matcher(name):
616 if matcher(name):
601 matchrevs.add(bmrev)
617 matchrevs.add(bmrev)
602 for bmrev in matchrevs:
618 for bmrev in matchrevs:
603 bms.add(repo[bmrev].rev())
619 bms.add(repo[bmrev].rev())
604 else:
620 else:
605 bms = {repo[r].rev() for r in repo._bookmarks.values()}
621 bms = {repo[r].rev() for r in repo._bookmarks.values()}
606 bms -= {nullrev}
622 bms -= {nullrev}
607 return subset & bms
623 return subset & bms
608
624
609
625
610 @predicate(b'branch(string or set)', safe=True, weight=10)
626 @predicate(b'branch(string or set)', safe=True, weight=10)
611 def branch(repo, subset, x):
627 def branch(repo, subset, x):
612 """
628 """
613 All changesets belonging to the given branch or the branches of the given
629 All changesets belonging to the given branch or the branches of the given
614 changesets.
630 changesets.
615
631
616 Pattern matching is supported for `string`. See
632 Pattern matching is supported for `string`. See
617 :hg:`help revisions.patterns`.
633 :hg:`help revisions.patterns`.
618 """
634 """
619 getbi = repo.revbranchcache().branchinfo
635 getbi = repo.revbranchcache().branchinfo
620
636
621 def getbranch(r):
637 def getbranch(r):
622 try:
638 try:
623 return getbi(r)[0]
639 return getbi(r)[0]
624 except error.WdirUnsupported:
640 except error.WdirUnsupported:
625 return repo[r].branch()
641 return repo[r].branch()
626
642
627 try:
643 try:
628 b = getstring(x, b'')
644 b = getstring(x, b'')
629 except error.ParseError:
645 except error.ParseError:
630 # not a string, but another revspec, e.g. tip()
646 # not a string, but another revspec, e.g. tip()
631 pass
647 pass
632 else:
648 else:
633 kind, pattern, matcher = stringutil.stringmatcher(b)
649 kind, pattern, matcher = stringutil.stringmatcher(b)
634 if kind == b'literal':
650 if kind == b'literal':
635 # note: falls through to the revspec case if no branch with
651 # note: falls through to the revspec case if no branch with
636 # this name exists and pattern kind is not specified explicitly
652 # this name exists and pattern kind is not specified explicitly
637 if repo.branchmap().hasbranch(pattern):
653 if repo.branchmap().hasbranch(pattern):
638 return subset.filter(
654 return subset.filter(
639 lambda r: matcher(getbranch(r)),
655 lambda r: matcher(getbranch(r)),
640 condrepr=(b'<branch %r>', b),
656 condrepr=(b'<branch %r>', b),
641 )
657 )
642 if b.startswith(b'literal:'):
658 if b.startswith(b'literal:'):
643 raise error.RepoLookupError(
659 raise error.RepoLookupError(
644 _(b"branch '%s' does not exist") % pattern
660 _(b"branch '%s' does not exist") % pattern
645 )
661 )
646 else:
662 else:
647 return subset.filter(
663 return subset.filter(
648 lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
664 lambda r: matcher(getbranch(r)), condrepr=(b'<branch %r>', b)
649 )
665 )
650
666
651 s = getset(repo, fullreposet(repo), x)
667 s = getset(repo, fullreposet(repo), x)
652 b = set()
668 b = set()
653 for r in s:
669 for r in s:
654 b.add(getbranch(r))
670 b.add(getbranch(r))
655 c = s.__contains__
671 c = s.__contains__
656 return subset.filter(
672 return subset.filter(
657 lambda r: c(r) or getbranch(r) in b,
673 lambda r: c(r) or getbranch(r) in b,
658 condrepr=lambda: b'<branch %r>' % _sortedb(b),
674 condrepr=lambda: b'<branch %r>' % _sortedb(b),
659 )
675 )
660
676
661
677
662 @predicate(b'phasedivergent()', safe=True)
678 @predicate(b'phasedivergent()', safe=True)
663 def phasedivergent(repo, subset, x):
679 def phasedivergent(repo, subset, x):
664 """Mutable changesets marked as successors of public changesets.
680 """Mutable changesets marked as successors of public changesets.
665
681
666 Only non-public and non-obsolete changesets can be `phasedivergent`.
682 Only non-public and non-obsolete changesets can be `phasedivergent`.
667 (EXPERIMENTAL)
683 (EXPERIMENTAL)
668 """
684 """
669 # i18n: "phasedivergent" is a keyword
685 # i18n: "phasedivergent" is a keyword
670 getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
686 getargs(x, 0, 0, _(b"phasedivergent takes no arguments"))
671 phasedivergent = obsmod.getrevs(repo, b'phasedivergent')
687 phasedivergent = obsmod.getrevs(repo, b'phasedivergent')
672 return subset & phasedivergent
688 return subset & phasedivergent
673
689
674
690
675 @predicate(b'bundle()', safe=True)
691 @predicate(b'bundle()', safe=True)
676 def bundle(repo, subset, x):
692 def bundle(repo, subset, x):
677 """Changesets in the bundle.
693 """Changesets in the bundle.
678
694
679 Bundle must be specified by the -R option."""
695 Bundle must be specified by the -R option."""
680
696
681 try:
697 try:
682 bundlerevs = repo.changelog.bundlerevs
698 bundlerevs = repo.changelog.bundlerevs
683 except AttributeError:
699 except AttributeError:
684 raise error.Abort(_(b"no bundle provided - specify with -R"))
700 raise error.Abort(_(b"no bundle provided - specify with -R"))
685 return subset & bundlerevs
701 return subset & bundlerevs
686
702
687
703
688 def checkstatus(repo, subset, pat, field):
704 def checkstatus(repo, subset, pat, field):
689 """Helper for status-related revsets (adds, removes, modifies).
705 """Helper for status-related revsets (adds, removes, modifies).
690 The field parameter says which kind is desired.
706 The field parameter says which kind is desired.
691 """
707 """
692 hasset = matchmod.patkind(pat) == b'set'
708 hasset = matchmod.patkind(pat) == b'set'
693
709
694 mcache = [None]
710 mcache = [None]
695
711
696 def matches(x):
712 def matches(x):
697 c = repo[x]
713 c = repo[x]
698 if not mcache[0] or hasset:
714 if not mcache[0] or hasset:
699 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
715 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
700 m = mcache[0]
716 m = mcache[0]
701 fname = None
717 fname = None
702
718
703 assert m is not None # help pytype
719 assert m is not None # help pytype
704 if not m.anypats() and len(m.files()) == 1:
720 if not m.anypats() and len(m.files()) == 1:
705 fname = m.files()[0]
721 fname = m.files()[0]
706 if fname is not None:
722 if fname is not None:
707 if fname not in c.files():
723 if fname not in c.files():
708 return False
724 return False
709 else:
725 else:
710 if not any(m(f) for f in c.files()):
726 if not any(m(f) for f in c.files()):
711 return False
727 return False
712 files = getattr(repo.status(c.p1().node(), c.node()), field)
728 files = getattr(repo.status(c.p1().node(), c.node()), field)
713 if fname is not None:
729 if fname is not None:
714 if fname in files:
730 if fname in files:
715 return True
731 return True
716 else:
732 else:
717 if any(m(f) for f in files):
733 if any(m(f) for f in files):
718 return True
734 return True
719
735
720 return subset.filter(
736 return subset.filter(
721 matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
737 matches, condrepr=(b'<status.%s %r>', pycompat.sysbytes(field), pat)
722 )
738 )
723
739
724
740
725 def _children(repo, subset, parentset):
741 def _children(repo, subset, parentset):
726 if not parentset:
742 if not parentset:
727 return baseset()
743 return baseset()
728 cs = set()
744 cs = set()
729 pr = repo.changelog.parentrevs
745 pr = repo.changelog.parentrevs
730 minrev = parentset.min()
746 minrev = parentset.min()
731 for r in subset:
747 for r in subset:
732 if r <= minrev:
748 if r <= minrev:
733 continue
749 continue
734 p1, p2 = pr(r)
750 p1, p2 = pr(r)
735 if p1 in parentset:
751 if p1 in parentset:
736 cs.add(r)
752 cs.add(r)
737 if p2 != nullrev and p2 in parentset:
753 if p2 != nullrev and p2 in parentset:
738 cs.add(r)
754 cs.add(r)
739 return baseset(cs)
755 return baseset(cs)
740
756
741
757
742 @predicate(b'children(set)', safe=True)
758 @predicate(b'children(set)', safe=True)
743 def children(repo, subset, x):
759 def children(repo, subset, x):
744 """Child changesets of changesets in set."""
760 """Child changesets of changesets in set."""
745 s = getset(repo, fullreposet(repo), x)
761 s = getset(repo, fullreposet(repo), x)
746 cs = _children(repo, subset, s)
762 cs = _children(repo, subset, s)
747 return subset & cs
763 return subset & cs
748
764
749
765
750 @predicate(b'closed()', safe=True, weight=10)
766 @predicate(b'closed()', safe=True, weight=10)
751 def closed(repo, subset, x):
767 def closed(repo, subset, x):
752 """Changeset is closed."""
768 """Changeset is closed."""
753 # i18n: "closed" is a keyword
769 # i18n: "closed" is a keyword
754 getargs(x, 0, 0, _(b"closed takes no arguments"))
770 getargs(x, 0, 0, _(b"closed takes no arguments"))
755 return subset.filter(
771 return subset.filter(
756 lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>'
772 lambda r: repo[r].closesbranch(), condrepr=b'<branch closed>'
757 )
773 )
758
774
759
775
760 # for internal use
776 # for internal use
761 @predicate(b'_commonancestorheads(set)', safe=True)
777 @predicate(b'_commonancestorheads(set)', safe=True)
762 def _commonancestorheads(repo, subset, x):
778 def _commonancestorheads(repo, subset, x):
763 # This is an internal method is for quickly calculating "heads(::x and
779 # This is an internal method is for quickly calculating "heads(::x and
764 # ::y)"
780 # ::y)"
765
781
766 # These greatest common ancestors are the same ones that the consensus bid
782 # These greatest common ancestors are the same ones that the consensus bid
767 # merge will find.
783 # merge will find.
768 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
784 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
769
785
770 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
786 ancs = repo.changelog._commonancestorsheads(*list(startrevs))
771 return subset & baseset(ancs)
787 return subset & baseset(ancs)
772
788
773
789
774 @predicate(b'commonancestors(set)', safe=True)
790 @predicate(b'commonancestors(set)', safe=True)
775 def commonancestors(repo, subset, x):
791 def commonancestors(repo, subset, x):
776 """Changesets that are ancestors of every changeset in set."""
792 """Changesets that are ancestors of every changeset in set."""
777 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
793 startrevs = getset(repo, fullreposet(repo), x, order=anyorder)
778 if not startrevs:
794 if not startrevs:
779 return baseset()
795 return baseset()
780 for r in startrevs:
796 for r in startrevs:
781 subset &= dagop.revancestors(repo, baseset([r]))
797 subset &= dagop.revancestors(repo, baseset([r]))
782 return subset
798 return subset
783
799
784
800
785 @predicate(b'conflictlocal()', safe=True)
801 @predicate(b'conflictlocal()', safe=True)
786 def conflictlocal(repo, subset, x):
802 def conflictlocal(repo, subset, x):
787 """The local side of the merge, if currently in an unresolved merge.
803 """The local side of the merge, if currently in an unresolved merge.
788
804
789 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
805 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
790 """
806 """
791 getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
807 getargs(x, 0, 0, _(b"conflictlocal takes no arguments"))
792 from . import mergestate as mergestatemod
808 from . import mergestate as mergestatemod
793
809
794 mergestate = mergestatemod.mergestate.read(repo)
810 mergestate = mergestatemod.mergestate.read(repo)
795 if mergestate.active() and repo.changelog.hasnode(mergestate.local):
811 if mergestate.active() and repo.changelog.hasnode(mergestate.local):
796 return subset & {repo.changelog.rev(mergestate.local)}
812 return subset & {repo.changelog.rev(mergestate.local)}
797
813
798 return baseset()
814 return baseset()
799
815
800
816
801 @predicate(b'conflictother()', safe=True)
817 @predicate(b'conflictother()', safe=True)
802 def conflictother(repo, subset, x):
818 def conflictother(repo, subset, x):
803 """The other side of the merge, if currently in an unresolved merge.
819 """The other side of the merge, if currently in an unresolved merge.
804
820
805 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
821 "merge" here includes merge conflicts from e.g. 'hg rebase' or 'hg graft'.
806 """
822 """
807 getargs(x, 0, 0, _(b"conflictother takes no arguments"))
823 getargs(x, 0, 0, _(b"conflictother takes no arguments"))
808 from . import mergestate as mergestatemod
824 from . import mergestate as mergestatemod
809
825
810 mergestate = mergestatemod.mergestate.read(repo)
826 mergestate = mergestatemod.mergestate.read(repo)
811 if mergestate.active() and repo.changelog.hasnode(mergestate.other):
827 if mergestate.active() and repo.changelog.hasnode(mergestate.other):
812 return subset & {repo.changelog.rev(mergestate.other)}
828 return subset & {repo.changelog.rev(mergestate.other)}
813
829
814 return baseset()
830 return baseset()
815
831
816
832
817 @predicate(b'contains(pattern)', weight=100)
833 @predicate(b'contains(pattern)', weight=100)
818 def contains(repo, subset, x):
834 def contains(repo, subset, x):
819 """The revision's manifest contains a file matching pattern (but might not
835 """The revision's manifest contains a file matching pattern (but might not
820 modify it). See :hg:`help patterns` for information about file patterns.
836 modify it). See :hg:`help patterns` for information about file patterns.
821
837
822 The pattern without explicit kind like ``glob:`` is expected to be
838 The pattern without explicit kind like ``glob:`` is expected to be
823 relative to the current directory and match against a file exactly
839 relative to the current directory and match against a file exactly
824 for efficiency.
840 for efficiency.
825 """
841 """
826 # i18n: "contains" is a keyword
842 # i18n: "contains" is a keyword
827 pat = getstring(x, _(b"contains requires a pattern"))
843 pat = getstring(x, _(b"contains requires a pattern"))
828
844
829 def matches(x):
845 def matches(x):
830 if not matchmod.patkind(pat):
846 if not matchmod.patkind(pat):
831 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
847 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
832 if pats in repo[x]:
848 if pats in repo[x]:
833 return True
849 return True
834 else:
850 else:
835 c = repo[x]
851 c = repo[x]
836 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
852 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
837 for f in c.manifest():
853 for f in c.manifest():
838 if m(f):
854 if m(f):
839 return True
855 return True
840 return False
856 return False
841
857
842 return subset.filter(matches, condrepr=(b'<contains %r>', pat))
858 return subset.filter(matches, condrepr=(b'<contains %r>', pat))
843
859
844
860
845 @predicate(b'converted([id])', safe=True)
861 @predicate(b'converted([id])', safe=True)
846 def converted(repo, subset, x):
862 def converted(repo, subset, x):
847 """Changesets converted from the given identifier in the old repository if
863 """Changesets converted from the given identifier in the old repository if
848 present, or all converted changesets if no identifier is specified.
864 present, or all converted changesets if no identifier is specified.
849 """
865 """
850
866
851 # There is exactly no chance of resolving the revision, so do a simple
867 # There is exactly no chance of resolving the revision, so do a simple
852 # string compare and hope for the best
868 # string compare and hope for the best
853
869
854 rev = None
870 rev = None
855 # i18n: "converted" is a keyword
871 # i18n: "converted" is a keyword
856 l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
872 l = getargs(x, 0, 1, _(b'converted takes one or no arguments'))
857 if l:
873 if l:
858 # i18n: "converted" is a keyword
874 # i18n: "converted" is a keyword
859 rev = getstring(l[0], _(b'converted requires a revision'))
875 rev = getstring(l[0], _(b'converted requires a revision'))
860
876
861 def _matchvalue(r):
877 def _matchvalue(r):
862 source = repo[r].extra().get(b'convert_revision', None)
878 source = repo[r].extra().get(b'convert_revision', None)
863 return source is not None and (rev is None or source.startswith(rev))
879 return source is not None and (rev is None or source.startswith(rev))
864
880
865 return subset.filter(
881 return subset.filter(
866 lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev)
882 lambda r: _matchvalue(r), condrepr=(b'<converted %r>', rev)
867 )
883 )
868
884
869
885
870 @predicate(b'date(interval)', safe=True, weight=10)
886 @predicate(b'date(interval)', safe=True, weight=10)
871 def date(repo, subset, x):
887 def date(repo, subset, x):
872 """Changesets within the interval, see :hg:`help dates`."""
888 """Changesets within the interval, see :hg:`help dates`."""
873 # i18n: "date" is a keyword
889 # i18n: "date" is a keyword
874 ds = getstring(x, _(b"date requires a string"))
890 ds = getstring(x, _(b"date requires a string"))
875 dm = dateutil.matchdate(ds)
891 dm = dateutil.matchdate(ds)
876 return subset.filter(
892 return subset.filter(
877 lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds)
893 lambda x: dm(repo[x].date()[0]), condrepr=(b'<date %r>', ds)
878 )
894 )
879
895
880
896
881 @predicate(b'desc(string)', safe=True, weight=10)
897 @predicate(b'desc(string)', safe=True, weight=10)
882 def desc(repo, subset, x):
898 def desc(repo, subset, x):
883 """Search commit message for string. The match is case-insensitive.
899 """Search commit message for string. The match is case-insensitive.
884
900
885 Pattern matching is supported for `string`. See
901 Pattern matching is supported for `string`. See
886 :hg:`help revisions.patterns`.
902 :hg:`help revisions.patterns`.
887 """
903 """
888 # i18n: "desc" is a keyword
904 # i18n: "desc" is a keyword
889 ds = getstring(x, _(b"desc requires a string"))
905 ds = getstring(x, _(b"desc requires a string"))
890
906
891 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
907 kind, pattern, matcher = _substringmatcher(ds, casesensitive=False)
892
908
893 return subset.filter(
909 return subset.filter(
894 lambda r: matcher(repo[r].description()), condrepr=(b'<desc %r>', ds)
910 lambda r: matcher(repo[r].description()), condrepr=(b'<desc %r>', ds)
895 )
911 )
896
912
897
913
898 def _descendants(
914 def _descendants(
899 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
915 repo, subset, x, followfirst=False, startdepth=None, stopdepth=None
900 ):
916 ):
901 roots = getset(repo, fullreposet(repo), x)
917 roots = getset(repo, fullreposet(repo), x)
902 if not roots:
918 if not roots:
903 return baseset()
919 return baseset()
904 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
920 s = dagop.revdescendants(repo, roots, followfirst, startdepth, stopdepth)
905 return subset & s
921 return subset & s
906
922
907
923
908 @predicate(b'descendants(set[, depth])', safe=True)
924 @predicate(b'descendants(set[, depth])', safe=True)
909 def descendants(repo, subset, x):
925 def descendants(repo, subset, x):
910 """Changesets which are descendants of changesets in set, including the
926 """Changesets which are descendants of changesets in set, including the
911 given changesets themselves.
927 given changesets themselves.
912
928
913 If depth is specified, the result only includes changesets up to
929 If depth is specified, the result only includes changesets up to
914 the specified generation.
930 the specified generation.
915 """
931 """
916 # startdepth is for internal use only until we can decide the UI
932 # startdepth is for internal use only until we can decide the UI
917 args = getargsdict(x, b'descendants', b'set depth startdepth')
933 args = getargsdict(x, b'descendants', b'set depth startdepth')
918 if b'set' not in args:
934 if b'set' not in args:
919 # i18n: "descendants" is a keyword
935 # i18n: "descendants" is a keyword
920 raise error.ParseError(_(b'descendants takes at least 1 argument'))
936 raise error.ParseError(_(b'descendants takes at least 1 argument'))
921 startdepth = stopdepth = None
937 startdepth = stopdepth = None
922 if b'startdepth' in args:
938 if b'startdepth' in args:
923 n = getinteger(
939 n = getinteger(
924 args[b'startdepth'], b"descendants expects an integer startdepth"
940 args[b'startdepth'], b"descendants expects an integer startdepth"
925 )
941 )
926 if n < 0:
942 if n < 0:
927 raise error.ParseError(b"negative startdepth")
943 raise error.ParseError(b"negative startdepth")
928 startdepth = n
944 startdepth = n
929 if b'depth' in args:
945 if b'depth' in args:
930 # i18n: "descendants" is a keyword
946 # i18n: "descendants" is a keyword
931 n = getinteger(
947 n = getinteger(
932 args[b'depth'], _(b"descendants expects an integer depth")
948 args[b'depth'], _(b"descendants expects an integer depth")
933 )
949 )
934 if n < 0:
950 if n < 0:
935 raise error.ParseError(_(b"negative depth"))
951 raise error.ParseError(_(b"negative depth"))
936 stopdepth = n + 1
952 stopdepth = n + 1
937 return _descendants(
953 return _descendants(
938 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
954 repo, subset, args[b'set'], startdepth=startdepth, stopdepth=stopdepth
939 )
955 )
940
956
941
957
942 @predicate(b'_firstdescendants', safe=True)
958 @predicate(b'_firstdescendants', safe=True)
943 def _firstdescendants(repo, subset, x):
959 def _firstdescendants(repo, subset, x):
944 # ``_firstdescendants(set)``
960 # ``_firstdescendants(set)``
945 # Like ``descendants(set)`` but follows only the first parents.
961 # Like ``descendants(set)`` but follows only the first parents.
946 return _descendants(repo, subset, x, followfirst=True)
962 return _descendants(repo, subset, x, followfirst=True)
947
963
948
964
949 @predicate(b'destination([set])', safe=True, weight=10)
965 @predicate(b'destination([set])', safe=True, weight=10)
950 def destination(repo, subset, x):
966 def destination(repo, subset, x):
951 """Changesets that were created by a graft, transplant or rebase operation,
967 """Changesets that were created by a graft, transplant or rebase operation,
952 with the given revisions specified as the source. Omitting the optional set
968 with the given revisions specified as the source. Omitting the optional set
953 is the same as passing all().
969 is the same as passing all().
954 """
970 """
955 if x is not None:
971 if x is not None:
956 sources = getset(repo, fullreposet(repo), x)
972 sources = getset(repo, fullreposet(repo), x)
957 else:
973 else:
958 sources = fullreposet(repo)
974 sources = fullreposet(repo)
959
975
960 dests = set()
976 dests = set()
961
977
962 # subset contains all of the possible destinations that can be returned, so
978 # subset contains all of the possible destinations that can be returned, so
963 # iterate over them and see if their source(s) were provided in the arg set.
979 # iterate over them and see if their source(s) were provided in the arg set.
964 # Even if the immediate src of r is not in the arg set, src's source (or
980 # Even if the immediate src of r is not in the arg set, src's source (or
965 # further back) may be. Scanning back further than the immediate src allows
981 # further back) may be. Scanning back further than the immediate src allows
966 # transitive transplants and rebases to yield the same results as transitive
982 # transitive transplants and rebases to yield the same results as transitive
967 # grafts.
983 # grafts.
968 for r in subset:
984 for r in subset:
969 src = _getrevsource(repo, r)
985 src = _getrevsource(repo, r)
970 lineage = None
986 lineage = None
971
987
972 while src is not None:
988 while src is not None:
973 if lineage is None:
989 if lineage is None:
974 lineage = list()
990 lineage = list()
975
991
976 lineage.append(r)
992 lineage.append(r)
977
993
978 # The visited lineage is a match if the current source is in the arg
994 # The visited lineage is a match if the current source is in the arg
979 # set. Since every candidate dest is visited by way of iterating
995 # set. Since every candidate dest is visited by way of iterating
980 # subset, any dests further back in the lineage will be tested by a
996 # subset, any dests further back in the lineage will be tested by a
981 # different iteration over subset. Likewise, if the src was already
997 # different iteration over subset. Likewise, if the src was already
982 # selected, the current lineage can be selected without going back
998 # selected, the current lineage can be selected without going back
983 # further.
999 # further.
984 if src in sources or src in dests:
1000 if src in sources or src in dests:
985 dests.update(lineage)
1001 dests.update(lineage)
986 break
1002 break
987
1003
988 r = src
1004 r = src
989 src = _getrevsource(repo, r)
1005 src = _getrevsource(repo, r)
990
1006
991 return subset.filter(
1007 return subset.filter(
992 dests.__contains__,
1008 dests.__contains__,
993 condrepr=lambda: b'<destination %r>' % _sortedb(dests),
1009 condrepr=lambda: b'<destination %r>' % _sortedb(dests),
994 )
1010 )
995
1011
996
1012
997 @predicate(b'diffcontains(pattern)', weight=110)
1013 @predicate(b'diffcontains(pattern)', weight=110)
998 def diffcontains(repo, subset, x):
1014 def diffcontains(repo, subset, x):
999 """Search revision differences for when the pattern was added or removed.
1015 """Search revision differences for when the pattern was added or removed.
1000
1016
1001 The pattern may be a substring literal or a regular expression. See
1017 The pattern may be a substring literal or a regular expression. See
1002 :hg:`help revisions.patterns`.
1018 :hg:`help revisions.patterns`.
1003 """
1019 """
1004 args = getargsdict(x, b'diffcontains', b'pattern')
1020 args = getargsdict(x, b'diffcontains', b'pattern')
1005 if b'pattern' not in args:
1021 if b'pattern' not in args:
1006 # i18n: "diffcontains" is a keyword
1022 # i18n: "diffcontains" is a keyword
1007 raise error.ParseError(_(b'diffcontains takes at least 1 argument'))
1023 raise error.ParseError(_(b'diffcontains takes at least 1 argument'))
1008
1024
1009 pattern = getstring(
1025 pattern = getstring(
1010 args[b'pattern'], _(b'diffcontains requires a string pattern')
1026 args[b'pattern'], _(b'diffcontains requires a string pattern')
1011 )
1027 )
1012 regexp = stringutil.substringregexp(pattern, re.M)
1028 regexp = stringutil.substringregexp(pattern, re.M)
1013
1029
1014 # TODO: add support for file pattern and --follow. For example,
1030 # TODO: add support for file pattern and --follow. For example,
1015 # diffcontains(pattern[, set]) where set may be file(pattern) or
1031 # diffcontains(pattern[, set]) where set may be file(pattern) or
1016 # follow(pattern), and we'll eventually add support for narrowing
1032 # follow(pattern), and we'll eventually add support for narrowing
1017 # files by revset?
1033 # files by revset?
1018 fmatch = matchmod.always()
1034 fmatch = matchmod.always()
1019
1035
1020 def makefilematcher(ctx):
1036 def makefilematcher(ctx):
1021 return fmatch
1037 return fmatch
1022
1038
1023 # TODO: search in a windowed way
1039 # TODO: search in a windowed way
1024 searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)
1040 searcher = grepmod.grepsearcher(repo.ui, repo, regexp, diff=True)
1025
1041
1026 def testdiff(rev):
1042 def testdiff(rev):
1027 # consume the generator to discard revfiles/matches cache
1043 # consume the generator to discard revfiles/matches cache
1028 found = False
1044 found = False
1029 for fn, ctx, pstates, states in searcher.searchfiles(
1045 for fn, ctx, pstates, states in searcher.searchfiles(
1030 baseset([rev]), makefilematcher
1046 baseset([rev]), makefilematcher
1031 ):
1047 ):
1032 if next(grepmod.difflinestates(pstates, states), None):
1048 if next(grepmod.difflinestates(pstates, states), None):
1033 found = True
1049 found = True
1034 return found
1050 return found
1035
1051
1036 return subset.filter(testdiff, condrepr=(b'<diffcontains %r>', pattern))
1052 return subset.filter(testdiff, condrepr=(b'<diffcontains %r>', pattern))
1037
1053
1038
1054
1039 @predicate(b'contentdivergent()', safe=True)
1055 @predicate(b'contentdivergent()', safe=True)
1040 def contentdivergent(repo, subset, x):
1056 def contentdivergent(repo, subset, x):
1041 """
1057 """
1042 Final successors of changesets with an alternative set of final
1058 Final successors of changesets with an alternative set of final
1043 successors. (EXPERIMENTAL)
1059 successors. (EXPERIMENTAL)
1044 """
1060 """
1045 # i18n: "contentdivergent" is a keyword
1061 # i18n: "contentdivergent" is a keyword
1046 getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
1062 getargs(x, 0, 0, _(b"contentdivergent takes no arguments"))
1047 contentdivergent = obsmod.getrevs(repo, b'contentdivergent')
1063 contentdivergent = obsmod.getrevs(repo, b'contentdivergent')
1048 return subset & contentdivergent
1064 return subset & contentdivergent
1049
1065
1050
1066
1051 @predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
1067 @predicate(b'expectsize(set[, size])', safe=True, takeorder=True)
1052 def expectsize(repo, subset, x, order):
1068 def expectsize(repo, subset, x, order):
1053 """Return the given revset if size matches the revset size.
1069 """Return the given revset if size matches the revset size.
1054 Abort if the revset doesn't expect given size.
1070 Abort if the revset doesn't expect given size.
1055 size can either be an integer range or an integer.
1071 size can either be an integer range or an integer.
1056
1072
1057 For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
1073 For example, ``expectsize(0:1, 3:5)`` will abort as revset size is 2 and
1058 2 is not between 3 and 5 inclusive."""
1074 2 is not between 3 and 5 inclusive."""
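# Conversely (illustrative; assumes the repository has at least four
# revisions), ``expectsize(0:3, 4)`` returns ``0:3`` unchanged, since that
# revset contains exactly four revisions, and ``expectsize(0:3, 2:5)`` also
# passes because four falls within the 2 to 5 range.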
1059
1075
1060 args = getargsdict(x, b'expectsize', b'set size')
1076 args = getargsdict(x, b'expectsize', b'set size')
1061 minsize = 0
1077 minsize = 0
1062 maxsize = len(repo) + 1
1078 maxsize = len(repo) + 1
1063 err = b''
1079 err = b''
1064 if b'size' not in args or b'set' not in args:
1080 if b'size' not in args or b'set' not in args:
1065 raise error.ParseError(_(b'invalid set of arguments'))
1081 raise error.ParseError(_(b'invalid set of arguments'))
1066 minsize, maxsize = getintrange(
1082 minsize, maxsize = getintrange(
1067 args[b'size'],
1083 args[b'size'],
1068 _(b'expectsize requires a size range or a positive integer'),
1084 _(b'expectsize requires a size range or a positive integer'),
1069 _(b'size range bounds must be integers'),
1085 _(b'size range bounds must be integers'),
1070 minsize,
1086 minsize,
1071 maxsize,
1087 maxsize,
1072 )
1088 )
1073 if minsize < 0 or maxsize < 0:
1089 if minsize < 0 or maxsize < 0:
1074 raise error.ParseError(_(b'negative size'))
1090 raise error.ParseError(_(b'negative size'))
1075 rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
1091 rev = getset(repo, fullreposet(repo), args[b'set'], order=order)
1076 if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
1092 if minsize != maxsize and (len(rev) < minsize or len(rev) > maxsize):
1077 err = _(b'revset size mismatch. expected between %d and %d, got %d') % (
1093 err = _(b'revset size mismatch. expected between %d and %d, got %d') % (
1078 minsize,
1094 minsize,
1079 maxsize,
1095 maxsize,
1080 len(rev),
1096 len(rev),
1081 )
1097 )
1082 elif minsize == maxsize and len(rev) != minsize:
1098 elif minsize == maxsize and len(rev) != minsize:
1083 err = _(b'revset size mismatch. expected %d, got %d') % (
1099 err = _(b'revset size mismatch. expected %d, got %d') % (
1084 minsize,
1100 minsize,
1085 len(rev),
1101 len(rev),
1086 )
1102 )
1087 if err:
1103 if err:
1088 raise error.RepoLookupError(err)
1104 raise error.RepoLookupError(err)
1089 if order == followorder:
1105 if order == followorder:
1090 return subset & rev
1106 return subset & rev
1091 else:
1107 else:
1092 return rev & subset
1108 return rev & subset
1093
1109
1094
1110
1095 @predicate(b'extdata(source)', safe=False, weight=100)
1111 @predicate(b'extdata(source)', safe=False, weight=100)
1096 def extdata(repo, subset, x):
1112 def extdata(repo, subset, x):
1097 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
1113 """Changesets in the specified extdata source. (EXPERIMENTAL)"""
1098 # i18n: "extdata" is a keyword
1114 # i18n: "extdata" is a keyword
1099 args = getargsdict(x, b'extdata', b'source')
1115 args = getargsdict(x, b'extdata', b'source')
1100 source = getstring(
1116 source = getstring(
1101 args.get(b'source'),
1117 args.get(b'source'),
1102 # i18n: "extdata" is a keyword
1118 # i18n: "extdata" is a keyword
1103 _(b'extdata takes at least 1 string argument'),
1119 _(b'extdata takes at least 1 string argument'),
1104 )
1120 )
1105 data = scmutil.extdatasource(repo, source)
1121 data = scmutil.extdatasource(repo, source)
1106 return subset & baseset(data)
1122 return subset & baseset(data)
1107
1123
1108
1124
1109 @predicate(b'extinct()', safe=True)
1125 @predicate(b'extinct()', safe=True)
1110 def extinct(repo, subset, x):
1126 def extinct(repo, subset, x):
1111 """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)"""
1127 """Obsolete changesets with obsolete descendants only. (EXPERIMENTAL)"""
1112 # i18n: "extinct" is a keyword
1128 # i18n: "extinct" is a keyword
1113 getargs(x, 0, 0, _(b"extinct takes no arguments"))
1129 getargs(x, 0, 0, _(b"extinct takes no arguments"))
1114 extincts = obsmod.getrevs(repo, b'extinct')
1130 extincts = obsmod.getrevs(repo, b'extinct')
1115 return subset & extincts
1131 return subset & extincts
1116
1132
1117
1133
1118 @predicate(b'extra(label, [value])', safe=True)
1134 @predicate(b'extra(label, [value])', safe=True)
1119 def extra(repo, subset, x):
1135 def extra(repo, subset, x):
1120 """Changesets with the given label in the extra metadata, with the given
1136 """Changesets with the given label in the extra metadata, with the given
1121 optional value.
1137 optional value.
1122
1138
1123 Pattern matching is supported for `value`. See
1139 Pattern matching is supported for `value`. See
1124 :hg:`help revisions.patterns`.
1140 :hg:`help revisions.patterns`.
1125 """
1141 """
1126 args = getargsdict(x, b'extra', b'label value')
1142 args = getargsdict(x, b'extra', b'label value')
1127 if b'label' not in args:
1143 if b'label' not in args:
1128 # i18n: "extra" is a keyword
1144 # i18n: "extra" is a keyword
1129 raise error.ParseError(_(b'extra takes at least 1 argument'))
1145 raise error.ParseError(_(b'extra takes at least 1 argument'))
1130 # i18n: "extra" is a keyword
1146 # i18n: "extra" is a keyword
1131 label = getstring(
1147 label = getstring(
1132 args[b'label'], _(b'first argument to extra must be a string')
1148 args[b'label'], _(b'first argument to extra must be a string')
1133 )
1149 )
1134 value = None
1150 value = None
1135
1151
1136 if b'value' in args:
1152 if b'value' in args:
1137 # i18n: "extra" is a keyword
1153 # i18n: "extra" is a keyword
1138 value = getstring(
1154 value = getstring(
1139 args[b'value'], _(b'second argument to extra must be a string')
1155 args[b'value'], _(b'second argument to extra must be a string')
1140 )
1156 )
1141 kind, value, matcher = stringutil.stringmatcher(value)
1157 kind, value, matcher = stringutil.stringmatcher(value)
1142
1158
1143 def _matchvalue(r):
1159 def _matchvalue(r):
1144 extra = repo[r].extra()
1160 extra = repo[r].extra()
1145 return label in extra and (value is None or matcher(extra[label]))
1161 return label in extra and (value is None or matcher(extra[label]))
1146
1162
1147 return subset.filter(
1163 return subset.filter(
1148 lambda r: _matchvalue(r), condrepr=(b'<extra[%r] %r>', label, value)
1164 lambda r: _matchvalue(r), condrepr=(b'<extra[%r] %r>', label, value)
1149 )
1165 )
1150
1166
1151
1167
1152 @predicate(b'filelog(pattern)', safe=True)
1168 @predicate(b'filelog(pattern)', safe=True)
1153 def filelog(repo, subset, x):
1169 def filelog(repo, subset, x):
1154 """Changesets connected to the specified filelog.
1170 """Changesets connected to the specified filelog.
1155
1171
1156 For performance reasons, visits only revisions mentioned in the file-level
1172 For performance reasons, visits only revisions mentioned in the file-level
1157 filelog, rather than filtering through all changesets (much faster, but
1173 filelog, rather than filtering through all changesets (much faster, but
1158 doesn't include deletes or duplicate changes). For a slower, more accurate
1174 doesn't include deletes or duplicate changes). For a slower, more accurate
1159 result, use ``file()``.
1175 result, use ``file()``.
1160
1176
1161 The pattern without explicit kind like ``glob:`` is expected to be
1177 The pattern without explicit kind like ``glob:`` is expected to be
1162 relative to the current directory and match against a file exactly
1178 relative to the current directory and match against a file exactly
1163 for efficiency.
1179 for efficiency.
1164 """
1180 """
1165
1181
1166 # i18n: "filelog" is a keyword
1182 # i18n: "filelog" is a keyword
1167 pat = getstring(x, _(b"filelog requires a pattern"))
1183 pat = getstring(x, _(b"filelog requires a pattern"))
1168 s = set()
1184 s = set()
1169 cl = repo.changelog
1185 cl = repo.changelog
1170
1186
1171 if not matchmod.patkind(pat):
1187 if not matchmod.patkind(pat):
1172 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1188 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
1173 files = [f]
1189 files = [f]
1174 else:
1190 else:
1175 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1191 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
1176 files = (f for f in repo[None] if m(f))
1192 files = (f for f in repo[None] if m(f))
1177
1193
1178 for f in files:
1194 for f in files:
1179 fl = repo.file(f)
1195 fl = repo.file(f)
1180 known = {}
1196 known = {}
1181 scanpos = 0
1197 scanpos = 0
1182 for fr in list(fl):
1198 for fr in list(fl):
1183 fn = fl.node(fr)
1199 fn = fl.node(fr)
1184 if fn in known:
1200 if fn in known:
1185 s.add(known[fn])
1201 s.add(known[fn])
1186 continue
1202 continue
1187
1203
1188 lr = fl.linkrev(fr)
1204 lr = fl.linkrev(fr)
1189 if lr in cl:
1205 if lr in cl:
1190 s.add(lr)
1206 s.add(lr)
1191 elif scanpos is not None:
1207 elif scanpos is not None:
1192 # lowest matching changeset is filtered, scan further
1208 # lowest matching changeset is filtered, scan further
1193 # ahead in changelog
1209 # ahead in changelog
1194 start = max(lr, scanpos) + 1
1210 start = max(lr, scanpos) + 1
1195 scanpos = None
1211 scanpos = None
1196 for r in cl.revs(start):
1212 for r in cl.revs(start):
1197 # minimize parsing of non-matching entries
1213 # minimize parsing of non-matching entries
1198 if f in cl.revision(r) and f in cl.readfiles(r):
1214 if f in cl.revision(r) and f in cl.readfiles(r):
1199 try:
1215 try:
1200 # try to use manifest delta fastpath
1216 # try to use manifest delta fastpath
1201 n = repo[r].filenode(f)
1217 n = repo[r].filenode(f)
1202 if n not in known:
1218 if n not in known:
1203 if n == fn:
1219 if n == fn:
1204 s.add(r)
1220 s.add(r)
1205 scanpos = r
1221 scanpos = r
1206 break
1222 break
1207 else:
1223 else:
1208 known[n] = r
1224 known[n] = r
1209 except error.ManifestLookupError:
1225 except error.ManifestLookupError:
1210 # deletion in changelog
1226 # deletion in changelog
1211 continue
1227 continue
1212
1228
1213 return subset & s
1229 return subset & s
1214
1230
1215
1231
1216 @predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
1232 @predicate(b'first(set, [n])', safe=True, takeorder=True, weight=0)
1217 def first(repo, subset, x, order):
1233 def first(repo, subset, x, order):
1218 """An alias for limit()."""
1234 """An alias for limit()."""
1219 return limit(repo, subset, x, order)
1235 return limit(repo, subset, x, order)
1220
1236
1221
1237
1222 def _follow(repo, subset, x, name, followfirst=False):
1238 def _follow(repo, subset, x, name, followfirst=False):
1223 args = getargsdict(x, name, b'file startrev')
1239 args = getargsdict(x, name, b'file startrev')
1224 revs = None
1240 revs = None
1225 if b'startrev' in args:
1241 if b'startrev' in args:
1226 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1242 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1227 if b'file' in args:
1243 if b'file' in args:
1228 x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
1244 x = getstring(args[b'file'], _(b"%s expected a pattern") % name)
1229 if revs is None:
1245 if revs is None:
1230 revs = [None]
1246 revs = [None]
1231 fctxs = []
1247 fctxs = []
1232 for r in revs:
1248 for r in revs:
1233 ctx = mctx = repo[r]
1249 ctx = mctx = repo[r]
1234 if r is None:
1250 if r is None:
1235 ctx = repo[b'.']
1251 ctx = repo[b'.']
1236 m = matchmod.match(
1252 m = matchmod.match(
1237 repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
1253 repo.root, repo.getcwd(), [x], ctx=mctx, default=b'path'
1238 )
1254 )
1239 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
1255 fctxs.extend(ctx[f].introfilectx() for f in ctx.manifest().walk(m))
1240 s = dagop.filerevancestors(fctxs, followfirst)
1256 s = dagop.filerevancestors(fctxs, followfirst)
1241 else:
1257 else:
1242 if revs is None:
1258 if revs is None:
1243 revs = baseset([repo[b'.'].rev()])
1259 revs = baseset([repo[b'.'].rev()])
1244 s = dagop.revancestors(repo, revs, followfirst)
1260 s = dagop.revancestors(repo, revs, followfirst)
1245
1261
1246 return subset & s
1262 return subset & s
1247
1263
1248
1264
1249 @predicate(b'follow([file[, startrev]])', safe=True)
1265 @predicate(b'follow([file[, startrev]])', safe=True)
1250 def follow(repo, subset, x):
1266 def follow(repo, subset, x):
1251 """
1267 """
1252 An alias for ``::.`` (ancestors of the working directory's first parent).
1268 An alias for ``::.`` (ancestors of the working directory's first parent).
1253 If a file pattern is specified, the histories of files matching the given
1269 If a file pattern is specified, the histories of files matching the given
1254 pattern in the revision given by startrev are followed, including copies.
1270 pattern in the revision given by startrev are followed, including copies.
1255 """
1271 """
1256 return _follow(repo, subset, x, b'follow')
1272 return _follow(repo, subset, x, b'follow')
1257
1273
1258
1274
1259 @predicate(b'_followfirst', safe=True)
1275 @predicate(b'_followfirst', safe=True)
1260 def _followfirst(repo, subset, x):
1276 def _followfirst(repo, subset, x):
1261 # ``followfirst([file[, startrev]])``
1277 # ``followfirst([file[, startrev]])``
1262 # Like ``follow([file[, startrev]])`` but follows only the first parent
1278 # Like ``follow([file[, startrev]])`` but follows only the first parent
1263 # of every revision or file revision.
1279 # of every revision or file revision.
1264 return _follow(repo, subset, x, b'_followfirst', followfirst=True)
1280 return _follow(repo, subset, x, b'_followfirst', followfirst=True)
1265
1281
1266
1282
1267 @predicate(
1283 @predicate(
1268 b'followlines(file, fromline:toline[, startrev=., descend=False])',
1284 b'followlines(file, fromline:toline[, startrev=., descend=False])',
1269 safe=True,
1285 safe=True,
1270 )
1286 )
1271 def followlines(repo, subset, x):
1287 def followlines(repo, subset, x):
1272 """Changesets modifying `file` in line range ('fromline', 'toline').
1288 """Changesets modifying `file` in line range ('fromline', 'toline').
1273
1289
1274 Line range corresponds to 'file' content at 'startrev' and should hence be
1290 Line range corresponds to 'file' content at 'startrev' and should hence be
1275 consistent with file size. If startrev is not specified, the working directory's
1291 consistent with file size. If startrev is not specified, the working directory's
1276 parent is used.
1292 parent is used.
1277
1293
1278 By default, ancestors of 'startrev' are returned. If 'descend' is True,
1294 By default, ancestors of 'startrev' are returned. If 'descend' is True,
1279 descendants of 'startrev' are returned though renames are (currently) not
1295 descendants of 'startrev' are returned though renames are (currently) not
1280 followed in this direction.
1296 followed in this direction.
1281 """
1297 """
1282 args = getargsdict(x, b'followlines', b'file *lines startrev descend')
1298 args = getargsdict(x, b'followlines', b'file *lines startrev descend')
1283 if len(args[b'lines']) != 1:
1299 if len(args[b'lines']) != 1:
1284 raise error.ParseError(_(b"followlines requires a line range"))
1300 raise error.ParseError(_(b"followlines requires a line range"))
1285
1301
1286 rev = b'.'
1302 rev = b'.'
1287 if b'startrev' in args:
1303 if b'startrev' in args:
1288 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1304 revs = getset(repo, fullreposet(repo), args[b'startrev'])
1289 if len(revs) != 1:
1305 if len(revs) != 1:
1290 raise error.ParseError(
1306 raise error.ParseError(
1291 # i18n: "followlines" is a keyword
1307 # i18n: "followlines" is a keyword
1292 _(b"followlines expects exactly one revision")
1308 _(b"followlines expects exactly one revision")
1293 )
1309 )
1294 rev = revs.last()
1310 rev = revs.last()
1295
1311
1296 pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
1312 pat = getstring(args[b'file'], _(b"followlines requires a pattern"))
1297 # i18n: "followlines" is a keyword
1313 # i18n: "followlines" is a keyword
1298 msg = _(b"followlines expects exactly one file")
1314 msg = _(b"followlines expects exactly one file")
1299 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1315 fname = scmutil.parsefollowlinespattern(repo, rev, pat, msg)
1300 fromline, toline = util.processlinerange(
1316 fromline, toline = util.processlinerange(
1301 *getintrange(
1317 *getintrange(
1302 args[b'lines'][0],
1318 args[b'lines'][0],
1303 # i18n: "followlines" is a keyword
1319 # i18n: "followlines" is a keyword
1304 _(b"followlines expects a line number or a range"),
1320 _(b"followlines expects a line number or a range"),
1305 _(b"line range bounds must be integers"),
1321 _(b"line range bounds must be integers"),
1306 )
1322 )
1307 )
1323 )
1308
1324
1309 fctx = repo[rev].filectx(fname)
1325 fctx = repo[rev].filectx(fname)
1310 descend = False
1326 descend = False
1311 if b'descend' in args:
1327 if b'descend' in args:
1312 descend = getboolean(
1328 descend = getboolean(
1313 args[b'descend'],
1329 args[b'descend'],
1314 # i18n: "descend" is a keyword
1330 # i18n: "descend" is a keyword
1315 _(b"descend argument must be a boolean"),
1331 _(b"descend argument must be a boolean"),
1316 )
1332 )
1317 if descend:
1333 if descend:
1318 rs = generatorset(
1334 rs = generatorset(
1319 (
1335 (
1320 c.rev()
1336 c.rev()
1321 for c, _linerange in dagop.blockdescendants(
1337 for c, _linerange in dagop.blockdescendants(
1322 fctx, fromline, toline
1338 fctx, fromline, toline
1323 )
1339 )
1324 ),
1340 ),
1325 iterasc=True,
1341 iterasc=True,
1326 )
1342 )
1327 else:
1343 else:
1328 rs = generatorset(
1344 rs = generatorset(
1329 (
1345 (
1330 c.rev()
1346 c.rev()
1331 for c, _linerange in dagop.blockancestors(
1347 for c, _linerange in dagop.blockancestors(
1332 fctx, fromline, toline
1348 fctx, fromline, toline
1333 )
1349 )
1334 ),
1350 ),
1335 iterasc=False,
1351 iterasc=False,
1336 )
1352 )
1337 return subset & rs
1353 return subset & rs
1338
1354
1339
1355
1340 @predicate(b'nodefromfile(path)')
1356 @predicate(b'nodefromfile(path)')
1341 def nodefromfile(repo, subset, x):
1357 def nodefromfile(repo, subset, x):
1342 """Read a list of nodes from the file at `path`.
1358 """Read a list of nodes from the file at `path`.
1343
1359
1344 This applies `id(LINE)` to each line of the file.
1360 This applies `id(LINE)` to each line of the file.
1345
1361
1346 This is useful when the number of nodes you need to specify gets too large
1362 This is useful when the number of nodes you need to specify gets too large
1347 for the command line.
1363 for the command line.
1348 """
1364 """
1349 path = getstring(x, _(b"nodefromfile requires a file path"))
1365 path = getstring(x, _(b"nodefromfile requires a file path"))
1350 listed_rev = set()
1366 listed_rev = set()
1351 try:
1367 try:
1352 with pycompat.open(path, 'rb') as f:
1368 with pycompat.open(path, 'rb') as f:
1353 for line in f:
1369 for line in f:
1354 n = line.strip()
1370 n = line.strip()
1355 rn = _node(repo, n)
1371 rn = _node(repo, n)
1356 if rn is not None:
1372 if rn is not None:
1357 listed_rev.add(rn)
1373 listed_rev.add(rn)
1358 except IOError as exc:
1374 except IOError as exc:
1359 m = _(b'cannot open nodes file "%s": %s')
1375 m = _(b'cannot open nodes file "%s": %s')
1360 m %= (path, encoding.strtolocal(exc.strerror))
1376 m %= (path, encoding.strtolocal(exc.strerror))
1361 raise error.Abort(m)
1377 raise error.Abort(m)
1362 return subset & baseset(listed_rev)
1378 return subset & baseset(listed_rev)
1363
1379
1364
1380
1365 @predicate(b'all()', safe=True)
1381 @predicate(b'all()', safe=True)
1366 def getall(repo, subset, x):
1382 def getall(repo, subset, x):
1367 """All changesets, the same as ``0:tip``."""
1383 """All changesets, the same as ``0:tip``."""
1368 # i18n: "all" is a keyword
1384 # i18n: "all" is a keyword
1369 getargs(x, 0, 0, _(b"all takes no arguments"))
1385 getargs(x, 0, 0, _(b"all takes no arguments"))
1370 return subset & spanset(repo) # drop "null" if any
1386 return subset & spanset(repo) # drop "null" if any
1371
1387
1372
1388
1373 @predicate(b'grep(regex)', weight=10)
1389 @predicate(b'grep(regex)', weight=10)
1374 def grep(repo, subset, x):
1390 def grep(repo, subset, x):
1375 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1391 """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1376 to ensure special escape characters are handled correctly. Unlike
1392 to ensure special escape characters are handled correctly. Unlike
1377 ``keyword(string)``, the match is case-sensitive.
1393 ``keyword(string)``, the match is case-sensitive.
1378 """
1394 """
1379 try:
1395 try:
1380 # i18n: "grep" is a keyword
1396 # i18n: "grep" is a keyword
1381 gr = re.compile(getstring(x, _(b"grep requires a string")))
1397 gr = re.compile(getstring(x, _(b"grep requires a string")))
1382 except re.error as e:
1398 except re.error as e:
1383 raise error.ParseError(
1399 raise error.ParseError(
1384 _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
1400 _(b'invalid match pattern: %s') % stringutil.forcebytestr(e)
1385 )
1401 )
1386
1402
1387 def matches(x):
1403 def matches(x):
1388 c = repo[x]
1404 c = repo[x]
1389 for e in c.files() + [c.user(), c.description()]:
1405 for e in c.files() + [c.user(), c.description()]:
1390 if gr.search(e):
1406 if gr.search(e):
1391 return True
1407 return True
1392 return False
1408 return False
1393
1409
1394 return subset.filter(matches, condrepr=(b'<grep %r>', gr.pattern))
1410 return subset.filter(matches, condrepr=(b'<grep %r>', gr.pattern))
1395
1411
1396
1412
1397 @predicate(b'_matchfiles', safe=True)
1413 @predicate(b'_matchfiles', safe=True)
1398 def _matchfiles(repo, subset, x):
1414 def _matchfiles(repo, subset, x):
1399 # _matchfiles takes a revset list of prefixed arguments:
1415 # _matchfiles takes a revset list of prefixed arguments:
1400 #
1416 #
1401 # [p:foo, i:bar, x:baz]
1417 # [p:foo, i:bar, x:baz]
1402 #
1418 #
1403 # builds a match object from them and filters subset. Allowed
1419 # builds a match object from them and filters subset. Allowed
1404 # prefixes are 'p:' for regular patterns, 'i:' for include
1420 # prefixes are 'p:' for regular patterns, 'i:' for include
1405 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1421 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1406 # a revision identifier, or the empty string to reference the
1422 # a revision identifier, or the empty string to reference the
1407 # working directory, from which the match object is
1423 # working directory, from which the match object is
1408 # initialized. Use 'd:' to set the default matching mode, default
1424 # initialized. Use 'd:' to set the default matching mode, default
1409 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1425 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
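# An illustrative argument list combining these prefixes (paths hypothetical):
#
#   [p:src/parser.py, i:src/, x:src/vendor/, r:tip, d:glob]
#
# i.e. match src/parser.py, restricted to src/ but excluding src/vendor/,
# with patterns resolved against the tip revision using 'glob' semantics.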
1410
1426
1411 l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
1427 l = getargs(x, 1, -1, b"_matchfiles requires at least one argument")
1412 pats, inc, exc = [], [], []
1428 pats, inc, exc = [], [], []
1413 rev, default = None, None
1429 rev, default = None, None
1414 for arg in l:
1430 for arg in l:
1415 s = getstring(arg, b"_matchfiles requires string arguments")
1431 s = getstring(arg, b"_matchfiles requires string arguments")
1416 prefix, value = s[:2], s[2:]
1432 prefix, value = s[:2], s[2:]
1417 if prefix == b'p:':
1433 if prefix == b'p:':
1418 pats.append(value)
1434 pats.append(value)
1419 elif prefix == b'i:':
1435 elif prefix == b'i:':
1420 inc.append(value)
1436 inc.append(value)
1421 elif prefix == b'x:':
1437 elif prefix == b'x:':
1422 exc.append(value)
1438 exc.append(value)
1423 elif prefix == b'r:':
1439 elif prefix == b'r:':
1424 if rev is not None:
1440 if rev is not None:
1425 raise error.ParseError(
1441 raise error.ParseError(
1426 b'_matchfiles expected at most one revision'
1442 b'_matchfiles expected at most one revision'
1427 )
1443 )
1428 if value == b'': # empty means working directory
1444 if value == b'': # empty means working directory
1429 rev = wdirrev
1445 rev = wdirrev
1430 else:
1446 else:
1431 rev = value
1447 rev = value
1432 elif prefix == b'd:':
1448 elif prefix == b'd:':
1433 if default is not None:
1449 if default is not None:
1434 raise error.ParseError(
1450 raise error.ParseError(
1435 b'_matchfiles expected at most one default mode'
1451 b'_matchfiles expected at most one default mode'
1436 )
1452 )
1437 default = value
1453 default = value
1438 else:
1454 else:
1439 raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
1455 raise error.ParseError(b'invalid _matchfiles prefix: %s' % prefix)
1440 if not default:
1456 if not default:
1441 default = b'glob'
1457 default = b'glob'
1442 hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)
1458 hasset = any(matchmod.patkind(p) == b'set' for p in pats + inc + exc)
1443
1459
1444 mcache = [None]
1460 mcache = [None]
1445
1461
1446 # This directly reads the changelog data, as creating a changectx for all
1462 # This directly reads the changelog data, as creating a changectx for all
1447 # revisions is quite expensive.
1463 # revisions is quite expensive.
1448 getfiles = repo.changelog.readfiles
1464 getfiles = repo.changelog.readfiles
1449
1465
1450 def matches(x):
1466 def matches(x):
1451 if x == wdirrev:
1467 if x == wdirrev:
1452 files = repo[x].files()
1468 files = repo[x].files()
1453 else:
1469 else:
1454 files = getfiles(x)
1470 files = getfiles(x)
1455
1471
1456 if not mcache[0] or (hasset and rev is None):
1472 if not mcache[0] or (hasset and rev is None):
1457 r = x if rev is None else rev
1473 r = x if rev is None else rev
1458 mcache[0] = matchmod.match(
1474 mcache[0] = matchmod.match(
1459 repo.root,
1475 repo.root,
1460 repo.getcwd(),
1476 repo.getcwd(),
1461 pats,
1477 pats,
1462 include=inc,
1478 include=inc,
1463 exclude=exc,
1479 exclude=exc,
1464 ctx=repo[r],
1480 ctx=repo[r],
1465 default=default,
1481 default=default,
1466 )
1482 )
1467 m = mcache[0]
1483 m = mcache[0]
1468
1484
1469 for f in files:
1485 for f in files:
1470 if m(f):
1486 if m(f):
1471 return True
1487 return True
1472 return False
1488 return False
1473
1489
1474 return subset.filter(
1490 return subset.filter(
1475 matches,
1491 matches,
1476 condrepr=(
1492 condrepr=(
1477 b'<matchfiles patterns=%r, include=%r '
1493 b'<matchfiles patterns=%r, include=%r '
1478 b'exclude=%r, default=%r, rev=%r>',
1494 b'exclude=%r, default=%r, rev=%r>',
1479 pats,
1495 pats,
1480 inc,
1496 inc,
1481 exc,
1497 exc,
1482 default,
1498 default,
1483 rev,
1499 rev,
1484 ),
1500 ),
1485 )
1501 )
1486
1502
1487
1503
1488 @predicate(b'file(pattern)', safe=True, weight=10)
1504 @predicate(b'file(pattern)', safe=True, weight=10)
1489 def hasfile(repo, subset, x):
1505 def hasfile(repo, subset, x):
1490 """Changesets affecting files matched by pattern.
1506 """Changesets affecting files matched by pattern.
1491
1507
1492 For a faster but less accurate result, consider using ``filelog()``
1508 For a faster but less accurate result, consider using ``filelog()``
1493 instead.
1509 instead.
1494
1510
1495 This predicate uses ``glob:`` as the default kind of pattern.
1511 This predicate uses ``glob:`` as the default kind of pattern.
1496 """
1512 """
1497 # i18n: "file" is a keyword
1513 # i18n: "file" is a keyword
1498 pat = getstring(x, _(b"file requires a pattern"))
1514 pat = getstring(x, _(b"file requires a pattern"))
1499 return _matchfiles(repo, subset, (b'string', b'p:' + pat))
1515 return _matchfiles(repo, subset, (b'string', b'p:' + pat))
1500
1516
1501
1517
1502 @predicate(b'head()', safe=True)
1518 @predicate(b'head()', safe=True)
1503 def head(repo, subset, x):
1519 def head(repo, subset, x):
1504 """Changeset is a named branch head."""
1520 """Changeset is a named branch head."""
1505 # i18n: "head" is a keyword
1521 # i18n: "head" is a keyword
1506 getargs(x, 0, 0, _(b"head takes no arguments"))
1522 getargs(x, 0, 0, _(b"head takes no arguments"))
1507 hs = set()
1523 hs = set()
1508 cl = repo.changelog
1524 cl = repo.changelog
1509 for ls in repo.branchmap().iterheads():
1525 for ls in repo.branchmap().iterheads():
1510 hs.update(cl.rev(h) for h in ls)
1526 hs.update(cl.rev(h) for h in ls)
1511 return subset & baseset(hs)
1527 return subset & baseset(hs)
1512
1528
1513
1529
1514 @predicate(b'heads(set)', safe=True, takeorder=True)
1530 @predicate(b'heads(set)', safe=True, takeorder=True)
1515 def heads(repo, subset, x, order):
1531 def heads(repo, subset, x, order):
1516 """Members of set with no children in set."""
1532 """Members of set with no children in set."""
1517 # argument set should never define order
1533 # argument set should never define order
1518 if order == defineorder:
1534 if order == defineorder:
1519 order = followorder
1535 order = followorder
1520 inputset = getset(repo, fullreposet(repo), x, order=order)
1536 inputset = getset(repo, fullreposet(repo), x, order=order)
1521 wdirparents = None
1537 wdirparents = None
1522 if wdirrev in inputset:
1538 if wdirrev in inputset:
1523 # a bit slower, but not common so good enough for now
1539 # a bit slower, but not common so good enough for now
1524 wdirparents = [p.rev() for p in repo[None].parents()]
1540 wdirparents = [p.rev() for p in repo[None].parents()]
1525 inputset = set(inputset)
1541 inputset = set(inputset)
1526 inputset.discard(wdirrev)
1542 inputset.discard(wdirrev)
1527 heads = repo.changelog.headrevs(inputset)
1543 heads = repo.changelog.headrevs(inputset)
1528 if wdirparents is not None:
1544 if wdirparents is not None:
1529 heads.difference_update(wdirparents)
1545 heads.difference_update(wdirparents)
1530 heads.add(wdirrev)
1546 heads.add(wdirrev)
1531 heads = baseset(heads)
1547 heads = baseset(heads)
1532 return subset & heads
1548 return subset & heads
1533
1549
1534
1550
1535 @predicate(b'hidden()', safe=True)
1551 @predicate(b'hidden()', safe=True)
1536 def hidden(repo, subset, x):
1552 def hidden(repo, subset, x):
1537 """Hidden changesets."""
1553 """Hidden changesets."""
1538 # i18n: "hidden" is a keyword
1554 # i18n: "hidden" is a keyword
1539 getargs(x, 0, 0, _(b"hidden takes no arguments"))
1555 getargs(x, 0, 0, _(b"hidden takes no arguments"))
1540 hiddenrevs = repoview.filterrevs(repo, b'visible')
1556 hiddenrevs = repoview.filterrevs(repo, b'visible')
1541 return subset & hiddenrevs
1557 return subset & hiddenrevs
1542
1558
1543
1559
1544 @predicate(b'keyword(string)', safe=True, weight=10)
1560 @predicate(b'keyword(string)', safe=True, weight=10)
1545 def keyword(repo, subset, x):
1561 def keyword(repo, subset, x):
1546 """Search commit message, user name, and names of changed files for
1562 """Search commit message, user name, and names of changed files for
1547 string. The match is case-insensitive.
1563 string. The match is case-insensitive.
1548
1564
1549 For a regular expression or case sensitive search of these fields, use
1565 For a regular expression or case sensitive search of these fields, use
1550 ``grep(regex)``.
1566 ``grep(regex)``.
1551 """
1567 """
1552 # i18n: "keyword" is a keyword
1568 # i18n: "keyword" is a keyword
1553 kw = encoding.lower(getstring(x, _(b"keyword requires a string")))
1569 kw = encoding.lower(getstring(x, _(b"keyword requires a string")))
1554
1570
1555 def matches(r):
1571 def matches(r):
1556 c = repo[r]
1572 c = repo[r]
1557 return any(
1573 return any(
1558 kw in encoding.lower(t)
1574 kw in encoding.lower(t)
1559 for t in c.files() + [c.user(), c.description()]
1575 for t in c.files() + [c.user(), c.description()]
1560 )
1576 )
1561
1577
1562 return subset.filter(matches, condrepr=(b'<keyword %r>', kw))
1578 return subset.filter(matches, condrepr=(b'<keyword %r>', kw))
1563
1579
1564
1580
1565 @predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1581 @predicate(b'limit(set[, n[, offset]])', safe=True, takeorder=True, weight=0)
1566 def limit(repo, subset, x, order):
1582 def limit(repo, subset, x, order):
1567 """First n members of set, defaulting to 1, starting from offset."""
1583 """First n members of set, defaulting to 1, starting from offset."""
1568 args = getargsdict(x, b'limit', b'set n offset')
1584 args = getargsdict(x, b'limit', b'set n offset')
1569 if b'set' not in args:
1585 if b'set' not in args:
1570 # i18n: "limit" is a keyword
1586 # i18n: "limit" is a keyword
1571 raise error.ParseError(_(b"limit requires one to three arguments"))
1587 raise error.ParseError(_(b"limit requires one to three arguments"))
1572 # i18n: "limit" is a keyword
1588 # i18n: "limit" is a keyword
1573 lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
1589 lim = getinteger(args.get(b'n'), _(b"limit expects a number"), default=1)
1574 if lim < 0:
1590 if lim < 0:
1575 raise error.ParseError(_(b"negative number to select"))
1591 raise error.ParseError(_(b"negative number to select"))
1576 # i18n: "limit" is a keyword
1592 # i18n: "limit" is a keyword
1577 ofs = getinteger(
1593 ofs = getinteger(
1578 args.get(b'offset'), _(b"limit expects a number"), default=0
1594 args.get(b'offset'), _(b"limit expects a number"), default=0
1579 )
1595 )
1580 if ofs < 0:
1596 if ofs < 0:
1581 raise error.ParseError(_(b"negative offset"))
1597 raise error.ParseError(_(b"negative offset"))
1582 os = getset(repo, fullreposet(repo), args[b'set'])
1598 os = getset(repo, fullreposet(repo), args[b'set'])
1583 ls = os.slice(ofs, ofs + lim)
1599 ls = os.slice(ofs, ofs + lim)
1584 if order == followorder and lim > 1:
1600 if order == followorder and lim > 1:
1585 return subset & ls
1601 return subset & ls
1586 return ls & subset
1602 return ls & subset
1587
1603
1588
1604
1589 @predicate(b'last(set, [n])', safe=True, takeorder=True)
1605 @predicate(b'last(set, [n])', safe=True, takeorder=True)
1590 def last(repo, subset, x, order):
1606 def last(repo, subset, x, order):
1591 """Last n members of set, defaulting to 1."""
1607 """Last n members of set, defaulting to 1."""
1592 # i18n: "last" is a keyword
1608 # i18n: "last" is a keyword
1593 l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
1609 l = getargs(x, 1, 2, _(b"last requires one or two arguments"))
1594 lim = 1
1610 lim = 1
1595 if len(l) == 2:
1611 if len(l) == 2:
1596 # i18n: "last" is a keyword
1612 # i18n: "last" is a keyword
1597 lim = getinteger(l[1], _(b"last expects a number"))
1613 lim = getinteger(l[1], _(b"last expects a number"))
1598 if lim < 0:
1614 if lim < 0:
1599 raise error.ParseError(_(b"negative number to select"))
1615 raise error.ParseError(_(b"negative number to select"))
1600 os = getset(repo, fullreposet(repo), l[0])
1616 os = getset(repo, fullreposet(repo), l[0])
1601 os.reverse()
1617 os.reverse()
1602 ls = os.slice(0, lim)
1618 ls = os.slice(0, lim)
1603 if order == followorder and lim > 1:
1619 if order == followorder and lim > 1:
1604 return subset & ls
1620 return subset & ls
1605 ls.reverse()
1621 ls.reverse()
1606 return ls & subset
1622 return ls & subset
1607
1623
1608
1624
1609 @predicate(b'max(set)', safe=True)
1625 @predicate(b'max(set)', safe=True)
1610 def maxrev(repo, subset, x):
1626 def maxrev(repo, subset, x):
1611 """Changeset with highest revision number in set."""
1627 """Changeset with highest revision number in set."""
1612 os = getset(repo, fullreposet(repo), x)
1628 os = getset(repo, fullreposet(repo), x)
1613 try:
1629 try:
1614 m = os.max()
1630 m = os.max()
1615 if m in subset:
1631 if m in subset:
1616 return baseset([m], datarepr=(b'<max %r, %r>', subset, os))
1632 return baseset([m], datarepr=(b'<max %r, %r>', subset, os))
1617 except ValueError:
1633 except ValueError:
1618 # os.max() throws a ValueError when the collection is empty.
1634 # os.max() throws a ValueError when the collection is empty.
1619 # Same as python's max().
1635 # Same as python's max().
1620 pass
1636 pass
1621 return baseset(datarepr=(b'<max %r, %r>', subset, os))
1637 return baseset(datarepr=(b'<max %r, %r>', subset, os))
1622
1638
1623
1639
1624 @predicate(b'merge()', safe=True)
1640 @predicate(b'merge()', safe=True)
1625 def merge(repo, subset, x):
1641 def merge(repo, subset, x):
1626 """Changeset is a merge changeset."""
1642 """Changeset is a merge changeset."""
1627 # i18n: "merge" is a keyword
1643 # i18n: "merge" is a keyword
1628 getargs(x, 0, 0, _(b"merge takes no arguments"))
1644 getargs(x, 0, 0, _(b"merge takes no arguments"))
1629 cl = repo.changelog
1645 cl = repo.changelog
1630
1646
1631 def ismerge(r):
1647 def ismerge(r):
1632 try:
1648 try:
1633 return cl.parentrevs(r)[1] != nullrev
1649 return cl.parentrevs(r)[1] != nullrev
1634 except error.WdirUnsupported:
1650 except error.WdirUnsupported:
1635 return bool(repo[r].p2())
1651 return bool(repo[r].p2())
1636
1652
1637 return subset.filter(ismerge, condrepr=b'<merge>')
1653 return subset.filter(ismerge, condrepr=b'<merge>')
1638
1654
1639
1655
1640 @predicate(b'branchpoint()', safe=True)
1656 @predicate(b'branchpoint()', safe=True)
1641 def branchpoint(repo, subset, x):
1657 def branchpoint(repo, subset, x):
1642 """Changesets with more than one child."""
1658 """Changesets with more than one child."""
1643 # i18n: "branchpoint" is a keyword
1659 # i18n: "branchpoint" is a keyword
1644 getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
1660 getargs(x, 0, 0, _(b"branchpoint takes no arguments"))
1645 cl = repo.changelog
1661 cl = repo.changelog
1646 if not subset:
1662 if not subset:
1647 return baseset()
1663 return baseset()
1648 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1664 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1649 # (and if it is not, it should.)
1665 # (and if it is not, it should.)
1650 baserev = min(subset)
1666 baserev = min(subset)
1651 parentscount = [0] * (len(repo) - baserev)
1667 parentscount = [0] * (len(repo) - baserev)
1652 for r in cl.revs(start=baserev + 1):
1668 for r in cl.revs(start=baserev + 1):
1653 for p in cl.parentrevs(r):
1669 for p in cl.parentrevs(r):
1654 if p >= baserev:
1670 if p >= baserev:
1655 parentscount[p - baserev] += 1
1671 parentscount[p - baserev] += 1
1656 return subset.filter(
1672 return subset.filter(
1657 lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
1673 lambda r: parentscount[r - baserev] > 1, condrepr=b'<branchpoint>'
1658 )
1674 )
1659
1675
1660
1676
1661 @predicate(b'min(set)', safe=True)
1677 @predicate(b'min(set)', safe=True)
1662 def minrev(repo, subset, x):
1678 def minrev(repo, subset, x):
1663 """Changeset with lowest revision number in set."""
1679 """Changeset with lowest revision number in set."""
1664 os = getset(repo, fullreposet(repo), x)
1680 os = getset(repo, fullreposet(repo), x)
1665 try:
1681 try:
1666 m = os.min()
1682 m = os.min()
1667 if m in subset:
1683 if m in subset:
1668 return baseset([m], datarepr=(b'<min %r, %r>', subset, os))
1684 return baseset([m], datarepr=(b'<min %r, %r>', subset, os))
1669 except ValueError:
1685 except ValueError:
1670 # os.min() throws a ValueError when the collection is empty.
1686 # os.min() throws a ValueError when the collection is empty.
1671 # Same as python's min().
1687 # Same as python's min().
1672 pass
1688 pass
1673 return baseset(datarepr=(b'<min %r, %r>', subset, os))
1689 return baseset(datarepr=(b'<min %r, %r>', subset, os))
1674
1690
1675
1691
1676 @predicate(b'modifies(pattern)', safe=True, weight=30)
1692 @predicate(b'modifies(pattern)', safe=True, weight=30)
1677 def modifies(repo, subset, x):
1693 def modifies(repo, subset, x):
1678 """Changesets modifying files matched by pattern.
1694 """Changesets modifying files matched by pattern.
1679
1695
1680 The pattern without explicit kind like ``glob:`` is expected to be
1696 The pattern without explicit kind like ``glob:`` is expected to be
1681 relative to the current directory and match against a file or a
1697 relative to the current directory and match against a file or a
1682 directory.
1698 directory.
1683 """
1699 """
1684 # i18n: "modifies" is a keyword
1700 # i18n: "modifies" is a keyword
1685 pat = getstring(x, _(b"modifies requires a pattern"))
1701 pat = getstring(x, _(b"modifies requires a pattern"))
1686 return checkstatus(repo, subset, pat, 'modified')
1702 return checkstatus(repo, subset, pat, 'modified')
1687
1703
1688
1704
1689 @predicate(b'named(namespace)')
1705 @predicate(b'named(namespace)')
1690 def named(repo, subset, x):
1706 def named(repo, subset, x):
1691 """The changesets in a given namespace.
1707 """The changesets in a given namespace.
1692
1708
1693 Pattern matching is supported for `namespace`. See
1709 Pattern matching is supported for `namespace`. See
1694 :hg:`help revisions.patterns`.
1710 :hg:`help revisions.patterns`.
1695 """
1711 """
1696 # i18n: "named" is a keyword
1712 # i18n: "named" is a keyword
1697 args = getargs(x, 1, 1, _(b'named requires a namespace argument'))
1713 args = getargs(x, 1, 1, _(b'named requires a namespace argument'))
1698
1714
1699 ns = getstring(
1715 ns = getstring(
1700 args[0],
1716 args[0],
1701 # i18n: "named" is a keyword
1717 # i18n: "named" is a keyword
1702 _(b'the argument to named must be a string'),
1718 _(b'the argument to named must be a string'),
1703 )
1719 )
1704 kind, pattern, matcher = stringutil.stringmatcher(ns)
1720 kind, pattern, matcher = stringutil.stringmatcher(ns)
1705 namespaces = set()
1721 namespaces = set()
1706 if kind == b'literal':
1722 if kind == b'literal':
1707 if pattern not in repo.names:
1723 if pattern not in repo.names:
1708 raise error.RepoLookupError(
1724 raise error.RepoLookupError(
1709 _(b"namespace '%s' does not exist") % ns
1725 _(b"namespace '%s' does not exist") % ns
1710 )
1726 )
1711 namespaces.add(repo.names[pattern])
1727 namespaces.add(repo.names[pattern])
1712 else:
1728 else:
1713 for name, ns in repo.names.items():
1729 for name, ns in repo.names.items():
1714 if matcher(name):
1730 if matcher(name):
1715 namespaces.add(ns)
1731 namespaces.add(ns)
1716
1732
1717 names = set()
1733 names = set()
1718 for ns in namespaces:
1734 for ns in namespaces:
1719 for name in ns.listnames(repo):
1735 for name in ns.listnames(repo):
1720 if name not in ns.deprecated:
1736 if name not in ns.deprecated:
1721 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1737 names.update(repo[n].rev() for n in ns.nodes(repo, name))
1722
1738
1723 names -= {nullrev}
1739 names -= {nullrev}
1724 return subset & names
1740 return subset & names
1725
1741
1726
1742
1727 def _node(repo, n):
1743 def _node(repo, n):
1728 """process a node input"""
1744 """process a node input"""
1729 rn = None
1745 rn = None
1730 if len(n) == 2 * repo.nodeconstants.nodelen:
1746 if len(n) == 2 * repo.nodeconstants.nodelen:
1731 try:
1747 try:
1732 rn = repo.changelog.rev(bin(n))
1748 rn = repo.changelog.rev(bin(n))
1733 except error.WdirUnsupported:
1749 except error.WdirUnsupported:
1734 rn = wdirrev
1750 rn = wdirrev
1735 except (binascii.Error, LookupError):
1751 except (binascii.Error, LookupError):
1736 rn = None
1752 rn = None
1737 else:
1753 else:
1738 try:
1754 try:
1739 pm = scmutil.resolvehexnodeidprefix(repo, n)
1755 pm = scmutil.resolvehexnodeidprefix(repo, n)
1740 if pm is not None:
1756 if pm is not None:
1741 rn = repo.changelog.rev(pm)
1757 rn = repo.changelog.rev(pm)
1742 except LookupError:
1758 except LookupError:
1743 pass
1759 pass
1744 except error.WdirUnsupported:
1760 except error.WdirUnsupported:
1745 rn = wdirrev
1761 rn = wdirrev
1746 return rn
1762 return rn
1747
1763
1748
1764
1749 @predicate(b'id(string)', safe=True)
1765 @predicate(b'id(string)', safe=True)
1750 def node_(repo, subset, x):
1766 def node_(repo, subset, x):
1751 """Revision non-ambiguously specified by the given hex string prefix."""
1767 """Revision non-ambiguously specified by the given hex string prefix."""
1752 # i18n: "id" is a keyword
1768 # i18n: "id" is a keyword
1753 l = getargs(x, 1, 1, _(b"id requires one argument"))
1769 l = getargs(x, 1, 1, _(b"id requires one argument"))
1754 # i18n: "id" is a keyword
1770 # i18n: "id" is a keyword
1755 n = getstring(l[0], _(b"id requires a string"))
1771 n = getstring(l[0], _(b"id requires a string"))
1756 rn = _node(repo, n)
1772 rn = _node(repo, n)
1757
1773
1758 if rn is None:
1774 if rn is None:
1759 return baseset()
1775 return baseset()
1760 result = baseset([rn])
1776 result = baseset([rn])
1761 return result & subset
1777 return result & subset
1762
1778
1763
1779
1764 @predicate(b'none()', safe=True)
1780 @predicate(b'none()', safe=True)
1765 def none(repo, subset, x):
1781 def none(repo, subset, x):
1766 """No changesets."""
1782 """No changesets."""
1767 # i18n: "none" is a keyword
1783 # i18n: "none" is a keyword
1768 getargs(x, 0, 0, _(b"none takes no arguments"))
1784 getargs(x, 0, 0, _(b"none takes no arguments"))
1769 return baseset()
1785 return baseset()
1770
1786
1771
1787
1772 @predicate(b'obsolete()', safe=True)
1788 @predicate(b'obsolete()', safe=True)
1773 def obsolete(repo, subset, x):
1789 def obsolete(repo, subset, x):
1774 """Mutable changeset with a newer version. (EXPERIMENTAL)"""
1790 """Mutable changeset with a newer version. (EXPERIMENTAL)"""
1775 # i18n: "obsolete" is a keyword
1791 # i18n: "obsolete" is a keyword
1776 getargs(x, 0, 0, _(b"obsolete takes no arguments"))
1792 getargs(x, 0, 0, _(b"obsolete takes no arguments"))
1777 obsoletes = obsmod.getrevs(repo, b'obsolete')
1793 obsoletes = obsmod.getrevs(repo, b'obsolete')
1778 return subset & obsoletes
1794 return subset & obsoletes
1779
1795
1780
1796
1781 @predicate(b'only(set, [set])', safe=True)
1797 @predicate(b'only(set, [set])', safe=True)
1782 def only(repo, subset, x):
1798 def only(repo, subset, x):
1783 """Changesets that are ancestors of the first set that are not ancestors
1799 """Changesets that are ancestors of the first set that are not ancestors
1784 of any other head in the repo. If a second set is specified, the result
1800 of any other head in the repo. If a second set is specified, the result
1785 is ancestors of the first set that are not ancestors of the second set
1801 is ancestors of the first set that are not ancestors of the second set
1786 (i.e. ::<set1> - ::<set2>).
1802 (i.e. ::<set1> - ::<set2>).
1787 """
1803 """
1788 cl = repo.changelog
1804 cl = repo.changelog
1789 # i18n: "only" is a keyword
1805 # i18n: "only" is a keyword
1790 args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
1806 args = getargs(x, 1, 2, _(b'only takes one or two arguments'))
1791 include = getset(repo, fullreposet(repo), args[0])
1807 include = getset(repo, fullreposet(repo), args[0])
1792 if len(args) == 1:
1808 if len(args) == 1:
1793 if not include:
1809 if not include:
1794 return baseset()
1810 return baseset()
1795
1811
1796 descendants = set(dagop.revdescendants(repo, include, False))
1812 descendants = set(dagop.revdescendants(repo, include, False))
1797 exclude = [
1813 exclude = [
1798 rev
1814 rev
1799 for rev in cl.headrevs()
1815 for rev in cl.headrevs()
1800 if rev not in descendants and rev not in include
1816 if rev not in descendants and rev not in include
1801 ]
1817 ]
1802 else:
1818 else:
1803 exclude = getset(repo, fullreposet(repo), args[1])
1819 exclude = getset(repo, fullreposet(repo), args[1])
1804
1820
1805 results = set(cl.findmissingrevs(common=exclude, heads=include))
1821 results = set(cl.findmissingrevs(common=exclude, heads=include))
1806 # XXX we should turn this into a baseset instead of a set, smartset may do
1822 # XXX we should turn this into a baseset instead of a set, smartset may do
1807 # some optimizations from the fact this is a baseset.
1823 # some optimizations from the fact this is a baseset.
1808 return subset & results
1824 return subset & results
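
# A minimal usage sketch for the predicate above, assuming an already-open
# localrepo object ``repo`` and branches named ``release`` and ``default``:
#
#     revs = repo.revs(b"only(release, default)")
#     # equivalent to: repo.revs(b"::release - ::default")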


@predicate(b'origin([set])', safe=True)
def origin(repo, subset, x):
    """
    Changesets that were specified as a source for the grafts, transplants or
    rebases that created the given revisions. Omitting the optional set is the
    same as passing all(). If a changeset created by these operations is itself
    specified as a source for one of these operations, only the source changeset
    for the first operation is selected.
    """
    if x is not None:
        dests = getset(repo, fullreposet(repo), x)
    else:
        dests = fullreposet(repo)

    def _firstsrc(rev):
        src = _getrevsource(repo, rev)
        if src is None:
            return None

        while True:
            prev = _getrevsource(repo, src)

            if prev is None:
                return src
            src = prev

    o = {_firstsrc(r) for r in dests}
    o -= {None}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & o


@predicate(b'outgoing([path])', safe=False, weight=10)
def outgoing(repo, subset, x):
    """Changesets not found in the specified destination repository, or the
    default push location.

    If the location resolves to multiple repositories, the union of all
    outgoing changesets will be used.
    """
    # Avoid cycles.
    from . import (
        discovery,
        hg,
    )

    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _(b"outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = (
        l and getstring(l[0], _(b"outgoing requires a repository path")) or b''
    )
    if dest:
        dests = [dest]
    else:
        dests = []
    missing = set()
    for path in urlutil.get_push_paths(repo, repo.ui, dests):
        branches = path.branch, []

        revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
        if revs:
            revs = [repo.lookup(rev) for rev in revs]
        other = hg.peer(repo, {}, path)
        try:
            with repo.ui.silent():
                outgoing = discovery.findcommonoutgoing(
                    repo, other, onlyheads=revs
                )
        finally:
            other.close()
        missing.update(outgoing.missing)
    cl = repo.changelog
    o = {cl.rev(r) for r in missing}
    return subset & o
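
# Typical command-line uses of the predicate above (a sketch, assuming a
# configured ``default-push``/``default`` path or an explicit destination):
#
#     hg log -r "outgoing()"
#     hg log -r "outgoing('ssh://server/repo')"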


@predicate(b'p1([set])', safe=True)
def p1(repo, subset, x):
    """First parent of changesets in set, or the working directory."""
    if x is None:
        p = repo[x].p1().rev()
        if p >= 0:
            return subset & baseset([p])
        return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[0])
        except error.WdirUnsupported:
            ps.add(repo[r].p1().rev())
    ps -= {nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps


@predicate(b'p2([set])', safe=True)
def p2(repo, subset, x):
    """Second parent of changesets in set, or the working directory."""
    if x is None:
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            if p >= 0:
                return subset & baseset([p])
            return baseset()
        except IndexError:
            return baseset()

    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        try:
            ps.add(cl.parentrevs(r)[1])
        except error.WdirUnsupported:
            parents = repo[r].parents()
            if len(parents) == 2:
                ps.add(parents[1])
    ps -= {nullrev}
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimizations from the fact this is a baseset.
    return subset & ps


def parentpost(repo, subset, x, order):
    return p1(repo, subset, x)


@predicate(b'parents([set])', safe=True)
def parents(repo, subset, x):
    """
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        ps = {p.rev() for p in repo[x].parents()}
    else:
        ps = set()
        cl = repo.changelog
        up = ps.update
        parentrevs = cl.parentrevs
        for r in getset(repo, fullreposet(repo), x):
            try:
                up(parentrevs(r))
            except error.WdirUnsupported:
                up(p.rev() for p in repo[r].parents())
    ps -= {nullrev}
    return subset & ps
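
# Minimal usage sketch for p1()/p2()/parents(), assuming a localrepo ``repo``:
#
#     repo.revs(b"p1(.)")         # first parent of the working directory parent
#     repo.revs(b"p2(merge())")   # second parents of merge changesets
#     repo.revs(b"parents(tip)")  # all parents of tip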


def _phase(repo, subset, *targets):
    """helper to select all rev in <targets> phases"""
    return repo._phasecache.getrevset(repo, targets, subset)


@predicate(b'_internal()', safe=True)
def _internal(repo, subset, x):
    getargs(x, 0, 0, _(b"_internal takes no arguments"))
    return _phase(repo, subset, *phases.all_internal_phases)


@predicate(b'_phase(idx)', safe=True)
def phase(repo, subset, x):
    l = getargs(x, 1, 1, b"_phase requires one argument")
    target = getinteger(l[0], b"_phase expects a number")
    return _phase(repo, subset, target)


@predicate(b'draft()', safe=True)
def draft(repo, subset, x):
    """Changeset in draft phase."""
    # i18n: "draft" is a keyword
    getargs(x, 0, 0, _(b"draft takes no arguments"))
    target = phases.draft
    return _phase(repo, subset, target)


@predicate(b'secret()', safe=True)
def secret(repo, subset, x):
    """Changeset in secret phase."""
    # i18n: "secret" is a keyword
    getargs(x, 0, 0, _(b"secret takes no arguments"))
    target = phases.secret
    return _phase(repo, subset, target)


@predicate(b'stack([revs])', safe=True)
def stack(repo, subset, x):
    """Experimental revset for the stack of changesets or working directory
    parent. (EXPERIMENTAL)
    """
    if x is None:
        stacks = stackmod.getstack(repo)
    else:
        stacks = smartset.baseset([])
        for revision in getset(repo, fullreposet(repo), x):
            currentstack = stackmod.getstack(repo, revision)
            stacks = stacks + currentstack

    return subset & stacks


def parentspec(repo, subset, x, n, order):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_(b"^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            try:
                ps.add(cl.parentrevs(r)[0])
            except error.WdirUnsupported:
                ps.add(repo[r].p1().rev())
        else:
            try:
                parents = cl.parentrevs(r)
                if parents[1] != nullrev:
                    ps.add(parents[1])
            except error.WdirUnsupported:
                parents = repo[r].parents()
                if len(parents) == 2:
                    ps.add(parents[1].rev())
    return subset & ps
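
# The ``^`` operator handled above, as used from the command line (sketch):
#
#     hg log -r "tip^"    # first parent of tip (same as tip^1)
#     hg log -r "tip^2"   # second parent of tip; empty if tip is not a merge
#     hg log -r "tip^0"   # tip itself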


@predicate(b'present(set)', safe=True, takeorder=True)
def present(repo, subset, x, order):
    """An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x, order)
    except error.RepoLookupError:
        return baseset()
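
# A sketch of why present() exists: it keeps a query from aborting on unknown
# revisions, e.g. (``badc0ffee`` standing in for a hash that may not exist):
#
#     hg log -r "present(badc0ffee) or tip"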


# for internal use
@predicate(b'_notpublic', safe=True)
def _notpublic(repo, subset, x):
    getargs(x, 0, 0, b"_notpublic takes no arguments")
    return _phase(repo, subset, *phases.not_public_phases)


# for internal use
@predicate(b'_phaseandancestors(phasename, set)', safe=True)
def _phaseandancestors(repo, subset, x):
    # equivalent to (phasename() & ancestors(set)) but more efficient
    # phasename could be one of 'draft', 'secret', or '_notpublic'
    args = getargs(x, 2, 2, b"_phaseandancestors requires two arguments")
    phasename = getsymbol(args[0])
    s = getset(repo, fullreposet(repo), args[1])

    draft = phases.draft
    secret = phases.secret
    phasenamemap = {
        b'_notpublic': draft,
        b'draft': draft,  # follow secret's ancestors
        b'secret': secret,
    }
    if phasename not in phasenamemap:
        raise error.ParseError(b'%r is not a valid phasename' % phasename)

    minimalphase = phasenamemap[phasename]
    getphase = repo._phasecache.phase

    def cutfunc(rev):
        return getphase(repo, rev) < minimalphase

    revs = dagop.revancestors(repo, s, cutfunc=cutfunc)

    if phasename == b'draft':  # need to remove secret changesets
        revs = revs.filter(lambda r: getphase(repo, r) == draft)
    return subset & revs


@predicate(b'public()', safe=True)
def public(repo, subset, x):
    """Changeset in public phase."""
    # i18n: "public" is a keyword
    getargs(x, 0, 0, _(b"public takes no arguments"))
    return _phase(repo, subset, phases.public)


@predicate(b'remote([id [,path]])', safe=False)
def remote(repo, subset, x):
    """Local revision that corresponds to the given identifier in a
    remote repository, if present. Here, the '.' identifier is a
    synonym for the current local branch.
    """

    from . import hg  # avoid start-up nasties

    # i18n: "remote" is a keyword
    l = getargs(x, 0, 2, _(b"remote takes zero, one, or two arguments"))

    q = b'.'
    if len(l) > 0:
        # i18n: "remote" is a keyword
        q = getstring(l[0], _(b"remote requires a string id"))
    if q == b'.':
        q = repo[b'.'].branch()

    dest = b''
    if len(l) > 1:
        # i18n: "remote" is a keyword
        dest = getstring(l[1], _(b"remote requires a repository path"))
    if not dest:
        dest = b'default'
    path = urlutil.get_unique_pull_path_obj(b'remote', repo.ui, dest)

    other = hg.peer(repo, {}, path)
    n = other.lookup(q)
    if n in repo:
        r = repo[n].rev()
        if r in subset:
            return baseset([r])
    return baseset()


@predicate(b'removes(pattern)', safe=True, weight=30)
def removes(repo, subset, x):
    """Changesets which remove files matching pattern.

    The pattern without explicit kind like ``glob:`` is expected to be
    relative to the current directory and match against a file or a
    directory.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _(b"removes requires a pattern"))
    return checkstatus(repo, subset, pat, 'removed')


@predicate(b'rev(number)', safe=True)
def rev(repo, subset, x):
    """Revision with the given numeric identifier."""
    try:
        return _rev(repo, subset, x)
    except error.RepoLookupError:
        return baseset()


@predicate(b'_rev(number)', safe=True)
def _rev(repo, subset, x):
    # internal version of "rev(x)" that raises an error if "x" is invalid
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _(b"rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _(b"rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_(b"rev expects a number"))
    if l not in _virtualrevs:
        try:
            repo.changelog.node(l)  # check that the rev exists
        except IndexError:
            raise error.RepoLookupError(_(b"unknown revision '%d'") % l)
    return subset & baseset([l])


@predicate(b'revset(set)', safe=True, takeorder=True)
def revsetpredicate(repo, subset, x, order):
    """Strictly interpret the content as a revset.

    The content of this special predicate will be strictly interpreted as a
    revset. For example, ``revset(id(0))`` will be interpreted as "id(0)"
    without possible ambiguity with a "id(0)" bookmark or tag.
    """
    return getset(repo, subset, x, order)


@predicate(b'matching(revision [, field])', safe=True)
def matching(repo, subset, x):
    """Changesets in which a given set of fields match the set of fields in the
    selected revision or set.

    To match more than one field pass the list of fields to match separated
    by spaces (e.g. ``author description``).

    Valid fields are most regular revision fields and some special fields.

    Regular revision fields are ``description``, ``author``, ``branch``,
    ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
    and ``diff``.
    Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
    contents of the revision. Two revisions matching their ``diff`` will
    also match their ``files``.

    Special fields are ``summary`` and ``metadata``:
    ``summary`` matches the first line of the description.
    ``metadata`` is equivalent to matching ``description user date``
    (i.e. it matches the main metadata fields).

    ``metadata`` is the default field which is used when no fields are
    specified. You can match more than one field at a time.
    """
    # i18n: "matching" is a keyword
    l = getargs(x, 1, 2, _(b"matching takes 1 or 2 arguments"))

    revs = getset(repo, fullreposet(repo), l[0])

    fieldlist = [b'metadata']
    if len(l) > 1:
        fieldlist = getstring(
            l[1],
            # i18n: "matching" is a keyword
            _(b"matching requires a string as its second argument"),
        ).split()

    # Make sure that there are no repeated fields,
    # expand the 'special' 'metadata' field type
    # and check the 'files' whenever we check the 'diff'
    fields = []
    for field in fieldlist:
        if field == b'metadata':
            fields += [b'user', b'description', b'date']
        elif field == b'diff':
            # a revision matching the diff must also match the files
            # since matching the diff is very costly, make sure to
            # also match the files first
            fields += [b'files', b'diff']
        else:
            if field == b'author':
                field = b'user'
            fields.append(field)
    fields = set(fields)
    if b'summary' in fields and b'description' in fields:
        # If a revision matches its description it also matches its summary
        fields.discard(b'summary')

    # We may want to match more than one field
    # Not all fields take the same amount of time to be matched
    # Sort the selected fields in order of increasing matching cost
    fieldorder = [
        b'phase',
        b'parents',
        b'user',
        b'date',
        b'branch',
        b'summary',
        b'files',
        b'description',
        b'substate',
        b'diff',
    ]

    def fieldkeyfunc(f):
        try:
            return fieldorder.index(f)
        except ValueError:
            # assume an unknown field is very costly
            return len(fieldorder)

    fields = list(fields)
    fields.sort(key=fieldkeyfunc)

    # Each field will be matched with its own "getfield" function
    # which will be added to the getfieldfuncs array of functions
    getfieldfuncs = []
    _funcs = {
        b'user': lambda r: repo[r].user(),
        b'branch': lambda r: repo[r].branch(),
        b'date': lambda r: repo[r].date(),
        b'description': lambda r: repo[r].description(),
        b'files': lambda r: repo[r].files(),
        b'parents': lambda r: repo[r].parents(),
        b'phase': lambda r: repo[r].phase(),
        b'substate': lambda r: repo[r].substate,
        b'summary': lambda r: repo[r].description().splitlines()[0],
        b'diff': lambda r: list(
            repo[r].diff(opts=diffutil.diffallopts(repo.ui, {b'git': True}))
        ),
    }
    for info in fields:
        getfield = _funcs.get(info, None)
        if getfield is None:
            raise error.ParseError(
                # i18n: "matching" is a keyword
                _(b"unexpected field name passed to matching: %s")
                % info
            )
        getfieldfuncs.append(getfield)
    # convert the getfield array of functions into a "getinfo" function
    # which returns an array of field values (or a single value if there
    # is only one field to match)
    getinfo = lambda r: [f(r) for f in getfieldfuncs]

    def matches(x):
        for rev in revs:
            target = getinfo(rev)
            match = True
            for n, f in enumerate(getfieldfuncs):
                if target[n] != f(x):
                    match = False
            if match:
                return True
        return False

    return subset.filter(matches, condrepr=(b'<matching%r %r>', fields, revs))
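
# Usage sketch for matching(), assuming it is queried against ``tip``:
#
#     hg log -r 'matching(tip)'                   # same user, description and date as tip
#     hg log -r 'matching(tip, "author branch")'  # same author and branch as tip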


@predicate(b'reverse(set)', safe=True, takeorder=True, weight=0)
def reverse(repo, subset, x, order):
    """Reverse order of set."""
    l = getset(repo, subset, x, order)
    if order == defineorder:
        l.reverse()
    return l


@predicate(b'roots(set)', safe=True)
def roots(repo, subset, x):
    """Changesets in set with no parent changeset in set."""
    s = getset(repo, fullreposet(repo), x)
    parents = repo.changelog.parentrevs

    def filter(r):
        try:
            for p in parents(r):
                if 0 <= p and p in s:
                    return False
        except error.WdirUnsupported:
            for p in repo[None].parents():
                if p.rev() in s:
                    return False
        return True

    return subset & s.filter(filter, condrepr=b'<roots>')


MAXINT = (1 << 31) - 1
MININT = -MAXINT - 1


def pick_random(c, gen=random):
    # exists as its own function to make it possible to overwrite the seed
    return gen.randint(MININT, MAXINT)


_sortkeyfuncs = {
    b'rev': scmutil.intrev,
    b'branch': lambda c: c.branch(),
    b'desc': lambda c: c.description(),
    b'user': lambda c: c.user(),
    b'author': lambda c: c.user(),
    b'date': lambda c: c.date()[0],
    b'node': scmutil.binnode,
    b'random': pick_random,
}


def _getsortargs(x):
    """Parse sort options into (set, [(key, reverse)], opts)"""
    args = getargsdict(
        x,
        b'sort',
        b'set keys topo.firstbranch random.seed',
    )
    if b'set' not in args:
        # i18n: "sort" is a keyword
        raise error.ParseError(_(b'sort requires one or two arguments'))
    keys = b"rev"
    if b'keys' in args:
        # i18n: "sort" is a keyword
        keys = getstring(args[b'keys'], _(b"sort spec must be a string"))

    keyflags = []
    for k in keys.split():
        fk = k
        reverse = k.startswith(b'-')
        if reverse:
            k = k[1:]
        if k not in _sortkeyfuncs and k != b'topo':
            raise error.ParseError(
                _(b"unknown sort key %r") % pycompat.bytestr(fk)
            )
        keyflags.append((k, reverse))

    if len(keyflags) > 1 and any(k == b'topo' for k, reverse in keyflags):
        # i18n: "topo" is a keyword
        raise error.ParseError(
            _(b'topo sort order cannot be combined with other sort keys')
        )

    opts = {}
    if b'topo.firstbranch' in args:
        if any(k == b'topo' for k, reverse in keyflags):
            opts[b'topo.firstbranch'] = args[b'topo.firstbranch']
        else:
            # i18n: "topo" and "topo.firstbranch" are keywords
            raise error.ParseError(
                _(
                    b'topo.firstbranch can only be used '
                    b'when using the topo sort key'
                )
            )

    if b'random.seed' in args:
        if any(k == b'random' for k, reverse in keyflags):
            s = args[b'random.seed']
            seed = getstring(s, _(b"random.seed must be a string"))
            opts[b'random.seed'] = seed
        else:
            # i18n: "random" and "random.seed" are keywords
            raise error.ParseError(
                _(
                    b'random.seed can only be used '
                    b'when using the random sort key'
                )
            )

    return args[b'set'], keyflags, opts


@predicate(
    b'sort(set[, [-]key... [, ...]])', safe=True, takeorder=True, weight=10
)
def sort(repo, subset, x, order):
    """Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    - ``topo`` for a reverse topographical sort
    - ``node`` the nodeid of the revision
    - ``random`` randomly shuffle revisions

    The ``topo`` sort order cannot be combined with other sort keys. This sort
    takes one optional argument, ``topo.firstbranch``, which takes a revset that
    specifies what topographical branches to prioritize in the sort.

    The ``random`` sort takes one optional ``random.seed`` argument to control
    the pseudo-randomness of the result.
    """
    s, keyflags, opts = _getsortargs(x)
    revs = getset(repo, subset, s, order)

    if not keyflags or order != defineorder:
        return revs
    if len(keyflags) == 1 and keyflags[0][0] == b"rev":
        revs.sort(reverse=keyflags[0][1])
        return revs
    elif keyflags[0][0] == b"topo":
        firstbranch = ()
        parentrevs = repo.changelog.parentrevs
        parentsfunc = parentrevs
        if wdirrev in revs:

            def parentsfunc(r):
                try:
                    return parentrevs(r)
                except error.WdirUnsupported:
                    return [p.rev() for p in repo[None].parents()]

        if b'topo.firstbranch' in opts:
            firstbranch = getset(repo, subset, opts[b'topo.firstbranch'])
        revs = baseset(
            dagop.toposort(revs, parentsfunc, firstbranch),
            istopo=True,
        )
        if keyflags[0][1]:
            revs.reverse()
        return revs

    # sort() is guaranteed to be stable
    ctxs = [repo[r] for r in revs]
    for k, reverse in reversed(keyflags):
        func = _sortkeyfuncs[k]
        if k == b'random' and b'random.seed' in opts:
            seed = opts[b'random.seed']
            r = random.Random(seed)
            func = functools.partial(func, gen=r)
        ctxs.sort(key=func, reverse=reverse)
    return baseset([c.rev() for c in ctxs])
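
# A few illustrative sorts (sketch; ``random.seed`` is passed as a keyword
# argument of the revset, as parsed by _getsortargs above):
#
#     hg log -r "sort(all(), -date)"                        # newest commit date first
#     hg log -r "sort(all(), topo)"                         # reverse topographical order
#     hg log -r "sort(all(), random, random.seed=abc123)"   # reproducible shuffle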


@predicate(b'subrepo([pattern])')
def subrepo(repo, subset, x):
    """Changesets that add, modify or remove the given subrepo. If no subrepo
    pattern is named, any subrepo changes are returned.
    """
    # i18n: "subrepo" is a keyword
    args = getargs(x, 0, 1, _(b'subrepo takes at most one argument'))
    pat = None
    if len(args) != 0:
        pat = getstring(args[0], _(b"subrepo requires a pattern"))

    m = matchmod.exact([b'.hgsubstate'])

    def submatches(names):
        k, p, m = stringutil.stringmatcher(pat)
        for name in names:
            if m(name):
                yield name

    def matches(x):
        c = repo[x]
        s = repo.status(c.p1().node(), c.node(), match=m)

        if pat is None:
            return s.added or s.modified or s.removed

        if s.added:
            return any(submatches(c.substate.keys()))

        if s.modified:
            subs = set(c.p1().substate.keys())
            subs.update(c.substate.keys())

            for path in submatches(subs):
                if c.p1().substate.get(path) != c.substate.get(path):
                    return True

        if s.removed:
            return any(submatches(c.p1().substate.keys()))

        return False

    return subset.filter(matches, condrepr=(b'<subrepo %r>', pat))


def _mapbynodefunc(repo, s, f):
    """(repo, smartset, [node] -> [node]) -> smartset

    Helper method to map a smartset to another smartset given a function only
    talking about nodes. Handles converting between rev numbers and nodes, and
    filtering.
    """
    cl = repo.unfiltered().changelog
    torev = cl.index.get_rev
    tonode = cl.node
    result = {torev(n) for n in f(tonode(r) for r in s)}
    result.discard(None)
    return smartset.baseset(result - repo.changelog.filteredrevs)


@predicate(b'successors(set)', safe=True)
def successors(repo, subset, x):
    """All successors for set, including the given set themselves.
    (EXPERIMENTAL)"""
    s = getset(repo, fullreposet(repo), x)
    f = lambda nodes: obsutil.allsuccessors(repo.obsstore, nodes)
    d = _mapbynodefunc(repo, s, f)
    return subset & d
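
# Sketch: _mapbynodefunc lets node-oriented helpers such as
# obsutil.allsuccessors back a rev-oriented revset, e.g.
#
#     hg log --hidden -r "successors(draft())"
#
# (--hidden may be needed so that obsolete predecessors stay visible).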


def _substringmatcher(pattern, casesensitive=True):
    kind, pattern, matcher = stringutil.stringmatcher(
        pattern, casesensitive=casesensitive
    )
    if kind == b'literal':
        if not casesensitive:
            pattern = encoding.lower(pattern)
            matcher = lambda s: pattern in encoding.lower(s)
        else:
            matcher = lambda s: pattern in s
    return kind, pattern, matcher


@predicate(b'tag([name])', safe=True)
def tag(repo, subset, x):
    """The specified tag by name, or all tagged revisions if no name is given.

    Pattern matching is supported for `name`. See
    :hg:`help revisions.patterns`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _(b"tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(
            args[0],
            # i18n: "tag" is a keyword
            _(b'the argument to tag must be a string'),
        )
        kind, pattern, matcher = stringutil.stringmatcher(pattern)
        if kind == b'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(
                    _(b"tag '%s' does not exist") % pattern
                )
            s = {repo[tn].rev()}
        else:
            s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
    else:
        s = {cl.rev(n) for t, n in repo.tagslist() if t != b'tip'}
    return subset & s
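
# Usage sketch for tag(), assuming a tag named ``1.3`` exists:
#
#     hg log -r "tag(1.3)"         # the changeset tagged 1.3
#     hg log -r "tag('re:^v\d')"   # tags matching a regular expression
#     hg log -r "tag()"            # every tagged revision except 'tip'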


@predicate(b'tagged', safe=True)
def tagged(repo, subset, x):
    return tag(repo, subset, x)


@predicate(b'orphan()', safe=True)
def orphan(repo, subset, x):
    """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL)"""
    # i18n: "orphan" is a keyword
    getargs(x, 0, 0, _(b"orphan takes no arguments"))
    orphan = obsmod.getrevs(repo, b'orphan')
    return subset & orphan


@predicate(b'unstable()', safe=True)
def unstable(repo, subset, x):
    """Changesets with instabilities. (EXPERIMENTAL)"""
    # i18n: "unstable" is a keyword
    getargs(x, 0, 0, b'unstable takes no arguments')
    _unstable = set()
    _unstable.update(obsmod.getrevs(repo, b'orphan'))
    _unstable.update(obsmod.getrevs(repo, b'phasedivergent'))
    _unstable.update(obsmod.getrevs(repo, b'contentdivergent'))
    return subset & baseset(_unstable)


@predicate(b'user(string)', safe=True, weight=10)
def user(repo, subset, x):
    """User name contains string. The match is case-insensitive.

    Pattern matching is supported for `string`. See
    :hg:`help revisions.patterns`.
    """
    return author(repo, subset, x)


@predicate(b'wdir()', safe=True, weight=0)
def wdir(repo, subset, x):
    """Working directory. (EXPERIMENTAL)"""
    # i18n: "wdir" is a keyword
    getargs(x, 0, 0, _(b"wdir takes no arguments"))
    if wdirrev in subset or isinstance(subset, fullreposet):
        return baseset([wdirrev])
    return baseset()


def _orderedlist(repo, subset, x):
    s = getstring(x, b"internal error")
    if not s:
        return baseset()
    # remove duplicates here. it's difficult for caller to deduplicate sets
    # because different symbols can point to the same rev.
    cl = repo.changelog
    ls = []
    seen = set()
    for t in s.split(b'\0'):
        try:
            # fast path for integer revision
            r = int(t)
            if (b'%d' % r) != t or r not in cl:
                raise ValueError
            revs = [r]
        except ValueError:
            revs = stringset(repo, subset, t, defineorder)

        for r in revs:
            if r in seen:
                continue
            if (
                r in subset
                or r in _virtualrevs
                and isinstance(subset, fullreposet)
            ):
                ls.append(r)
            seen.add(r)
    return baseset(ls)


# for internal use
@predicate(b'_list', safe=True, takeorder=True)
def _list(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedlist(repo, fullreposet(repo), x)
    else:
        return _orderedlist(repo, subset, x)


def _orderedintlist(repo, subset, x):
    s = getstring(x, b"internal error")
    if not s:
        return baseset()
    ls = [int(r) for r in s.split(b'\0')]
    s = subset
    return baseset([r for r in ls if r in s])


# for internal use
@predicate(b'_intlist', safe=True, takeorder=True, weight=0)
def _intlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedintlist(repo, fullreposet(repo), x)
    else:
        return _orderedintlist(repo, subset, x)


def _orderedhexlist(repo, subset, x):
    s = getstring(x, b"internal error")
    if not s:
        return baseset()
    cl = repo.changelog
    ls = [cl.rev(bin(r)) for r in s.split(b'\0')]
    s = subset
    return baseset([r for r in ls if r in s])


# for internal use
@predicate(b'_hexlist', safe=True, takeorder=True)
def _hexlist(repo, subset, x, order):
    if order == followorder:
        # slow path to take the subset order
        return subset & _orderedhexlist(repo, fullreposet(repo), x)
    else:
        return _orderedhexlist(repo, subset, x)
2750
2766
2751
2767
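Both _orderedintlist() and _orderedhexlist() above resolve the serialized list and then filter it against the subset while preserving the caller's order. A tiny self-contained analogue of that filtering step (ordered_filter is a hypothetical helper for illustration, not Mercurial API):

# analogue of the order-preserving filter in _orderedintlist()/_orderedhexlist():
# keep the caller's order, drop revisions that are outside the subset
def ordered_filter(requested, subset):
    members = set(subset)
    return [r for r in requested if r in members]

print(ordered_filter([5, 3, 9], {1, 3, 5}))  # -> [5, 3]
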
2752 methods = {
2768 methods = {
2753 b"range": rangeset,
2769 b"range": rangeset,
2754 b"rangeall": rangeall,
2770 b"rangeall": rangeall,
2755 b"rangepre": rangepre,
2771 b"rangepre": rangepre,
2756 b"rangepost": rangepost,
2772 b"rangepost": rangepost,
2757 b"dagrange": dagrange,
2773 b"dagrange": dagrange,
2758 b"string": stringset,
2774 b"string": stringset,
2759 b"symbol": stringset,
2775 b"symbol": stringset,
2760 b"and": andset,
2776 b"and": andset,
2761 b"andsmally": andsmallyset,
2777 b"andsmally": andsmallyset,
2762 b"or": orset,
2778 b"or": orset,
2763 b"not": notset,
2779 b"not": notset,
2764 b"difference": differenceset,
2780 b"difference": differenceset,
2765 b"relation": relationset,
2781 b"relation": relationset,
2766 b"relsubscript": relsubscriptset,
2782 b"relsubscript": relsubscriptset,
2767 b"subscript": subscriptset,
2783 b"subscript": subscriptset,
2768 b"list": listset,
2784 b"list": listset,
2769 b"keyvalue": keyvaluepair,
2785 b"keyvalue": keyvaluepair,
2770 b"func": func,
2786 b"func": func,
2771 b"ancestor": ancestorspec,
2787 b"ancestor": ancestorspec,
2772 b"parent": parentspec,
2788 b"parent": parentspec,
2773 b"parentpost": parentpost,
2789 b"parentpost": parentpost,
2774 b"smartset": rawsmartset,
2790 b"smartset": rawsmartset,
2791 b"nodeset": raw_node_set,
2775 }
2792 }
2776
2793
2777 relations = {
2794 relations = {
2778 b"g": generationsrel,
2795 b"g": generationsrel,
2779 b"generations": generationsrel,
2796 b"generations": generationsrel,
2780 }
2797 }
2781
2798
2782 subscriptrelations = {
2799 subscriptrelations = {
2783 b"g": generationssubrel,
2800 b"g": generationssubrel,
2784 b"generations": generationssubrel,
2801 b"generations": generationssubrel,
2785 }
2802 }
2786
2803
2787
2804
2788 def lookupfn(repo):
2805 def lookupfn(repo):
2789 def fn(symbol):
2806 def fn(symbol):
2790 try:
2807 try:
2791 return scmutil.isrevsymbol(repo, symbol)
2808 return scmutil.isrevsymbol(repo, symbol)
2792 except error.AmbiguousPrefixLookupError:
2809 except error.AmbiguousPrefixLookupError:
2793 raise error.InputError(
2810 raise error.InputError(
2794 b'ambiguous revision identifier: %s' % symbol
2811 b'ambiguous revision identifier: %s' % symbol
2795 )
2812 )
2796
2813
2797 return fn
2814 return fn
2798
2815
2799
2816
2800 def match(ui, spec, lookup=None):
2817 def match(ui, spec, lookup=None):
2801 """Create a matcher for a single revision spec"""
2818 """Create a matcher for a single revision spec"""
2802 return matchany(ui, [spec], lookup=lookup)
2819 return matchany(ui, [spec], lookup=lookup)
2803
2820
2804
2821
2805 def matchany(ui, specs, lookup=None, localalias=None):
2822 def matchany(ui, specs, lookup=None, localalias=None):
2806 """Create a matcher that will include any revisions matching one of the
2823 """Create a matcher that will include any revisions matching one of the
2807 given specs
2824 given specs
2808
2825
2809 If lookup function is not None, the parser will first attempt to handle
2826 If lookup function is not None, the parser will first attempt to handle
2810 old-style ranges, which may contain operator characters.
2827 old-style ranges, which may contain operator characters.
2811
2828
2812 If localalias is not None, it is a dict {name: definitionstring}. It takes
2829 If localalias is not None, it is a dict {name: definitionstring}. It takes
2813 precedence over [revsetalias] config section.
2830 precedence over [revsetalias] config section.
2814 """
2831 """
2815 if not specs:
2832 if not specs:
2816
2833
2817 def mfunc(repo, subset=None):
2834 def mfunc(repo, subset=None):
2818 return baseset()
2835 return baseset()
2819
2836
2820 return mfunc
2837 return mfunc
2821 if not all(specs):
2838 if not all(specs):
2822 raise error.ParseError(_(b"empty query"))
2839 raise error.ParseError(_(b"empty query"))
2823 if len(specs) == 1:
2840 if len(specs) == 1:
2824 tree = revsetlang.parse(specs[0], lookup)
2841 tree = revsetlang.parse(specs[0], lookup)
2825 else:
2842 else:
2826 tree = (
2843 tree = (
2827 b'or',
2844 b'or',
2828 (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
2845 (b'list',) + tuple(revsetlang.parse(s, lookup) for s in specs),
2829 )
2846 )
2830
2847
2831 aliases = []
2848 aliases = []
2832 warn = None
2849 warn = None
2833 if ui:
2850 if ui:
2834 aliases.extend(ui.configitems(b'revsetalias'))
2851 aliases.extend(ui.configitems(b'revsetalias'))
2835 warn = ui.warn
2852 warn = ui.warn
2836 if localalias:
2853 if localalias:
2837 aliases.extend(localalias.items())
2854 aliases.extend(localalias.items())
2838 if aliases:
2855 if aliases:
2839 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2856 tree = revsetlang.expandaliases(tree, aliases, warn=warn)
2840 tree = revsetlang.foldconcat(tree)
2857 tree = revsetlang.foldconcat(tree)
2841 tree = revsetlang.analyze(tree)
2858 tree = revsetlang.analyze(tree)
2842 tree = revsetlang.optimize(tree)
2859 tree = revsetlang.optimize(tree)
2843 return makematcher(tree)
2860 return makematcher(tree)
2844
2861
2845
2862
2846 def makematcher(tree):
2863 def makematcher(tree):
2847 """Create a matcher from an evaluatable tree"""
2864 """Create a matcher from an evaluatable tree"""
2848
2865
2849 def mfunc(repo, subset=None, order=None):
2866 def mfunc(repo, subset=None, order=None):
2850 if order is None:
2867 if order is None:
2851 if subset is None:
2868 if subset is None:
2852 order = defineorder # 'x'
2869 order = defineorder # 'x'
2853 else:
2870 else:
2854 order = followorder # 'subset & x'
2871 order = followorder # 'subset & x'
2855 if subset is None:
2872 if subset is None:
2856 subset = fullreposet(repo)
2873 subset = fullreposet(repo)
2857 return getset(repo, subset, tree, order)
2874 return getset(repo, subset, tree, order)
2858
2875
2859 return mfunc
2876 return mfunc
2860
2877
2861
2878
2862 def loadpredicate(ui, extname, registrarobj):
2879 def loadpredicate(ui, extname, registrarobj):
2863 """Load revset predicates from specified registrarobj"""
2880 """Load revset predicates from specified registrarobj"""
2864 for name, func in registrarobj._table.items():
2881 for name, func in registrarobj._table.items():
2865 symbols[name] = func
2882 symbols[name] = func
2866 if func._safe:
2883 if func._safe:
2867 safesymbols.add(name)
2884 safesymbols.add(name)
2868
2885
2869
2886
2870 # load built-in predicates explicitly to setup safesymbols
2887 # load built-in predicates explicitly to setup safesymbols
2871 loadpredicate(None, None, predicate)
2888 loadpredicate(None, None, predicate)
2872
2889
2873 # tell hggettext to extract docstrings from these functions:
2890 # tell hggettext to extract docstrings from these functions:
2874 i18nfunctions = symbols.values()
2891 i18nfunctions = symbols.values()
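
The new b"nodeset" entry in the methods table routes a ('nodeset', ...) leaf of an evaluatable tree to raw_node_set instead of forcing the nodes through a hex string. A minimal sketch of the difference, assuming the mercurial package from this checkout is importable; the node ids are made-up 20-byte placeholder values:

# minimal sketch, assuming 'mercurial' is importable from this checkout
from mercurial import revsetlang

nodes = [b'\x11' * 20, b'\x22' * 20]

# formatspec() still builds a textual spec and hex-encodes every node:
spec = revsetlang.formatspec(b'ancestors(%ln)', nodes)
# roughly: b"ancestors(_hexlist('1111...\x002222...'))"
print(spec)

# spectree() now keeps the raw nodes in a ('nodeset', ...) leaf, so a
# large node list is no longer hex-serialized and re-parsed
tree = revsetlang.spectree(b'ancestors(%ln)', nodes)
assert tree[2][0] == b'nodeset'  # the nodes stay a raw 'nodeset' leaf
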
@@ -1,940 +1,953 b''
1 # revsetlang.py - parser, tokenizer and utility for revision set language
1 # revsetlang.py - parser, tokenizer and utility for revision set language
2 #
2 #
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2010 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import string
9 import string
10
10
11 from .i18n import _
11 from .i18n import _
12 from .node import hex
12 from .node import hex
13 from . import (
13 from . import (
14 error,
14 error,
15 parser,
15 parser,
16 pycompat,
16 pycompat,
17 smartset,
17 smartset,
18 util,
18 util,
19 )
19 )
20 from .utils import stringutil
20 from .utils import stringutil
21
21
22 elements = {
22 elements = {
23 # token-type: binding-strength, primary, prefix, infix, suffix
23 # token-type: binding-strength, primary, prefix, infix, suffix
24 b"(": (21, None, (b"group", 1, b")"), (b"func", 1, b")"), None),
24 b"(": (21, None, (b"group", 1, b")"), (b"func", 1, b")"), None),
25 b"[": (21, None, None, (b"subscript", 1, b"]"), None),
25 b"[": (21, None, None, (b"subscript", 1, b"]"), None),
26 b"#": (21, None, None, (b"relation", 21), None),
26 b"#": (21, None, None, (b"relation", 21), None),
27 b"##": (20, None, None, (b"_concat", 20), None),
27 b"##": (20, None, None, (b"_concat", 20), None),
28 b"~": (18, None, None, (b"ancestor", 18), None),
28 b"~": (18, None, None, (b"ancestor", 18), None),
29 b"^": (18, None, None, (b"parent", 18), b"parentpost"),
29 b"^": (18, None, None, (b"parent", 18), b"parentpost"),
30 b"-": (5, None, (b"negate", 19), (b"minus", 5), None),
30 b"-": (5, None, (b"negate", 19), (b"minus", 5), None),
31 b"::": (
31 b"::": (
32 17,
32 17,
33 b"dagrangeall",
33 b"dagrangeall",
34 (b"dagrangepre", 17),
34 (b"dagrangepre", 17),
35 (b"dagrange", 17),
35 (b"dagrange", 17),
36 b"dagrangepost",
36 b"dagrangepost",
37 ),
37 ),
38 b"..": (
38 b"..": (
39 17,
39 17,
40 b"dagrangeall",
40 b"dagrangeall",
41 (b"dagrangepre", 17),
41 (b"dagrangepre", 17),
42 (b"dagrange", 17),
42 (b"dagrange", 17),
43 b"dagrangepost",
43 b"dagrangepost",
44 ),
44 ),
45 b":": (15, b"rangeall", (b"rangepre", 15), (b"range", 15), b"rangepost"),
45 b":": (15, b"rangeall", (b"rangepre", 15), (b"range", 15), b"rangepost"),
46 b"not": (10, None, (b"not", 10), None, None),
46 b"not": (10, None, (b"not", 10), None, None),
47 b"!": (10, None, (b"not", 10), None, None),
47 b"!": (10, None, (b"not", 10), None, None),
48 b"and": (5, None, None, (b"and", 5), None),
48 b"and": (5, None, None, (b"and", 5), None),
49 b"&": (5, None, None, (b"and", 5), None),
49 b"&": (5, None, None, (b"and", 5), None),
50 b"%": (5, None, None, (b"only", 5), b"onlypost"),
50 b"%": (5, None, None, (b"only", 5), b"onlypost"),
51 b"or": (4, None, None, (b"or", 4), None),
51 b"or": (4, None, None, (b"or", 4), None),
52 b"|": (4, None, None, (b"or", 4), None),
52 b"|": (4, None, None, (b"or", 4), None),
53 b"+": (4, None, None, (b"or", 4), None),
53 b"+": (4, None, None, (b"or", 4), None),
54 b"=": (3, None, None, (b"keyvalue", 3), None),
54 b"=": (3, None, None, (b"keyvalue", 3), None),
55 b",": (2, None, None, (b"list", 2), None),
55 b",": (2, None, None, (b"list", 2), None),
56 b")": (0, None, None, None, None),
56 b")": (0, None, None, None, None),
57 b"]": (0, None, None, None, None),
57 b"]": (0, None, None, None, None),
58 b"symbol": (0, b"symbol", None, None, None),
58 b"symbol": (0, b"symbol", None, None, None),
59 b"string": (0, b"string", None, None, None),
59 b"string": (0, b"string", None, None, None),
60 b"end": (0, None, None, None, None),
60 b"end": (0, None, None, None, None),
61 }
61 }
62
62
63 keywords = {b'and', b'or', b'not'}
63 keywords = {b'and', b'or', b'not'}
64
64
65 symbols = {}
65 symbols = {}
66
66
67 _quoteletters = {b'"', b"'"}
67 _quoteletters = {b'"', b"'"}
68 _simpleopletters = set(pycompat.iterbytestr(b"()[]#:=,-|&+!~^%"))
68 _simpleopletters = set(pycompat.iterbytestr(b"()[]#:=,-|&+!~^%"))
69
69
70 # default set of valid characters for the initial letter of symbols
70 # default set of valid characters for the initial letter of symbols
71 _syminitletters = set(
71 _syminitletters = set(
72 pycompat.iterbytestr(
72 pycompat.iterbytestr(
73 pycompat.sysbytes(string.ascii_letters)
73 pycompat.sysbytes(string.ascii_letters)
74 + pycompat.sysbytes(string.digits)
74 + pycompat.sysbytes(string.digits)
75 + b'._@'
75 + b'._@'
76 )
76 )
77 ) | set(map(pycompat.bytechr, range(128, 256)))
77 ) | set(map(pycompat.bytechr, range(128, 256)))
78
78
79 # default set of valid characters for non-initial letters of symbols
79 # default set of valid characters for non-initial letters of symbols
80 _symletters = _syminitletters | set(pycompat.iterbytestr(b'-/'))
80 _symletters = _syminitletters | set(pycompat.iterbytestr(b'-/'))
81
81
82
82
83 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
83 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
84 """
84 """
85 Parse a revset statement into a stream of tokens
85 Parse a revset statement into a stream of tokens
86
86
87 ``syminitletters`` is the set of valid characters for the initial
87 ``syminitletters`` is the set of valid characters for the initial
88 letter of symbols.
88 letter of symbols.
89
89
90 By default, character ``c`` is recognized as valid for initial
90 By default, character ``c`` is recognized as valid for initial
91 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
91 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
92
92
93 ``symletters`` is the set of valid characters for non-initial
93 ``symletters`` is the set of valid characters for non-initial
94 letters of symbols.
94 letters of symbols.
95
95
96 By default, character ``c`` is recognized as valid for non-initial
96 By default, character ``c`` is recognized as valid for non-initial
97 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
97 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
98
98
99 Check that @ is a valid unquoted token character (issue3686):
99 Check that @ is a valid unquoted token character (issue3686):
100 >>> list(tokenize(b"@::"))
100 >>> list(tokenize(b"@::"))
101 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
101 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
102
102
103 """
103 """
104 if not isinstance(program, bytes):
104 if not isinstance(program, bytes):
105 raise error.ProgrammingError(
105 raise error.ProgrammingError(
106 b'revset statement must be bytes, got %r' % program
106 b'revset statement must be bytes, got %r' % program
107 )
107 )
108 program = pycompat.bytestr(program)
108 program = pycompat.bytestr(program)
109 if syminitletters is None:
109 if syminitletters is None:
110 syminitletters = _syminitletters
110 syminitletters = _syminitletters
111 if symletters is None:
111 if symletters is None:
112 symletters = _symletters
112 symletters = _symletters
113
113
114 if program and lookup:
114 if program and lookup:
115 # attempt to parse old-style ranges first to deal with
115 # attempt to parse old-style ranges first to deal with
116 # things like old-tag which contain query metacharacters
116 # things like old-tag which contain query metacharacters
117 parts = program.split(b':', 1)
117 parts = program.split(b':', 1)
118 if all(lookup(sym) for sym in parts if sym):
118 if all(lookup(sym) for sym in parts if sym):
119 if parts[0]:
119 if parts[0]:
120 yield (b'symbol', parts[0], 0)
120 yield (b'symbol', parts[0], 0)
121 if len(parts) > 1:
121 if len(parts) > 1:
122 s = len(parts[0])
122 s = len(parts[0])
123 yield (b':', None, s)
123 yield (b':', None, s)
124 if parts[1]:
124 if parts[1]:
125 yield (b'symbol', parts[1], s + 1)
125 yield (b'symbol', parts[1], s + 1)
126 yield (b'end', None, len(program))
126 yield (b'end', None, len(program))
127 return
127 return
128
128
129 pos, l = 0, len(program)
129 pos, l = 0, len(program)
130 while pos < l:
130 while pos < l:
131 c = program[pos]
131 c = program[pos]
132 if c.isspace(): # skip inter-token whitespace
132 if c.isspace(): # skip inter-token whitespace
133 pass
133 pass
134 elif (
134 elif (
135 c == b':' and program[pos : pos + 2] == b'::'
135 c == b':' and program[pos : pos + 2] == b'::'
136 ): # look ahead carefully
136 ): # look ahead carefully
137 yield (b'::', None, pos)
137 yield (b'::', None, pos)
138 pos += 1 # skip ahead
138 pos += 1 # skip ahead
139 elif (
139 elif (
140 c == b'.' and program[pos : pos + 2] == b'..'
140 c == b'.' and program[pos : pos + 2] == b'..'
141 ): # look ahead carefully
141 ): # look ahead carefully
142 yield (b'..', None, pos)
142 yield (b'..', None, pos)
143 pos += 1 # skip ahead
143 pos += 1 # skip ahead
144 elif (
144 elif (
145 c == b'#' and program[pos : pos + 2] == b'##'
145 c == b'#' and program[pos : pos + 2] == b'##'
146 ): # look ahead carefully
146 ): # look ahead carefully
147 yield (b'##', None, pos)
147 yield (b'##', None, pos)
148 pos += 1 # skip ahead
148 pos += 1 # skip ahead
149 elif c in _simpleopletters: # handle simple operators
149 elif c in _simpleopletters: # handle simple operators
150 yield (c, None, pos)
150 yield (c, None, pos)
151 elif (
151 elif (
152 c in _quoteletters
152 c in _quoteletters
153 or c == b'r'
153 or c == b'r'
154 and program[pos : pos + 2] in (b"r'", b'r"')
154 and program[pos : pos + 2] in (b"r'", b'r"')
155 ): # handle quoted strings
155 ): # handle quoted strings
156 if c == b'r':
156 if c == b'r':
157 pos += 1
157 pos += 1
158 c = program[pos]
158 c = program[pos]
159 decode = lambda x: x
159 decode = lambda x: x
160 else:
160 else:
161 decode = parser.unescapestr
161 decode = parser.unescapestr
162 pos += 1
162 pos += 1
163 s = pos
163 s = pos
164 while pos < l: # find closing quote
164 while pos < l: # find closing quote
165 d = program[pos]
165 d = program[pos]
166 if d == b'\\': # skip over escaped characters
166 if d == b'\\': # skip over escaped characters
167 pos += 2
167 pos += 2
168 continue
168 continue
169 if d == c:
169 if d == c:
170 yield (b'string', decode(program[s:pos]), s)
170 yield (b'string', decode(program[s:pos]), s)
171 break
171 break
172 pos += 1
172 pos += 1
173 else:
173 else:
174 raise error.ParseError(_(b"unterminated string"), s)
174 raise error.ParseError(_(b"unterminated string"), s)
175 # gather up a symbol/keyword
175 # gather up a symbol/keyword
176 elif c in syminitletters:
176 elif c in syminitletters:
177 s = pos
177 s = pos
178 pos += 1
178 pos += 1
179 while pos < l: # find end of symbol
179 while pos < l: # find end of symbol
180 d = program[pos]
180 d = program[pos]
181 if d not in symletters:
181 if d not in symletters:
182 break
182 break
183 if (
183 if (
184 d == b'.' and program[pos - 1] == b'.'
184 d == b'.' and program[pos - 1] == b'.'
185 ): # special case for ..
185 ): # special case for ..
186 pos -= 1
186 pos -= 1
187 break
187 break
188 pos += 1
188 pos += 1
189 sym = program[s:pos]
189 sym = program[s:pos]
190 if sym in keywords: # operator keywords
190 if sym in keywords: # operator keywords
191 yield (sym, None, s)
191 yield (sym, None, s)
192 elif b'-' in sym:
192 elif b'-' in sym:
193 # some jerk gave us foo-bar-baz, try to check if it's a symbol
193 # some jerk gave us foo-bar-baz, try to check if it's a symbol
194 if lookup and lookup(sym):
194 if lookup and lookup(sym):
195 # looks like a real symbol
195 # looks like a real symbol
196 yield (b'symbol', sym, s)
196 yield (b'symbol', sym, s)
197 else:
197 else:
198 # looks like an expression
198 # looks like an expression
199 parts = sym.split(b'-')
199 parts = sym.split(b'-')
200 for p in parts[:-1]:
200 for p in parts[:-1]:
201 if p: # possible consecutive -
201 if p: # possible consecutive -
202 yield (b'symbol', p, s)
202 yield (b'symbol', p, s)
203 s += len(p)
203 s += len(p)
204 yield (b'-', None, s)
204 yield (b'-', None, s)
205 s += 1
205 s += 1
206 if parts[-1]: # possible trailing -
206 if parts[-1]: # possible trailing -
207 yield (b'symbol', parts[-1], s)
207 yield (b'symbol', parts[-1], s)
208 else:
208 else:
209 yield (b'symbol', sym, s)
209 yield (b'symbol', sym, s)
210 pos -= 1
210 pos -= 1
211 else:
211 else:
212 raise error.ParseError(
212 raise error.ParseError(
213 _(b"syntax error in revset '%s'") % program, pos
213 _(b"syntax error in revset '%s'") % program, pos
214 )
214 )
215 pos += 1
215 pos += 1
216 yield (b'end', None, pos)
216 yield (b'end', None, pos)
217
217
218
218
219 # helpers
219 # helpers
220
220
221 _notset = object()
221 _notset = object()
222
222
223
223
224 def getsymbol(x):
224 def getsymbol(x):
225 if x and x[0] == b'symbol':
225 if x and x[0] == b'symbol':
226 return x[1]
226 return x[1]
227 raise error.ParseError(_(b'not a symbol'))
227 raise error.ParseError(_(b'not a symbol'))
228
228
229
229
230 def getstring(x, err):
230 def getstring(x, err):
231 if x and (x[0] == b'string' or x[0] == b'symbol'):
231 if x and (x[0] == b'string' or x[0] == b'symbol'):
232 return x[1]
232 return x[1]
233 raise error.ParseError(err)
233 raise error.ParseError(err)
234
234
235
235
236 def getinteger(x, err, default=_notset):
236 def getinteger(x, err, default=_notset):
237 if not x and default is not _notset:
237 if not x and default is not _notset:
238 return default
238 return default
239 try:
239 try:
240 return int(getstring(x, err))
240 return int(getstring(x, err))
241 except ValueError:
241 except ValueError:
242 raise error.ParseError(err)
242 raise error.ParseError(err)
243
243
244
244
245 def getboolean(x, err):
245 def getboolean(x, err):
246 value = stringutil.parsebool(getsymbol(x))
246 value = stringutil.parsebool(getsymbol(x))
247 if value is not None:
247 if value is not None:
248 return value
248 return value
249 raise error.ParseError(err)
249 raise error.ParseError(err)
250
250
251
251
252 def getlist(x):
252 def getlist(x):
253 if not x:
253 if not x:
254 return []
254 return []
255 if x[0] == b'list':
255 if x[0] == b'list':
256 return list(x[1:])
256 return list(x[1:])
257 return [x]
257 return [x]
258
258
259
259
260 def getrange(x, err):
260 def getrange(x, err):
261 if not x:
261 if not x:
262 raise error.ParseError(err)
262 raise error.ParseError(err)
263 op = x[0]
263 op = x[0]
264 if op == b'range':
264 if op == b'range':
265 return x[1], x[2]
265 return x[1], x[2]
266 elif op == b'rangepre':
266 elif op == b'rangepre':
267 return None, x[1]
267 return None, x[1]
268 elif op == b'rangepost':
268 elif op == b'rangepost':
269 return x[1], None
269 return x[1], None
270 elif op == b'rangeall':
270 elif op == b'rangeall':
271 return None, None
271 return None, None
272 raise error.ParseError(err)
272 raise error.ParseError(err)
273
273
274
274
275 def getintrange(x, err1, err2, deffirst=_notset, deflast=_notset):
275 def getintrange(x, err1, err2, deffirst=_notset, deflast=_notset):
276 """Get [first, last] integer range (both inclusive) from a parsed tree
276 """Get [first, last] integer range (both inclusive) from a parsed tree
277
277
278 If either side is omitted and no default is provided, ParseError will
278 If either side is omitted and no default is provided, ParseError will
279 be raised.
279 be raised.
280 """
280 """
281 if x and (x[0] == b'string' or x[0] == b'symbol'):
281 if x and (x[0] == b'string' or x[0] == b'symbol'):
282 n = getinteger(x, err1)
282 n = getinteger(x, err1)
283 return n, n
283 return n, n
284 a, b = getrange(x, err1)
284 a, b = getrange(x, err1)
285 return getinteger(a, err2, deffirst), getinteger(b, err2, deflast)
285 return getinteger(a, err2, deffirst), getinteger(b, err2, deflast)
286
286
287
287
288 def getargs(x, min, max, err):
288 def getargs(x, min, max, err):
289 l = getlist(x)
289 l = getlist(x)
290 if len(l) < min or (max >= 0 and len(l) > max):
290 if len(l) < min or (max >= 0 and len(l) > max):
291 raise error.ParseError(err)
291 raise error.ParseError(err)
292 return l
292 return l
293
293
294
294
295 def getargsdict(x, funcname, keys):
295 def getargsdict(x, funcname, keys):
296 return parser.buildargsdict(
296 return parser.buildargsdict(
297 getlist(x),
297 getlist(x),
298 funcname,
298 funcname,
299 parser.splitargspec(keys),
299 parser.splitargspec(keys),
300 keyvaluenode=b'keyvalue',
300 keyvaluenode=b'keyvalue',
301 keynode=b'symbol',
301 keynode=b'symbol',
302 )
302 )
303
303
304
304
305 # cache of {spec: raw parsed tree} built internally
305 # cache of {spec: raw parsed tree} built internally
306 _treecache = {}
306 _treecache = {}
307
307
308
308
309 def _cachedtree(spec):
309 def _cachedtree(spec):
310 # thread safe because parse() is reentrant and dict.__setitem__() is atomic
310 # thread safe because parse() is reentrant and dict.__setitem__() is atomic
311 tree = _treecache.get(spec)
311 tree = _treecache.get(spec)
312 if tree is None:
312 if tree is None:
313 _treecache[spec] = tree = parse(spec)
313 _treecache[spec] = tree = parse(spec)
314 return tree
314 return tree
315
315
316
316
317 def _build(tmplspec, *repls):
317 def _build(tmplspec, *repls):
318 """Create raw parsed tree from a template revset statement
318 """Create raw parsed tree from a template revset statement
319
319
320 >>> _build(b'f(_) and _', (b'string', b'1'), (b'symbol', b'2'))
320 >>> _build(b'f(_) and _', (b'string', b'1'), (b'symbol', b'2'))
321 ('and', ('func', ('symbol', 'f'), ('string', '1')), ('symbol', '2'))
321 ('and', ('func', ('symbol', 'f'), ('string', '1')), ('symbol', '2'))
322 """
322 """
323 template = _cachedtree(tmplspec)
323 template = _cachedtree(tmplspec)
324 return parser.buildtree(template, (b'symbol', b'_'), *repls)
324 return parser.buildtree(template, (b'symbol', b'_'), *repls)
325
325
326
326
327 def _match(patspec, tree):
327 def _match(patspec, tree):
328 """Test if a tree matches the given pattern statement; return the matches
328 """Test if a tree matches the given pattern statement; return the matches
329
329
330 >>> _match(b'f(_)', parse(b'f()'))
330 >>> _match(b'f(_)', parse(b'f()'))
331 >>> _match(b'f(_)', parse(b'f(1)'))
331 >>> _match(b'f(_)', parse(b'f(1)'))
332 [('func', ('symbol', 'f'), ('symbol', '1')), ('symbol', '1')]
332 [('func', ('symbol', 'f'), ('symbol', '1')), ('symbol', '1')]
333 >>> _match(b'f(_)', parse(b'f(1, 2)'))
333 >>> _match(b'f(_)', parse(b'f(1, 2)'))
334 """
334 """
335 pattern = _cachedtree(patspec)
335 pattern = _cachedtree(patspec)
336 return parser.matchtree(
336 return parser.matchtree(
337 pattern, tree, (b'symbol', b'_'), {b'keyvalue', b'list'}
337 pattern, tree, (b'symbol', b'_'), {b'keyvalue', b'list'}
338 )
338 )
339
339
340
340
341 def _matchonly(revs, bases):
341 def _matchonly(revs, bases):
342 return _match(b'ancestors(_) and not ancestors(_)', (b'and', revs, bases))
342 return _match(b'ancestors(_) and not ancestors(_)', (b'and', revs, bases))
343
343
344
344
345 def _fixops(x):
345 def _fixops(x):
346 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
346 """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
347 handled well by our simple top-down parser"""
347 handled well by our simple top-down parser"""
348 if not isinstance(x, tuple):
348 if not isinstance(x, tuple):
349 return x
349 return x
350
350
351 op = x[0]
351 op = x[0]
352 if op == b'parent':
352 if op == b'parent':
353 # x^:y means (x^) : y, not x ^ (:y)
353 # x^:y means (x^) : y, not x ^ (:y)
354 # x^: means (x^) :, not x ^ (:)
354 # x^: means (x^) :, not x ^ (:)
355 post = (b'parentpost', x[1])
355 post = (b'parentpost', x[1])
356 if x[2][0] == b'dagrangepre':
356 if x[2][0] == b'dagrangepre':
357 return _fixops((b'dagrange', post, x[2][1]))
357 return _fixops((b'dagrange', post, x[2][1]))
358 elif x[2][0] == b'dagrangeall':
358 elif x[2][0] == b'dagrangeall':
359 return _fixops((b'dagrangepost', post))
359 return _fixops((b'dagrangepost', post))
360 elif x[2][0] == b'rangepre':
360 elif x[2][0] == b'rangepre':
361 return _fixops((b'range', post, x[2][1]))
361 return _fixops((b'range', post, x[2][1]))
362 elif x[2][0] == b'rangeall':
362 elif x[2][0] == b'rangeall':
363 return _fixops((b'rangepost', post))
363 return _fixops((b'rangepost', post))
364 elif op == b'or':
364 elif op == b'or':
365 # make number of arguments deterministic:
365 # make number of arguments deterministic:
366 # x + y + z -> (or x y z) -> (or (list x y z))
366 # x + y + z -> (or x y z) -> (or (list x y z))
367 return (op, _fixops((b'list',) + x[1:]))
367 return (op, _fixops((b'list',) + x[1:]))
368 elif op == b'subscript' and x[1][0] == b'relation':
368 elif op == b'subscript' and x[1][0] == b'relation':
369 # x#y[z] ternary
369 # x#y[z] ternary
370 return _fixops((b'relsubscript', x[1][1], x[1][2], x[2]))
370 return _fixops((b'relsubscript', x[1][1], x[1][2], x[2]))
371
371
372 return (op,) + tuple(_fixops(y) for y in x[1:])
372 return (op,) + tuple(_fixops(y) for y in x[1:])
373
373
374
374
375 def _analyze(x):
375 def _analyze(x):
376 if x is None:
376 if x is None:
377 return x
377 return x
378
378
379 op = x[0]
379 op = x[0]
380 if op == b'minus':
380 if op == b'minus':
381 return _analyze(_build(b'_ and not _', *x[1:]))
381 return _analyze(_build(b'_ and not _', *x[1:]))
382 elif op == b'only':
382 elif op == b'only':
383 return _analyze(_build(b'only(_, _)', *x[1:]))
383 return _analyze(_build(b'only(_, _)', *x[1:]))
384 elif op == b'onlypost':
384 elif op == b'onlypost':
385 return _analyze(_build(b'only(_)', x[1]))
385 return _analyze(_build(b'only(_)', x[1]))
386 elif op == b'dagrangeall':
386 elif op == b'dagrangeall':
387 raise error.ParseError(_(b"can't use '::' in this context"))
387 raise error.ParseError(_(b"can't use '::' in this context"))
388 elif op == b'dagrangepre':
388 elif op == b'dagrangepre':
389 return _analyze(_build(b'ancestors(_)', x[1]))
389 return _analyze(_build(b'ancestors(_)', x[1]))
390 elif op == b'dagrangepost':
390 elif op == b'dagrangepost':
391 return _analyze(_build(b'descendants(_)', x[1]))
391 return _analyze(_build(b'descendants(_)', x[1]))
392 elif op == b'negate':
392 elif op == b'negate':
393 s = getstring(x[1], _(b"can't negate that"))
393 s = getstring(x[1], _(b"can't negate that"))
394 return _analyze((b'string', b'-' + s))
394 return _analyze((b'string', b'-' + s))
395 elif op in (b'string', b'symbol', b'smartset'):
395 elif op in (b'string', b'symbol', b'smartset', b'nodeset'):
396 return x
396 return x
397 elif op == b'rangeall':
397 elif op == b'rangeall':
398 return (op, None)
398 return (op, None)
399 elif op in {b'or', b'not', b'rangepre', b'rangepost', b'parentpost'}:
399 elif op in {b'or', b'not', b'rangepre', b'rangepost', b'parentpost'}:
400 return (op, _analyze(x[1]))
400 return (op, _analyze(x[1]))
401 elif op == b'group':
401 elif op == b'group':
402 return _analyze(x[1])
402 return _analyze(x[1])
403 elif op in {
403 elif op in {
404 b'and',
404 b'and',
405 b'dagrange',
405 b'dagrange',
406 b'range',
406 b'range',
407 b'parent',
407 b'parent',
408 b'ancestor',
408 b'ancestor',
409 b'relation',
409 b'relation',
410 b'subscript',
410 b'subscript',
411 }:
411 }:
412 ta = _analyze(x[1])
412 ta = _analyze(x[1])
413 tb = _analyze(x[2])
413 tb = _analyze(x[2])
414 return (op, ta, tb)
414 return (op, ta, tb)
415 elif op == b'relsubscript':
415 elif op == b'relsubscript':
416 ta = _analyze(x[1])
416 ta = _analyze(x[1])
417 tb = _analyze(x[2])
417 tb = _analyze(x[2])
418 tc = _analyze(x[3])
418 tc = _analyze(x[3])
419 return (op, ta, tb, tc)
419 return (op, ta, tb, tc)
420 elif op == b'list':
420 elif op == b'list':
421 return (op,) + tuple(_analyze(y) for y in x[1:])
421 return (op,) + tuple(_analyze(y) for y in x[1:])
422 elif op == b'keyvalue':
422 elif op == b'keyvalue':
423 return (op, x[1], _analyze(x[2]))
423 return (op, x[1], _analyze(x[2]))
424 elif op == b'func':
424 elif op == b'func':
425 return (op, x[1], _analyze(x[2]))
425 return (op, x[1], _analyze(x[2]))
426 raise ValueError(b'invalid operator %r' % op)
426 raise ValueError(b'invalid operator %r' % op)
427
427
428
428
429 def analyze(x):
429 def analyze(x):
430 """Transform raw parsed tree to evaluatable tree which can be fed to
430 """Transform raw parsed tree to evaluatable tree which can be fed to
431 optimize() or getset()
431 optimize() or getset()
432
432
433 All pseudo operations should be mapped to real operations or functions
433 All pseudo operations should be mapped to real operations or functions
434 defined in methods or symbols table respectively.
434 defined in methods or symbols table respectively.
435 """
435 """
436 return _analyze(x)
436 return _analyze(x)
437
437
438
438
439 def _optimize(x):
439 def _optimize(x):
440 if x is None:
440 if x is None:
441 return 0, x
441 return 0, x
442
442
443 op = x[0]
443 op = x[0]
444 if op in (b'string', b'symbol', b'smartset'):
444 if op in (b'string', b'symbol', b'smartset', b'nodeset'):
445 return 0.5, x # single revisions are small
445 # single revisions are small, and set of already computed revision are assumed to be cheap.
446 return 0.5, x
446 elif op == b'and':
447 elif op == b'and':
447 wa, ta = _optimize(x[1])
448 wa, ta = _optimize(x[1])
448 wb, tb = _optimize(x[2])
449 wb, tb = _optimize(x[2])
449 w = min(wa, wb)
450 w = min(wa, wb)
450
451
451 # (draft/secret/_notpublic() & ::x) have a fast path
452 # (draft/secret/_notpublic() & ::x) have a fast path
452 m = _match(b'_() & ancestors(_)', (b'and', ta, tb))
453 m = _match(b'_() & ancestors(_)', (b'and', ta, tb))
453 if m and getsymbol(m[1]) in {b'draft', b'secret', b'_notpublic'}:
454 if m and getsymbol(m[1]) in {b'draft', b'secret', b'_notpublic'}:
454 return w, _build(b'_phaseandancestors(_, _)', m[1], m[2])
455 return w, _build(b'_phaseandancestors(_, _)', m[1], m[2])
455
456
456 # (::x and not ::y)/(not ::y and ::x) have a fast path
457 # (::x and not ::y)/(not ::y and ::x) have a fast path
457 m = _matchonly(ta, tb) or _matchonly(tb, ta)
458 m = _matchonly(ta, tb) or _matchonly(tb, ta)
458 if m:
459 if m:
459 return w, _build(b'only(_, _)', *m[1:])
460 return w, _build(b'only(_, _)', *m[1:])
460
461
461 m = _match(b'not _', tb)
462 m = _match(b'not _', tb)
462 if m:
463 if m:
463 return wa, (b'difference', ta, m[1])
464 return wa, (b'difference', ta, m[1])
464 if wa > wb:
465 if wa > wb:
465 op = b'andsmally'
466 op = b'andsmally'
466 return w, (op, ta, tb)
467 return w, (op, ta, tb)
467 elif op == b'or':
468 elif op == b'or':
468 # fast path for machine-generated expressions that are likely to have
469 # fast path for machine-generated expressions that are likely to have
469 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
470 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
470 ws, ts, ss = [], [], []
471 ws, ts, ss = [], [], []
471
472
472 def flushss():
473 def flushss():
473 if not ss:
474 if not ss:
474 return
475 return
475 if len(ss) == 1:
476 if len(ss) == 1:
476 w, t = ss[0]
477 w, t = ss[0]
477 else:
478 else:
478 s = b'\0'.join(t[1] for w, t in ss)
479 s = b'\0'.join(t[1] for w, t in ss)
479 y = _build(b'_list(_)', (b'string', s))
480 y = _build(b'_list(_)', (b'string', s))
480 w, t = _optimize(y)
481 w, t = _optimize(y)
481 ws.append(w)
482 ws.append(w)
482 ts.append(t)
483 ts.append(t)
483 del ss[:]
484 del ss[:]
484
485
485 for y in getlist(x[1]):
486 for y in getlist(x[1]):
486 w, t = _optimize(y)
487 w, t = _optimize(y)
487 if t is not None and (t[0] == b'string' or t[0] == b'symbol'):
488 if t is not None and (t[0] == b'string' or t[0] == b'symbol'):
488 ss.append((w, t))
489 ss.append((w, t))
489 continue
490 continue
490 flushss()
491 flushss()
491 ws.append(w)
492 ws.append(w)
492 ts.append(t)
493 ts.append(t)
493 flushss()
494 flushss()
494 if len(ts) == 1:
495 if len(ts) == 1:
495 return ws[0], ts[0] # 'or' operation is fully optimized out
496 return ws[0], ts[0] # 'or' operation is fully optimized out
496 return max(ws), (op, (b'list',) + tuple(ts))
497 return max(ws), (op, (b'list',) + tuple(ts))
497 elif op == b'not':
498 elif op == b'not':
498 # Optimize not public() to _notpublic() because we have a fast version
499 # Optimize not public() to _notpublic() because we have a fast version
499 if _match(b'public()', x[1]):
500 if _match(b'public()', x[1]):
500 o = _optimize(_build(b'_notpublic()'))
501 o = _optimize(_build(b'_notpublic()'))
501 return o[0], o[1]
502 return o[0], o[1]
502 else:
503 else:
503 o = _optimize(x[1])
504 o = _optimize(x[1])
504 return o[0], (op, o[1])
505 return o[0], (op, o[1])
505 elif op == b'rangeall':
506 elif op == b'rangeall':
506 return 1, x
507 return 1, x
507 elif op in (b'rangepre', b'rangepost', b'parentpost'):
508 elif op in (b'rangepre', b'rangepost', b'parentpost'):
508 o = _optimize(x[1])
509 o = _optimize(x[1])
509 return o[0], (op, o[1])
510 return o[0], (op, o[1])
510 elif op in (b'dagrange', b'range'):
511 elif op in (b'dagrange', b'range'):
511 wa, ta = _optimize(x[1])
512 wa, ta = _optimize(x[1])
512 wb, tb = _optimize(x[2])
513 wb, tb = _optimize(x[2])
513 return wa + wb, (op, ta, tb)
514 return wa + wb, (op, ta, tb)
514 elif op in (b'parent', b'ancestor', b'relation', b'subscript'):
515 elif op in (b'parent', b'ancestor', b'relation', b'subscript'):
515 w, t = _optimize(x[1])
516 w, t = _optimize(x[1])
516 return w, (op, t, x[2])
517 return w, (op, t, x[2])
517 elif op == b'relsubscript':
518 elif op == b'relsubscript':
518 w, t = _optimize(x[1])
519 w, t = _optimize(x[1])
519 return w, (op, t, x[2], x[3])
520 return w, (op, t, x[2], x[3])
520 elif op == b'list':
521 elif op == b'list':
521 ws, ts = zip(*(_optimize(y) for y in x[1:]))
522 ws, ts = zip(*(_optimize(y) for y in x[1:]))
522 return sum(ws), (op,) + ts
523 return sum(ws), (op,) + ts
523 elif op == b'keyvalue':
524 elif op == b'keyvalue':
524 w, t = _optimize(x[2])
525 w, t = _optimize(x[2])
525 return w, (op, x[1], t)
526 return w, (op, x[1], t)
526 elif op == b'func':
527 elif op == b'func':
527 f = getsymbol(x[1])
528 f = getsymbol(x[1])
528 wa, ta = _optimize(x[2])
529 wa, ta = _optimize(x[2])
529 w = getattr(symbols.get(f), '_weight', 1)
530 w = getattr(symbols.get(f), '_weight', 1)
530 m = _match(b'commonancestors(_)', ta)
531 m = _match(b'commonancestors(_)', ta)
531
532
532 # Optimize heads(commonancestors(_)) because we have a fast version
533 # Optimize heads(commonancestors(_)) because we have a fast version
533 if f == b'heads' and m:
534 if f == b'heads' and m:
534 return w + wa, _build(b'_commonancestorheads(_)', m[1])
535 return w + wa, _build(b'_commonancestorheads(_)', m[1])
535
536
536 return w + wa, (op, x[1], ta)
537 return w + wa, (op, x[1], ta)
537 raise ValueError(b'invalid operator %r' % op)
538 raise ValueError(b'invalid operator %r' % op)
538
539
539
540
540 def optimize(tree):
541 def optimize(tree):
541 """Optimize evaluatable tree
542 """Optimize evaluatable tree
542
543
543 All pseudo operations should be transformed beforehand.
544 All pseudo operations should be transformed beforehand.
544 """
545 """
545 _weight, newtree = _optimize(tree)
546 _weight, newtree = _optimize(tree)
546 return newtree
547 return newtree
547
548
548
549
549 # the set of valid characters for the initial letter of symbols in
550 # the set of valid characters for the initial letter of symbols in
550 # alias declarations and definitions
551 # alias declarations and definitions
551 _aliassyminitletters = _syminitletters | {b'$'}
552 _aliassyminitletters = _syminitletters | {b'$'}
552
553
553
554
554 def _parsewith(spec, lookup=None, syminitletters=None):
555 def _parsewith(spec, lookup=None, syminitletters=None):
555 """Generate a parse tree of given spec with given tokenizing options
556 """Generate a parse tree of given spec with given tokenizing options
556
557
557 >>> _parsewith(b'foo($1)', syminitletters=_aliassyminitletters)
558 >>> _parsewith(b'foo($1)', syminitletters=_aliassyminitletters)
558 ('func', ('symbol', 'foo'), ('symbol', '$1'))
559 ('func', ('symbol', 'foo'), ('symbol', '$1'))
559 >>> from . import error
560 >>> from . import error
560 >>> from . import pycompat
561 >>> from . import pycompat
561 >>> try:
562 >>> try:
562 ... _parsewith(b'$1')
563 ... _parsewith(b'$1')
563 ... except error.ParseError as e:
564 ... except error.ParseError as e:
564 ... pycompat.sysstr(e.message)
565 ... pycompat.sysstr(e.message)
565 ... e.location
566 ... e.location
566 "syntax error in revset '$1'"
567 "syntax error in revset '$1'"
567 0
568 0
568 >>> try:
569 >>> try:
569 ... _parsewith(b'foo bar')
570 ... _parsewith(b'foo bar')
570 ... except error.ParseError as e:
571 ... except error.ParseError as e:
571 ... pycompat.sysstr(e.message)
572 ... pycompat.sysstr(e.message)
572 ... e.location
573 ... e.location
573 'invalid token'
574 'invalid token'
574 4
575 4
575 """
576 """
576 if lookup and spec.startswith(b'revset(') and spec.endswith(b')'):
577 if lookup and spec.startswith(b'revset(') and spec.endswith(b')'):
577 lookup = None
578 lookup = None
578 p = parser.parser(elements)
579 p = parser.parser(elements)
579 tree, pos = p.parse(
580 tree, pos = p.parse(
580 tokenize(spec, lookup=lookup, syminitletters=syminitletters)
581 tokenize(spec, lookup=lookup, syminitletters=syminitletters)
581 )
582 )
582 if pos != len(spec):
583 if pos != len(spec):
583 raise error.ParseError(_(b'invalid token'), pos)
584 raise error.ParseError(_(b'invalid token'), pos)
584 return _fixops(parser.simplifyinfixops(tree, (b'list', b'or')))
585 return _fixops(parser.simplifyinfixops(tree, (b'list', b'or')))
585
586
586
587
587 class _aliasrules(parser.basealiasrules):
588 class _aliasrules(parser.basealiasrules):
588 """Parsing and expansion rule set of revset aliases"""
589 """Parsing and expansion rule set of revset aliases"""
589
590
590 _section = _(b'revset alias')
591 _section = _(b'revset alias')
591
592
592 @staticmethod
593 @staticmethod
593 def _parse(spec):
594 def _parse(spec):
594 """Parse alias declaration/definition ``spec``
595 """Parse alias declaration/definition ``spec``
595
596
596 This allows symbol names to use also ``$`` as an initial letter
597 This allows symbol names to use also ``$`` as an initial letter
597 (for backward compatibility), and callers of this function should
598 (for backward compatibility), and callers of this function should
598 examine whether ``$`` is used also for unexpected symbols or not.
599 examine whether ``$`` is used also for unexpected symbols or not.
599 """
600 """
600 return _parsewith(spec, syminitletters=_aliassyminitletters)
601 return _parsewith(spec, syminitletters=_aliassyminitletters)
601
602
602 @staticmethod
603 @staticmethod
603 def _trygetfunc(tree):
604 def _trygetfunc(tree):
604 if tree[0] == b'func' and tree[1][0] == b'symbol':
605 if tree[0] == b'func' and tree[1][0] == b'symbol':
605 return tree[1][1], getlist(tree[2])
606 return tree[1][1], getlist(tree[2])
606
607
607
608
608 def expandaliases(tree, aliases, warn=None):
609 def expandaliases(tree, aliases, warn=None):
609 """Expand aliases in a tree, aliases is a list of (name, value) tuples"""
610 """Expand aliases in a tree, aliases is a list of (name, value) tuples"""
610 aliases = _aliasrules.buildmap(aliases)
611 aliases = _aliasrules.buildmap(aliases)
611 tree = _aliasrules.expand(aliases, tree)
612 tree = _aliasrules.expand(aliases, tree)
612 # warn about problematic (but not referred) aliases
613 # warn about problematic (but not referred) aliases
613 if warn is not None:
614 if warn is not None:
614 for name, alias in sorted(aliases.items()):
615 for name, alias in sorted(aliases.items()):
615 if alias.error and not alias.warned:
616 if alias.error and not alias.warned:
616 warn(_(b'warning: %s\n') % (alias.error))
617 warn(_(b'warning: %s\n') % (alias.error))
617 alias.warned = True
618 alias.warned = True
618 return tree
619 return tree
619
620
620
621
621 def foldconcat(tree):
622 def foldconcat(tree):
622 """Fold elements to be concatenated by `##`"""
623 """Fold elements to be concatenated by `##`"""
623 if not isinstance(tree, tuple) or tree[0] in (
624 if not isinstance(tree, tuple) or tree[0] in (
624 b'string',
625 b'string',
625 b'symbol',
626 b'symbol',
626 b'smartset',
627 b'smartset',
627 ):
628 ):
628 return tree
629 return tree
629 if tree[0] == b'_concat':
630 if tree[0] == b'_concat':
630 pending = [tree]
631 pending = [tree]
631 l = []
632 l = []
632 while pending:
633 while pending:
633 e = pending.pop()
634 e = pending.pop()
634 if e[0] == b'_concat':
635 if e[0] == b'_concat':
635 pending.extend(reversed(e[1:]))
636 pending.extend(reversed(e[1:]))
636 elif e[0] in (b'string', b'symbol'):
637 elif e[0] in (b'string', b'symbol'):
637 l.append(e[1])
638 l.append(e[1])
638 else:
639 else:
639 msg = _(b"\"##\" can't concatenate \"%s\" element") % (e[0])
640 msg = _(b"\"##\" can't concatenate \"%s\" element") % (e[0])
640 raise error.ParseError(msg)
641 raise error.ParseError(msg)
641 return (b'string', b''.join(l))
642 return (b'string', b''.join(l))
642 else:
643 else:
643 return tuple(foldconcat(t) for t in tree)
644 return tuple(foldconcat(t) for t in tree)
644
645
645
646
646 def parse(spec, lookup=None):
647 def parse(spec, lookup=None):
647 try:
648 try:
648 return _parsewith(spec, lookup=lookup)
649 return _parsewith(spec, lookup=lookup)
649 except error.ParseError as inst:
650 except error.ParseError as inst:
650 if inst.location is not None:
651 if inst.location is not None:
651 loc = inst.location
652 loc = inst.location
652 # Remove newlines -- spaces are equivalent whitespace.
653 # Remove newlines -- spaces are equivalent whitespace.
653 spec = spec.replace(b'\n', b' ')
654 spec = spec.replace(b'\n', b' ')
654 # We want the caret to point to the place in the template that
655 # We want the caret to point to the place in the template that
655 # failed to parse, but in a hint we get an open paren at the
656 # failed to parse, but in a hint we get an open paren at the
656 # start. Therefore, we print "loc + 1" spaces (instead of "loc")
657 # start. Therefore, we print "loc + 1" spaces (instead of "loc")
657 # to line up the caret with the location of the error.
658 # to line up the caret with the location of the error.
658 inst.hint = spec + b'\n' + b' ' * (loc + 1) + b'^ ' + _(b'here')
659 inst.hint = spec + b'\n' + b' ' * (loc + 1) + b'^ ' + _(b'here')
659 raise
660 raise
660
661
661
662
662 def _quote(s):
663 def _quote(s):
663 r"""Quote a value in order to make it safe for the revset engine.
664 r"""Quote a value in order to make it safe for the revset engine.
664
665
665 >>> _quote(b'asdf')
666 >>> _quote(b'asdf')
666 "'asdf'"
667 "'asdf'"
667 >>> _quote(b"asdf'\"")
668 >>> _quote(b"asdf'\"")
668 '\'asdf\\\'"\''
669 '\'asdf\\\'"\''
669 >>> _quote(b'asdf\'')
670 >>> _quote(b'asdf\'')
670 "'asdf\\''"
671 "'asdf\\''"
671 >>> _quote(1)
672 >>> _quote(1)
672 "'1'"
673 "'1'"
673 """
674 """
674 return b"'%s'" % stringutil.escapestr(pycompat.bytestr(s))
675 return b"'%s'" % stringutil.escapestr(pycompat.bytestr(s))
675
676
676
677
677 def _formatargtype(c, arg):
678 def _formatargtype(c, arg):
678 if c == b'd':
679 if c == b'd':
679 return b'_rev(%d)' % int(arg)
680 return b'_rev(%d)' % int(arg)
680 elif c == b's':
681 elif c == b's':
681 return _quote(arg)
682 return _quote(arg)
682 elif c == b'r':
683 elif c == b'r':
683 if not isinstance(arg, bytes):
684 if not isinstance(arg, bytes):
684 raise TypeError
685 raise TypeError
685 parse(arg) # make sure syntax errors are confined
686 parse(arg) # make sure syntax errors are confined
686 return b'(%s)' % arg
687 return b'(%s)' % arg
687 elif c == b'n':
688 elif c == b'n':
688 return _quote(hex(arg))
689 return _quote(hex(arg))
689 elif c == b'b':
690 elif c == b'b':
690 try:
691 try:
691 return _quote(arg.branch())
692 return _quote(arg.branch())
692 except AttributeError:
693 except AttributeError:
693 raise TypeError
694 raise TypeError
694 raise error.ParseError(_(b'unexpected revspec format character %s') % c)
695 raise error.ParseError(_(b'unexpected revspec format character %s') % c)
695
696
696
697
697 def _formatlistexp(s, t):
698 def _formatlistexp(s, t):
698 l = len(s)
699 l = len(s)
699 if l == 0:
700 if l == 0:
700 return b"_list('')"
701 return b"_list('')"
701 elif l == 1:
702 elif l == 1:
702 return _formatargtype(t, s[0])
703 return _formatargtype(t, s[0])
703 elif t == b'd':
704 elif t == b'd':
704 return _formatintlist(s)
705 return _formatintlist(s)
705 elif t == b's':
706 elif t == b's':
706 return b"_list(%s)" % _quote(b"\0".join(s))
707 return b"_list(%s)" % _quote(b"\0".join(s))
707 elif t == b'n':
708 elif t == b'n':
708 return b"_hexlist('%s')" % b"\0".join(hex(a) for a in s)
709 return b"_hexlist('%s')" % b"\0".join(hex(a) for a in s)
709 elif t == b'b':
710 elif t == b'b':
710 try:
711 try:
711 return b"_list('%s')" % b"\0".join(a.branch() for a in s)
712 return b"_list('%s')" % b"\0".join(a.branch() for a in s)
712 except AttributeError:
713 except AttributeError:
713 raise TypeError
714 raise TypeError
714
715
715 m = l // 2
716 m = l // 2
716 return b'(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
717 return b'(%s or %s)' % (_formatlistexp(s[:m], t), _formatlistexp(s[m:], t))
717
718
718
719
719 def _formatintlist(data):
720 def _formatintlist(data):
720 try:
721 try:
721 l = len(data)
722 l = len(data)
722 if l == 0:
723 if l == 0:
723 return b"_list('')"
724 return b"_list('')"
724 elif l == 1:
725 elif l == 1:
725 return _formatargtype(b'd', data[0])
726 return _formatargtype(b'd', data[0])
726 return b"_intlist('%s')" % b"\0".join(b'%d' % int(a) for a in data)
727 return b"_intlist('%s')" % b"\0".join(b'%d' % int(a) for a in data)
727 except (TypeError, ValueError):
728 except (TypeError, ValueError):
728 raise error.ParseError(_(b'invalid argument for revspec'))
729 raise error.ParseError(_(b'invalid argument for revspec'))
729
730
730
731
731 def _formatparamexp(args, t):
732 def _formatparamexp(args, t):
732 return b', '.join(_formatargtype(t, a) for a in args)
733 return b', '.join(_formatargtype(t, a) for a in args)
733
734
734
735
735 _formatlistfuncs = {
736 _formatlistfuncs = {
736 b'l': _formatlistexp,
737 b'l': _formatlistexp,
737 b'p': _formatparamexp,
738 b'p': _formatparamexp,
738 }
739 }
739
740
740
741
741 def formatspec(expr, *args):
742 def formatspec(expr, *args):
742 """
743 """
743 This is a convenience function for using revsets internally, and
744 This is a convenience function for using revsets internally, and
744 escapes arguments appropriately. Aliases are intentionally ignored
745 escapes arguments appropriately. Aliases are intentionally ignored
745 so that intended expression behavior isn't accidentally subverted.
746 so that intended expression behavior isn't accidentally subverted.
746
747
747 Supported arguments:
748 Supported arguments:
748
749
749 %r = revset expression, parenthesized
750 %r = revset expression, parenthesized
750 %d = rev(int(arg)), no quoting
751 %d = rev(int(arg)), no quoting
751 %s = string(arg), escaped and single-quoted
752 %s = string(arg), escaped and single-quoted
752 %b = arg.branch(), escaped and single-quoted
753 %b = arg.branch(), escaped and single-quoted
753 %n = hex(arg), single-quoted
754 %n = hex(arg), single-quoted
754 %% = a literal '%'
755 %% = a literal '%'
755
756
756 Prefixing the type with 'l' specifies a parenthesized list of that type,
757 Prefixing the type with 'l' specifies a parenthesized list of that type,
757 and 'p' specifies a list of function parameters of that type.
758 and 'p' specifies a list of function parameters of that type.
758
759
759 >>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()"))
760 >>> formatspec(b'%r:: and %lr', b'10 or 11', (b"this()", b"that()"))
760 '(10 or 11):: and ((this()) or (that()))'
761 '(10 or 11):: and ((this()) or (that()))'
761 >>> formatspec(b'%d:: and not %d::', 10, 20)
762 >>> formatspec(b'%d:: and not %d::', 10, 20)
762 '_rev(10):: and not _rev(20)::'
763 '_rev(10):: and not _rev(20)::'
763 >>> formatspec(b'%ld or %ld', [], [1])
764 >>> formatspec(b'%ld or %ld', [], [1])
764 "_list('') or _rev(1)"
765 "_list('') or _rev(1)"
765 >>> formatspec(b'keyword(%s)', b'foo\\xe9')
766 >>> formatspec(b'keyword(%s)', b'foo\\xe9')
766 "keyword('foo\\\\xe9')"
767 "keyword('foo\\\\xe9')"
767 >>> b = lambda: b'default'
768 >>> b = lambda: b'default'
768 >>> b.branch = b
769 >>> b.branch = b
769 >>> formatspec(b'branch(%b)', b)
770 >>> formatspec(b'branch(%b)', b)
770 "branch('default')"
771 "branch('default')"
771 >>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd'])
772 >>> formatspec(b'root(%ls)', [b'a', b'b', b'c', b'd'])
772 "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
773 "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))"
773 >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
774 >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user'])
774 "sort((:), 'desc', 'user')"
775 "sort((:), 'desc', 'user')"
775 >>> formatspec(b'%ls', [b'a', b"'"])
776 >>> formatspec(b'%ls', [b'a', b"'"])
776 "_list('a\\\\x00\\\\'')"
777 "_list('a\\\\x00\\\\'')"
777 """
778 """
778 parsed = _parseargs(expr, args)
779 parsed = _parseargs(expr, args)
779 ret = []
780 ret = []
780 for t, arg in parsed:
781 for t, arg in parsed:
781 if t is None:
782 if t is None:
782 ret.append(arg)
783 ret.append(arg)
783 elif t == b'baseset':
784 elif t == b'baseset':
784 if isinstance(arg, set):
785 if isinstance(arg, set):
785 arg = sorted(arg)
786 arg = sorted(arg)
786 ret.append(_formatintlist(list(arg)))
787 ret.append(_formatintlist(list(arg)))
788 elif t == b'nodeset':
789 ret.append(_formatlistexp(list(arg), b"n"))
787 else:
790 else:
788 raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
791 raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
789 return b''.join(ret)
792 return b''.join(ret)
790
793
791
794
792 def spectree(expr, *args):
795 def spectree(expr, *args):
793 """similar to formatspec but return a parsed and optimized tree"""
796 """similar to formatspec but return a parsed and optimized tree"""
794 parsed = _parseargs(expr, args)
797 parsed = _parseargs(expr, args)
795 ret = []
798 ret = []
796 inputs = []
799 inputs = []
797 for t, arg in parsed:
800 for t, arg in parsed:
798 if t is None:
801 if t is None:
799 ret.append(arg)
802 ret.append(arg)
800 elif t == b'baseset':
803 elif t == b'baseset':
801 newtree = (b'smartset', smartset.baseset(arg))
804 newtree = (b'smartset', smartset.baseset(arg))
802 inputs.append(newtree)
805 inputs.append(newtree)
803 ret.append(b"$")
806 ret.append(b"$")
807 elif t == b'nodeset':
808 newtree = (b'nodeset', arg)
809 inputs.append(newtree)
810 ret.append(b"$")
804 else:
811 else:
805 raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
812 raise error.ProgrammingError(b"unknown revspec item type: %r" % t)
806 expr = b''.join(ret)
813 expr = b''.join(ret)
807 tree = _parsewith(expr, syminitletters=_aliassyminitletters)
814 tree = _parsewith(expr, syminitletters=_aliassyminitletters)
808 tree = parser.buildtree(tree, (b'symbol', b'$'), *inputs)
815 tree = parser.buildtree(tree, (b'symbol', b'$'), *inputs)
809 tree = foldconcat(tree)
816 tree = foldconcat(tree)
810 tree = analyze(tree)
817 tree = analyze(tree)
811 tree = optimize(tree)
818 tree = optimize(tree)
812 return tree
819 return tree
813
820
814
821
815 def _parseargs(expr, args):
822 def _parseargs(expr, args):
816 """parse the expression and replace all inexpensive args
823 """parse the expression and replace all inexpensive args
817
824
818 return a list of tuple [(arg-type, arg-value)]
825 return a list of tuple [(arg-type, arg-value)]
819
826
820 Arg-type can be:
827 Arg-type can be:
821 * None: a string ready to be concatenated into a final spec
828 * None: a string ready to be concatenated into a final spec
822 * 'baseset': an iterable of revisions
829 * 'baseset': an iterable of revisions
823 """
830 """
824 expr = pycompat.bytestr(expr)
831 expr = pycompat.bytestr(expr)
825 argiter = iter(args)
832 argiter = iter(args)
826 ret = []
833 ret = []
827 pos = 0
834 pos = 0
828 while pos < len(expr):
835 while pos < len(expr):
829 q = expr.find(b'%', pos)
836 q = expr.find(b'%', pos)
830 if q < 0:
837 if q < 0:
831 ret.append((None, expr[pos:]))
838 ret.append((None, expr[pos:]))
832 break
839 break
833 ret.append((None, expr[pos:q]))
840 ret.append((None, expr[pos:q]))
834 pos = q + 1
841 pos = q + 1
835 try:
842 try:
836 d = expr[pos]
843 d = expr[pos]
837 except IndexError:
844 except IndexError:
838 raise error.ParseError(_(b'incomplete revspec format character'))
845 raise error.ParseError(_(b'incomplete revspec format character'))
839 if d == b'%':
846 if d == b'%':
840 ret.append((None, d))
847 ret.append((None, d))
841 pos += 1
848 pos += 1
842 continue
849 continue
843
850
844 try:
851 try:
845 arg = next(argiter)
852 arg = next(argiter)
846 except StopIteration:
853 except StopIteration:
847 raise error.ParseError(_(b'missing argument for revspec'))
854 raise error.ParseError(_(b'missing argument for revspec'))
848 f = _formatlistfuncs.get(d)
855 f = _formatlistfuncs.get(d)
849 if f:
856 if f:
850 # a list of some type, might be expensive, do not replace
857 # a list of some type, might be expensive, do not replace
851 pos += 1
858 pos += 1
852 islist = d == b'l'
859 islist = d == b'l'
853 try:
860 try:
854 d = expr[pos]
861 d = expr[pos]
855 except IndexError:
862 except IndexError:
856 raise error.ParseError(
863 raise error.ParseError(
857 _(b'incomplete revspec format character')
864 _(b'incomplete revspec format character')
858 )
865 )
859 if islist and d == b'd' and arg:
866 if islist and d == b'd' and arg:
860 # we don't create a baseset yet, because it comes with an
867 # we don't create a baseset yet, because it comes with an
861 # extra cost. If we are going to serialize it, we'd better
868 # extra cost. If we are going to serialize it, we'd better
862 # skip it.
869 # skip it.
863 ret.append((b'baseset', arg))
870 ret.append((b'baseset', arg))
864 pos += 1
871 pos += 1
865 continue
872 continue
873 elif islist and d == b'n' and arg:
874 # we cannot turn the nodes into revisions yet, but not
875 # serializing them will save a lot of time for large sets.
876 ret.append((b'nodeset', arg))
877 pos += 1
878 continue
866 try:
879 try:
867 ret.append((None, f(list(arg), d)))
880 ret.append((None, f(list(arg), d)))
868 except (TypeError, ValueError):
881 except (TypeError, ValueError):
869 raise error.ParseError(_(b'invalid argument for revspec'))
882 raise error.ParseError(_(b'invalid argument for revspec'))
870 else:
883 else:
871 # a single entry, not expensive, replace
884 # a single entry, not expensive, replace
872 try:
885 try:
873 ret.append((None, _formatargtype(d, arg)))
886 ret.append((None, _formatargtype(d, arg)))
874 except (TypeError, ValueError):
887 except (TypeError, ValueError):
875 raise error.ParseError(_(b'invalid argument for revspec'))
888 raise error.ParseError(_(b'invalid argument for revspec'))
876 pos += 1
889 pos += 1
877
890
878 try:
891 try:
879 next(argiter)
892 next(argiter)
880 raise error.ParseError(_(b'too many revspec arguments specified'))
893 raise error.ParseError(_(b'too many revspec arguments specified'))
881 except StopIteration:
894 except StopIteration:
882 pass
895 pass
883 return ret
896 return ret
884
897
885
898
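
# A toy, self-contained version of the '%' scanning loop above, handling only
# literal text, '%ld' and '%ln'.  The real _parseargs supports many more
# format characters; this only shows how list arguments are tagged for later
# resolution instead of being serialized immediately.

def _sketch_parseargs(expr, args):
    argiter = iter(args)
    ret = []
    pos = 0
    while pos < len(expr):
        q = expr.find(b'%', pos)
        if q < 0:
            ret.append((None, expr[pos:]))
            break
        ret.append((None, expr[pos:q]))
        spec = expr[q + 1:q + 3]  # e.g. b'ld' or b'ln'
        arg = next(argiter)
        if spec == b'ld':
            ret.append((b'baseset', arg))  # revisions, resolved lazily
        elif spec == b'ln':
            ret.append((b'nodeset', arg))  # binary nodes, hexing deferred
        else:
            raise ValueError(spec)
        pos = q + 3
    return ret


# _sketch_parseargs(b'head() and (%ld or %ln)', ([1, 2], [b'\xaa' * 20]))
#   -> [(None, b'head() and ('), (b'baseset', [1, 2]),
#       (None, b' or '), (b'nodeset', [b'\xaa' * 20]), (None, b')')]
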
886 def prettyformat(tree):
899 def prettyformat(tree):
887 return parser.prettyformat(tree, (b'string', b'symbol'))
900 return parser.prettyformat(tree, (b'string', b'symbol'))
888
901
889
902
890 def depth(tree):
903 def depth(tree):
891 if isinstance(tree, tuple):
904 if isinstance(tree, tuple):
892 return max(map(depth, tree)) + 1
905 return max(map(depth, tree)) + 1
893 else:
906 else:
894 return 0
907 return 0
895
908
896
909
897 def funcsused(tree):
910 def funcsused(tree):
898 if not isinstance(tree, tuple) or tree[0] in (b'string', b'symbol'):
911 if not isinstance(tree, tuple) or tree[0] in (b'string', b'symbol'):
899 return set()
912 return set()
900 else:
913 else:
901 funcs = set()
914 funcs = set()
902 for s in tree[1:]:
915 for s in tree[1:]:
903 funcs |= funcsused(s)
916 funcs |= funcsused(s)
904 if tree[0] == b'func':
917 if tree[0] == b'func':
905 funcs.add(tree[1][1])
918 funcs.add(tree[1][1])
906 return funcs
919 return funcs
907
920
908
921
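
# Illustrative use of depth() and funcsused() on a hand-built parse tree;
# the tree below is an assumption for the example, mirroring the tuple shape
# that parse() returns.
#
#   tree = (b'func', (b'symbol', b'children'),
#           (b'func', (b'symbol', b'ancestors'), (b'symbol', b'.')))
#   depth(tree)     -> 3
#   funcsused(tree) -> {b'ancestors', b'children'}
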
909 _hashre = util.re.compile(b'[0-9a-fA-F]{1,40}$')
922 _hashre = util.re.compile(b'[0-9a-fA-F]{1,40}$')
910
923
911
924
912 def _ishashlikesymbol(symbol):
925 def _ishashlikesymbol(symbol):
913 """returns true if the symbol looks like a hash"""
926 """returns true if the symbol looks like a hash"""
914 return _hashre.match(symbol)
927 return _hashre.match(symbol)
915
928
916
929
917 def gethashlikesymbols(tree):
930 def gethashlikesymbols(tree):
918 """returns the list of symbols of the tree that look like hashes
931 """returns the list of symbols of the tree that look like hashes
919
932
920 >>> gethashlikesymbols(parse(b'3::abe3ff'))
933 >>> gethashlikesymbols(parse(b'3::abe3ff'))
921 ['3', 'abe3ff']
934 ['3', 'abe3ff']
922 >>> gethashlikesymbols(parse(b'precursors(.)'))
935 >>> gethashlikesymbols(parse(b'precursors(.)'))
923 []
936 []
924 >>> gethashlikesymbols(parse(b'precursors(34)'))
937 >>> gethashlikesymbols(parse(b'precursors(34)'))
925 ['34']
938 ['34']
926 >>> gethashlikesymbols(parse(b'abe3ffZ'))
939 >>> gethashlikesymbols(parse(b'abe3ffZ'))
927 []
940 []
928 """
941 """
929 if not tree:
942 if not tree:
930 return []
943 return []
931
944
932 if tree[0] == b"symbol":
945 if tree[0] == b"symbol":
933 if _ishashlikesymbol(tree[1]):
946 if _ishashlikesymbol(tree[1]):
934 return [tree[1]]
947 return [tree[1]]
935 elif len(tree) >= 3:
948 elif len(tree) >= 3:
936 results = []
949 results = []
937 for subtree in tree[1:]:
950 for subtree in tree[1:]:
938 results += gethashlikesymbols(subtree)
951 results += gethashlikesymbols(subtree)
939 return results
952 return results
940 return []
953 return []