Show More
@@ -89,7 +89,7 b' from mercurial import (' | |||||
89 | phases, |
|
89 | phases, | |
90 | pycompat, |
|
90 | pycompat, | |
91 | registrar, |
|
91 | registrar, | |
92 | revset, |
|
92 | revsetlang, | |
93 | scmutil, |
|
93 | scmutil, | |
94 | smartset, |
|
94 | smartset, | |
95 | subrepo, |
|
95 | subrepo, | |
@@ -3568,7 +3568,7 b' revsetpredicate = registrar.revsetpredic' | |||||
3568 | def revsetmq(repo, subset, x): |
|
3568 | def revsetmq(repo, subset, x): | |
3569 | """Changesets managed by MQ. |
|
3569 | """Changesets managed by MQ. | |
3570 | """ |
|
3570 | """ | |
3571 | revset.getargs(x, 0, 0, _("mq takes no arguments")) |
|
3571 | revsetlang.getargs(x, 0, 0, _("mq takes no arguments")) | |
3572 | applied = set([repo[r.node].rev() for r in repo.mq.applied]) |
|
3572 | applied = set([repo[r.node].rev() for r in repo.mq.applied]) | |
3573 | return smartset.baseset([r for r in subset if r in applied]) |
|
3573 | return smartset.baseset([r for r in subset if r in applied]) | |
3574 |
|
3574 |
@@ -44,7 +44,7 b' from . import (' | |||||
44 | patch, |
|
44 | patch, | |
45 | phases, |
|
45 | phases, | |
46 | pycompat, |
|
46 | pycompat, | |
47 | revset, |
|
47 | revsetlang, | |
48 | scmutil, |
|
48 | scmutil, | |
49 | server, |
|
49 | server, | |
50 | sshserver, |
|
50 | sshserver, | |
@@ -3414,7 +3414,7 b' def log(ui, repo, *pats, **opts):' | |||||
3414 |
|
3414 | |||
3415 | """ |
|
3415 | """ | |
3416 | if opts.get('follow') and opts.get('rev'): |
|
3416 | if opts.get('follow') and opts.get('rev'): | |
3417 | opts['rev'] = [revset.formatspec('reverse(::%lr)', opts.get('rev'))] |
|
3417 | opts['rev'] = [revsetlang.formatspec('reverse(::%lr)', opts.get('rev'))] | |
3418 | del opts['follow'] |
|
3418 | del opts['follow'] | |
3419 |
|
3419 | |||
3420 | if opts.get('graph'): |
|
3420 | if opts.get('graph'): | |
@@ -4093,7 +4093,7 b' def push(ui, repo, dest=None, **opts):' | |||||
4093 | elif path.pushrev: |
|
4093 | elif path.pushrev: | |
4094 | # It doesn't make any sense to specify ancestor revisions. So limit |
|
4094 | # It doesn't make any sense to specify ancestor revisions. So limit | |
4095 | # to DAG heads to make discovery simpler. |
|
4095 | # to DAG heads to make discovery simpler. | |
4096 | expr = revset.formatspec('heads(%r)', path.pushrev) |
|
4096 | expr = revsetlang.formatspec('heads(%r)', path.pushrev) | |
4097 | revs = scmutil.revrange(repo, [expr]) |
|
4097 | revs = scmutil.revrange(repo, [expr]) | |
4098 | revs = [repo[rev].node() for rev in revs] |
|
4098 | revs = [repo[rev].node() for rev in revs] | |
4099 | if not revs: |
|
4099 | if not revs: |
@@ -52,6 +52,7 b' from . import (' | |||||
52 | repair, |
|
52 | repair, | |
53 | revlog, |
|
53 | revlog, | |
54 | revset, |
|
54 | revset, | |
|
55 | revsetlang, | |||
55 | scmutil, |
|
56 | scmutil, | |
56 | setdiscovery, |
|
57 | setdiscovery, | |
57 | simplemerge, |
|
58 | simplemerge, | |
@@ -1794,10 +1795,10 b' def debugrevspec(ui, repo, expr, **opts)' | |||||
1794 | """ |
|
1795 | """ | |
1795 | stages = [ |
|
1796 | stages = [ | |
1796 | ('parsed', lambda tree: tree), |
|
1797 | ('parsed', lambda tree: tree), | |
1797 | ('expanded', lambda tree: revset.expandaliases(ui, tree)), |
|
1798 | ('expanded', lambda tree: revsetlang.expandaliases(ui, tree)), | |
1798 | ('concatenated', revset.foldconcat), |
|
1799 | ('concatenated', revsetlang.foldconcat), | |
1799 | ('analyzed', revset.analyze), |
|
1800 | ('analyzed', revsetlang.analyze), | |
1800 | ('optimized', revset.optimize), |
|
1801 | ('optimized', revsetlang.optimize), | |
1801 | ] |
|
1802 | ] | |
1802 | if opts['no_optimized']: |
|
1803 | if opts['no_optimized']: | |
1803 | stages = stages[:-1] |
|
1804 | stages = stages[:-1] | |
@@ -1826,13 +1827,13 b' def debugrevspec(ui, repo, expr, **opts)' | |||||
1826 |
|
1827 | |||
1827 | treebystage = {} |
|
1828 | treebystage = {} | |
1828 | printedtree = None |
|
1829 | printedtree = None | |
1829 | tree = revset.parse(expr, lookup=repo.__contains__) |
|
1830 | tree = revsetlang.parse(expr, lookup=repo.__contains__) | |
1830 | for n, f in stages: |
|
1831 | for n, f in stages: | |
1831 | treebystage[n] = tree = f(tree) |
|
1832 | treebystage[n] = tree = f(tree) | |
1832 | if n in showalways or (n in showchanged and tree != printedtree): |
|
1833 | if n in showalways or (n in showchanged and tree != printedtree): | |
1833 | if opts['show_stage'] or n != 'parsed': |
|
1834 | if opts['show_stage'] or n != 'parsed': | |
1834 | ui.write(("* %s:\n") % n) |
|
1835 | ui.write(("* %s:\n") % n) | |
1835 | ui.write(revset.prettyformat(tree), "\n") |
|
1836 | ui.write(revsetlang.prettyformat(tree), "\n") | |
1836 | printedtree = tree |
|
1837 | printedtree = tree | |
1837 |
|
1838 | |||
1838 | if opts['verify_optimized']: |
|
1839 | if opts['verify_optimized']: |
@@ -32,6 +32,7 b' from .. import (' | |||||
32 | error, |
|
32 | error, | |
33 | graphmod, |
|
33 | graphmod, | |
34 | revset, |
|
34 | revset, | |
|
35 | revsetlang, | |||
35 | scmutil, |
|
36 | scmutil, | |
36 | smartset, |
|
37 | smartset, | |
37 | templatefilters, |
|
38 | templatefilters, | |
@@ -239,20 +240,20 b' def _search(web, req, tmpl):' | |||||
239 |
|
240 | |||
240 | revdef = 'reverse(%s)' % query |
|
241 | revdef = 'reverse(%s)' % query | |
241 | try: |
|
242 | try: | |
242 | tree = revset.parse(revdef) |
|
243 | tree = revsetlang.parse(revdef) | |
243 | except error.ParseError: |
|
244 | except error.ParseError: | |
244 | # can't parse to a revset tree |
|
245 | # can't parse to a revset tree | |
245 | return MODE_KEYWORD, query |
|
246 | return MODE_KEYWORD, query | |
246 |
|
247 | |||
247 | if revset.depth(tree) <= 2: |
|
248 | if revsetlang.depth(tree) <= 2: | |
248 | # no revset syntax used |
|
249 | # no revset syntax used | |
249 | return MODE_KEYWORD, query |
|
250 | return MODE_KEYWORD, query | |
250 |
|
251 | |||
251 | if any((token, (value or '')[:3]) == ('string', 're:') |
|
252 | if any((token, (value or '')[:3]) == ('string', 're:') | |
252 |
|
|
253 | for token, value, pos in revsetlang.tokenize(revdef)): | |
253 | return MODE_KEYWORD, query |
|
254 | return MODE_KEYWORD, query | |
254 |
|
255 | |||
255 | funcsused = revset.funcsused(tree) |
|
256 | funcsused = revsetlang.funcsused(tree) | |
256 | if not funcsused.issubset(revset.safesymbols): |
|
257 | if not funcsused.issubset(revset.safesymbols): | |
257 | return MODE_KEYWORD, query |
|
258 | return MODE_KEYWORD, query | |
258 |
|
259 |
@@ -50,6 +50,7 b' from . import (' | |||||
50 | pushkey, |
|
50 | pushkey, | |
51 | repoview, |
|
51 | repoview, | |
52 | revset, |
|
52 | revset, | |
|
53 | revsetlang, | |||
53 | scmutil, |
|
54 | scmutil, | |
54 | store, |
|
55 | store, | |
55 | subrepo, |
|
56 | subrepo, | |
@@ -573,7 +574,7 b' class localrepository(object):' | |||||
573 | '''Find revisions matching a revset. |
|
574 | '''Find revisions matching a revset. | |
574 |
|
575 | |||
575 | The revset is specified as a string ``expr`` that may contain |
|
576 | The revset is specified as a string ``expr`` that may contain | |
576 | %-formatting to escape certain types. See ``revset.formatspec``. |
|
577 | %-formatting to escape certain types. See ``revsetlang.formatspec``. | |
577 |
|
578 | |||
578 | Revset aliases from the configuration are not expanded. To expand |
|
579 | Revset aliases from the configuration are not expanded. To expand | |
579 | user aliases, consider calling ``scmutil.revrange()``. |
|
580 | user aliases, consider calling ``scmutil.revrange()``. | |
@@ -581,7 +582,7 b' class localrepository(object):' | |||||
581 | Returns a revset.abstractsmartset, which is a list-like interface |
|
582 | Returns a revset.abstractsmartset, which is a list-like interface | |
582 | that contains integer revisions. |
|
583 | that contains integer revisions. | |
583 | ''' |
|
584 | ''' | |
584 | expr = revset.formatspec(expr, *args) |
|
585 | expr = revsetlang.formatspec(expr, *args) | |
585 | m = revset.match(None, expr) |
|
586 | m = revset.match(None, expr) | |
586 | return m(self) |
|
587 | return m(self) | |
587 |
|
588 |
This diff has been collapsed as it changes many lines, (695 lines changed) Show them Hide them | |||||
@@ -9,7 +9,6 b' from __future__ import absolute_import' | |||||
9 |
|
9 | |||
10 | import heapq |
|
10 | import heapq | |
11 | import re |
|
11 | import re | |
12 | import string |
|
|||
13 |
|
12 | |||
14 | from .i18n import _ |
|
13 | from .i18n import _ | |
15 | from . import ( |
|
14 | from . import ( | |
@@ -20,16 +19,29 b' from . import (' | |||||
20 | match as matchmod, |
|
19 | match as matchmod, | |
21 | node, |
|
20 | node, | |
22 | obsolete as obsmod, |
|
21 | obsolete as obsmod, | |
23 | parser, |
|
|||
24 | pathutil, |
|
22 | pathutil, | |
25 | phases, |
|
23 | phases, | |
26 | pycompat, |
|
|||
27 | registrar, |
|
24 | registrar, | |
28 | repoview, |
|
25 | repoview, | |
|
26 | revsetlang, | |||
29 | smartset, |
|
27 | smartset, | |
30 | util, |
|
28 | util, | |
31 | ) |
|
29 | ) | |
32 |
|
30 | |||
|
31 | # helpers for processing parsed tree | |||
|
32 | getsymbol = revsetlang.getsymbol | |||
|
33 | getstring = revsetlang.getstring | |||
|
34 | getinteger = revsetlang.getinteger | |||
|
35 | getlist = revsetlang.getlist | |||
|
36 | getrange = revsetlang.getrange | |||
|
37 | getargs = revsetlang.getargs | |||
|
38 | getargsdict = revsetlang.getargsdict | |||
|
39 | ||||
|
40 | # constants used as an argument of match() and matchany() | |||
|
41 | anyorder = revsetlang.anyorder | |||
|
42 | defineorder = revsetlang.defineorder | |||
|
43 | followorder = revsetlang.followorder | |||
|
44 | ||||
33 | baseset = smartset.baseset |
|
45 | baseset = smartset.baseset | |
34 | generatorset = smartset.generatorset |
|
46 | generatorset = smartset.generatorset | |
35 | spanset = smartset.spanset |
|
47 | spanset = smartset.spanset | |
@@ -152,213 +164,8 b' def reachableroots(repo, roots, heads, i' | |||||
152 | revs.sort() |
|
164 | revs.sort() | |
153 | return revs |
|
165 | return revs | |
154 |
|
166 | |||
155 | elements = { |
|
|||
156 | # token-type: binding-strength, primary, prefix, infix, suffix |
|
|||
157 | "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None), |
|
|||
158 | "##": (20, None, None, ("_concat", 20), None), |
|
|||
159 | "~": (18, None, None, ("ancestor", 18), None), |
|
|||
160 | "^": (18, None, None, ("parent", 18), "parentpost"), |
|
|||
161 | "-": (5, None, ("negate", 19), ("minus", 5), None), |
|
|||
162 | "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), |
|
|||
163 | "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"), |
|
|||
164 | ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"), |
|
|||
165 | "not": (10, None, ("not", 10), None, None), |
|
|||
166 | "!": (10, None, ("not", 10), None, None), |
|
|||
167 | "and": (5, None, None, ("and", 5), None), |
|
|||
168 | "&": (5, None, None, ("and", 5), None), |
|
|||
169 | "%": (5, None, None, ("only", 5), "onlypost"), |
|
|||
170 | "or": (4, None, None, ("or", 4), None), |
|
|||
171 | "|": (4, None, None, ("or", 4), None), |
|
|||
172 | "+": (4, None, None, ("or", 4), None), |
|
|||
173 | "=": (3, None, None, ("keyvalue", 3), None), |
|
|||
174 | ",": (2, None, None, ("list", 2), None), |
|
|||
175 | ")": (0, None, None, None, None), |
|
|||
176 | "symbol": (0, "symbol", None, None, None), |
|
|||
177 | "string": (0, "string", None, None, None), |
|
|||
178 | "end": (0, None, None, None, None), |
|
|||
179 | } |
|
|||
180 |
|
||||
181 | keywords = set(['and', 'or', 'not']) |
|
|||
182 |
|
||||
183 | # default set of valid characters for the initial letter of symbols |
|
|||
184 | _syminitletters = set( |
|
|||
185 | string.ascii_letters + |
|
|||
186 | string.digits + pycompat.sysstr('._@')) | set(map(chr, xrange(128, 256))) |
|
|||
187 |
|
||||
188 | # default set of valid characters for non-initial letters of symbols |
|
|||
189 | _symletters = _syminitletters | set(pycompat.sysstr('-/')) |
|
|||
190 |
|
||||
191 | def tokenize(program, lookup=None, syminitletters=None, symletters=None): |
|
|||
192 | ''' |
|
|||
193 | Parse a revset statement into a stream of tokens |
|
|||
194 |
|
||||
195 | ``syminitletters`` is the set of valid characters for the initial |
|
|||
196 | letter of symbols. |
|
|||
197 |
|
||||
198 | By default, character ``c`` is recognized as valid for initial |
|
|||
199 | letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``. |
|
|||
200 |
|
||||
201 | ``symletters`` is the set of valid characters for non-initial |
|
|||
202 | letters of symbols. |
|
|||
203 |
|
||||
204 | By default, character ``c`` is recognized as valid for non-initial |
|
|||
205 | letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``. |
|
|||
206 |
|
||||
207 | Check that @ is a valid unquoted token character (issue3686): |
|
|||
208 | >>> list(tokenize("@::")) |
|
|||
209 | [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)] |
|
|||
210 |
|
||||
211 | ''' |
|
|||
212 | if syminitletters is None: |
|
|||
213 | syminitletters = _syminitletters |
|
|||
214 | if symletters is None: |
|
|||
215 | symletters = _symletters |
|
|||
216 |
|
||||
217 | if program and lookup: |
|
|||
218 | # attempt to parse old-style ranges first to deal with |
|
|||
219 | # things like old-tag which contain query metacharacters |
|
|||
220 | parts = program.split(':', 1) |
|
|||
221 | if all(lookup(sym) for sym in parts if sym): |
|
|||
222 | if parts[0]: |
|
|||
223 | yield ('symbol', parts[0], 0) |
|
|||
224 | if len(parts) > 1: |
|
|||
225 | s = len(parts[0]) |
|
|||
226 | yield (':', None, s) |
|
|||
227 | if parts[1]: |
|
|||
228 | yield ('symbol', parts[1], s + 1) |
|
|||
229 | yield ('end', None, len(program)) |
|
|||
230 | return |
|
|||
231 |
|
||||
232 | pos, l = 0, len(program) |
|
|||
233 | while pos < l: |
|
|||
234 | c = program[pos] |
|
|||
235 | if c.isspace(): # skip inter-token whitespace |
|
|||
236 | pass |
|
|||
237 | elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully |
|
|||
238 | yield ('::', None, pos) |
|
|||
239 | pos += 1 # skip ahead |
|
|||
240 | elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully |
|
|||
241 | yield ('..', None, pos) |
|
|||
242 | pos += 1 # skip ahead |
|
|||
243 | elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully |
|
|||
244 | yield ('##', None, pos) |
|
|||
245 | pos += 1 # skip ahead |
|
|||
246 | elif c in "():=,-|&+!~^%": # handle simple operators |
|
|||
247 | yield (c, None, pos) |
|
|||
248 | elif (c in '"\'' or c == 'r' and |
|
|||
249 | program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings |
|
|||
250 | if c == 'r': |
|
|||
251 | pos += 1 |
|
|||
252 | c = program[pos] |
|
|||
253 | decode = lambda x: x |
|
|||
254 | else: |
|
|||
255 | decode = parser.unescapestr |
|
|||
256 | pos += 1 |
|
|||
257 | s = pos |
|
|||
258 | while pos < l: # find closing quote |
|
|||
259 | d = program[pos] |
|
|||
260 | if d == '\\': # skip over escaped characters |
|
|||
261 | pos += 2 |
|
|||
262 | continue |
|
|||
263 | if d == c: |
|
|||
264 | yield ('string', decode(program[s:pos]), s) |
|
|||
265 | break |
|
|||
266 | pos += 1 |
|
|||
267 | else: |
|
|||
268 | raise error.ParseError(_("unterminated string"), s) |
|
|||
269 | # gather up a symbol/keyword |
|
|||
270 | elif c in syminitletters: |
|
|||
271 | s = pos |
|
|||
272 | pos += 1 |
|
|||
273 | while pos < l: # find end of symbol |
|
|||
274 | d = program[pos] |
|
|||
275 | if d not in symletters: |
|
|||
276 | break |
|
|||
277 | if d == '.' and program[pos - 1] == '.': # special case for .. |
|
|||
278 | pos -= 1 |
|
|||
279 | break |
|
|||
280 | pos += 1 |
|
|||
281 | sym = program[s:pos] |
|
|||
282 | if sym in keywords: # operator keywords |
|
|||
283 | yield (sym, None, s) |
|
|||
284 | elif '-' in sym: |
|
|||
285 | # some jerk gave us foo-bar-baz, try to check if it's a symbol |
|
|||
286 | if lookup and lookup(sym): |
|
|||
287 | # looks like a real symbol |
|
|||
288 | yield ('symbol', sym, s) |
|
|||
289 | else: |
|
|||
290 | # looks like an expression |
|
|||
291 | parts = sym.split('-') |
|
|||
292 | for p in parts[:-1]: |
|
|||
293 | if p: # possible consecutive - |
|
|||
294 | yield ('symbol', p, s) |
|
|||
295 | s += len(p) |
|
|||
296 | yield ('-', None, pos) |
|
|||
297 | s += 1 |
|
|||
298 | if parts[-1]: # possible trailing - |
|
|||
299 | yield ('symbol', parts[-1], s) |
|
|||
300 | else: |
|
|||
301 | yield ('symbol', sym, s) |
|
|||
302 | pos -= 1 |
|
|||
303 | else: |
|
|||
304 | raise error.ParseError(_("syntax error in revset '%s'") % |
|
|||
305 | program, pos) |
|
|||
306 | pos += 1 |
|
|||
307 | yield ('end', None, pos) |
|
|||
308 |
|
||||
309 | # helpers |
|
167 | # helpers | |
310 |
|
168 | |||
311 | _notset = object() |
|
|||
312 |
|
||||
313 | def getsymbol(x): |
|
|||
314 | if x and x[0] == 'symbol': |
|
|||
315 | return x[1] |
|
|||
316 | raise error.ParseError(_('not a symbol')) |
|
|||
317 |
|
||||
318 | def getstring(x, err): |
|
|||
319 | if x and (x[0] == 'string' or x[0] == 'symbol'): |
|
|||
320 | return x[1] |
|
|||
321 | raise error.ParseError(err) |
|
|||
322 |
|
||||
323 | def getinteger(x, err, default=_notset): |
|
|||
324 | if not x and default is not _notset: |
|
|||
325 | return default |
|
|||
326 | try: |
|
|||
327 | return int(getstring(x, err)) |
|
|||
328 | except ValueError: |
|
|||
329 | raise error.ParseError(err) |
|
|||
330 |
|
||||
331 | def getlist(x): |
|
|||
332 | if not x: |
|
|||
333 | return [] |
|
|||
334 | if x[0] == 'list': |
|
|||
335 | return list(x[1:]) |
|
|||
336 | return [x] |
|
|||
337 |
|
||||
338 | def getrange(x, err): |
|
|||
339 | if not x: |
|
|||
340 | raise error.ParseError(err) |
|
|||
341 | op = x[0] |
|
|||
342 | if op == 'range': |
|
|||
343 | return x[1], x[2] |
|
|||
344 | elif op == 'rangepre': |
|
|||
345 | return None, x[1] |
|
|||
346 | elif op == 'rangepost': |
|
|||
347 | return x[1], None |
|
|||
348 | elif op == 'rangeall': |
|
|||
349 | return None, None |
|
|||
350 | raise error.ParseError(err) |
|
|||
351 |
|
||||
352 | def getargs(x, min, max, err): |
|
|||
353 | l = getlist(x) |
|
|||
354 | if len(l) < min or (max >= 0 and len(l) > max): |
|
|||
355 | raise error.ParseError(err) |
|
|||
356 | return l |
|
|||
357 |
|
||||
358 | def getargsdict(x, funcname, keys): |
|
|||
359 | return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys), |
|
|||
360 | keyvaluenode='keyvalue', keynode='symbol') |
|
|||
361 |
|
||||
362 | def getset(repo, subset, x): |
|
169 | def getset(repo, subset, x): | |
363 | if not x: |
|
170 | if not x: | |
364 | raise error.ParseError(_("missing argument")) |
|
171 | raise error.ParseError(_("missing argument")) | |
@@ -2412,350 +2219,6 b' methods = {' | |||||
2412 | "parentpost": parentpost, |
|
2219 | "parentpost": parentpost, | |
2413 | } |
|
2220 | } | |
2414 |
|
2221 | |||
2415 | # Constants for ordering requirement, used in _analyze(): |
|
|||
2416 | # |
|
|||
2417 | # If 'define', any nested functions and operations can change the ordering of |
|
|||
2418 | # the entries in the set. If 'follow', any nested functions and operations |
|
|||
2419 | # should take the ordering specified by the first operand to the '&' operator. |
|
|||
2420 | # |
|
|||
2421 | # For instance, |
|
|||
2422 | # |
|
|||
2423 | # X & (Y | Z) |
|
|||
2424 | # ^ ^^^^^^^ |
|
|||
2425 | # | follow |
|
|||
2426 | # define |
|
|||
2427 | # |
|
|||
2428 | # will be evaluated as 'or(y(x()), z(x()))', where 'x()' can change the order |
|
|||
2429 | # of the entries in the set, but 'y()', 'z()' and 'or()' shouldn't. |
|
|||
2430 | # |
|
|||
2431 | # 'any' means the order doesn't matter. For instance, |
|
|||
2432 | # |
|
|||
2433 | # X & !Y |
|
|||
2434 | # ^ |
|
|||
2435 | # any |
|
|||
2436 | # |
|
|||
2437 | # 'y()' can either enforce its ordering requirement or take the ordering |
|
|||
2438 | # specified by 'x()' because 'not()' doesn't care the order. |
|
|||
2439 | # |
|
|||
2440 | # Transition of ordering requirement: |
|
|||
2441 | # |
|
|||
2442 | # 1. starts with 'define' |
|
|||
2443 | # 2. shifts to 'follow' by 'x & y' |
|
|||
2444 | # 3. changes back to 'define' on function call 'f(x)' or function-like |
|
|||
2445 | # operation 'x (f) y' because 'f' may have its own ordering requirement |
|
|||
2446 | # for 'x' and 'y' (e.g. 'first(x)') |
|
|||
2447 | # |
|
|||
2448 | anyorder = 'any' # don't care the order |
|
|||
2449 | defineorder = 'define' # should define the order |
|
|||
2450 | followorder = 'follow' # must follow the current order |
|
|||
2451 |
|
||||
2452 | # transition table for 'x & y', from the current expression 'x' to 'y' |
|
|||
2453 | _tofolloworder = { |
|
|||
2454 | anyorder: anyorder, |
|
|||
2455 | defineorder: followorder, |
|
|||
2456 | followorder: followorder, |
|
|||
2457 | } |
|
|||
2458 |
|
||||
2459 | def _matchonly(revs, bases): |
|
|||
2460 | """ |
|
|||
2461 | >>> f = lambda *args: _matchonly(*map(parse, args)) |
|
|||
2462 | >>> f('ancestors(A)', 'not ancestors(B)') |
|
|||
2463 | ('list', ('symbol', 'A'), ('symbol', 'B')) |
|
|||
2464 | """ |
|
|||
2465 | if (revs is not None |
|
|||
2466 | and revs[0] == 'func' |
|
|||
2467 | and getsymbol(revs[1]) == 'ancestors' |
|
|||
2468 | and bases is not None |
|
|||
2469 | and bases[0] == 'not' |
|
|||
2470 | and bases[1][0] == 'func' |
|
|||
2471 | and getsymbol(bases[1][1]) == 'ancestors'): |
|
|||
2472 | return ('list', revs[2], bases[1][2]) |
|
|||
2473 |
|
||||
2474 | def _fixops(x): |
|
|||
2475 | """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be |
|
|||
2476 | handled well by our simple top-down parser""" |
|
|||
2477 | if not isinstance(x, tuple): |
|
|||
2478 | return x |
|
|||
2479 |
|
||||
2480 | op = x[0] |
|
|||
2481 | if op == 'parent': |
|
|||
2482 | # x^:y means (x^) : y, not x ^ (:y) |
|
|||
2483 | # x^: means (x^) :, not x ^ (:) |
|
|||
2484 | post = ('parentpost', x[1]) |
|
|||
2485 | if x[2][0] == 'dagrangepre': |
|
|||
2486 | return _fixops(('dagrange', post, x[2][1])) |
|
|||
2487 | elif x[2][0] == 'rangepre': |
|
|||
2488 | return _fixops(('range', post, x[2][1])) |
|
|||
2489 | elif x[2][0] == 'rangeall': |
|
|||
2490 | return _fixops(('rangepost', post)) |
|
|||
2491 | elif op == 'or': |
|
|||
2492 | # make number of arguments deterministic: |
|
|||
2493 | # x + y + z -> (or x y z) -> (or (list x y z)) |
|
|||
2494 | return (op, _fixops(('list',) + x[1:])) |
|
|||
2495 |
|
||||
2496 | return (op,) + tuple(_fixops(y) for y in x[1:]) |
|
|||
2497 |
|
||||
2498 | def _analyze(x, order): |
|
|||
2499 | if x is None: |
|
|||
2500 | return x |
|
|||
2501 |
|
||||
2502 | op = x[0] |
|
|||
2503 | if op == 'minus': |
|
|||
2504 | return _analyze(('and', x[1], ('not', x[2])), order) |
|
|||
2505 | elif op == 'only': |
|
|||
2506 | t = ('func', ('symbol', 'only'), ('list', x[1], x[2])) |
|
|||
2507 | return _analyze(t, order) |
|
|||
2508 | elif op == 'onlypost': |
|
|||
2509 | return _analyze(('func', ('symbol', 'only'), x[1]), order) |
|
|||
2510 | elif op == 'dagrangepre': |
|
|||
2511 | return _analyze(('func', ('symbol', 'ancestors'), x[1]), order) |
|
|||
2512 | elif op == 'dagrangepost': |
|
|||
2513 | return _analyze(('func', ('symbol', 'descendants'), x[1]), order) |
|
|||
2514 | elif op == 'negate': |
|
|||
2515 | s = getstring(x[1], _("can't negate that")) |
|
|||
2516 | return _analyze(('string', '-' + s), order) |
|
|||
2517 | elif op in ('string', 'symbol'): |
|
|||
2518 | return x |
|
|||
2519 | elif op == 'and': |
|
|||
2520 | ta = _analyze(x[1], order) |
|
|||
2521 | tb = _analyze(x[2], _tofolloworder[order]) |
|
|||
2522 | return (op, ta, tb, order) |
|
|||
2523 | elif op == 'or': |
|
|||
2524 | return (op, _analyze(x[1], order), order) |
|
|||
2525 | elif op == 'not': |
|
|||
2526 | return (op, _analyze(x[1], anyorder), order) |
|
|||
2527 | elif op == 'rangeall': |
|
|||
2528 | return (op, None, order) |
|
|||
2529 | elif op in ('rangepre', 'rangepost', 'parentpost'): |
|
|||
2530 | return (op, _analyze(x[1], defineorder), order) |
|
|||
2531 | elif op == 'group': |
|
|||
2532 | return _analyze(x[1], order) |
|
|||
2533 | elif op in ('dagrange', 'range', 'parent', 'ancestor'): |
|
|||
2534 | ta = _analyze(x[1], defineorder) |
|
|||
2535 | tb = _analyze(x[2], defineorder) |
|
|||
2536 | return (op, ta, tb, order) |
|
|||
2537 | elif op == 'list': |
|
|||
2538 | return (op,) + tuple(_analyze(y, order) for y in x[1:]) |
|
|||
2539 | elif op == 'keyvalue': |
|
|||
2540 | return (op, x[1], _analyze(x[2], order)) |
|
|||
2541 | elif op == 'func': |
|
|||
2542 | f = getsymbol(x[1]) |
|
|||
2543 | d = defineorder |
|
|||
2544 | if f == 'present': |
|
|||
2545 | # 'present(set)' is known to return the argument set with no |
|
|||
2546 | # modification, so forward the current order to its argument |
|
|||
2547 | d = order |
|
|||
2548 | return (op, x[1], _analyze(x[2], d), order) |
|
|||
2549 | raise ValueError('invalid operator %r' % op) |
|
|||
2550 |
|
||||
2551 | def analyze(x, order=defineorder): |
|
|||
2552 | """Transform raw parsed tree to evaluatable tree which can be fed to |
|
|||
2553 | optimize() or getset() |
|
|||
2554 |
|
||||
2555 | All pseudo operations should be mapped to real operations or functions |
|
|||
2556 | defined in methods or symbols table respectively. |
|
|||
2557 |
|
||||
2558 | 'order' specifies how the current expression 'x' is ordered (see the |
|
|||
2559 | constants defined above.) |
|
|||
2560 | """ |
|
|||
2561 | return _analyze(x, order) |
|
|||
2562 |
|
||||
2563 | def _optimize(x, small): |
|
|||
2564 | if x is None: |
|
|||
2565 | return 0, x |
|
|||
2566 |
|
||||
2567 | smallbonus = 1 |
|
|||
2568 | if small: |
|
|||
2569 | smallbonus = .5 |
|
|||
2570 |
|
||||
2571 | op = x[0] |
|
|||
2572 | if op in ('string', 'symbol'): |
|
|||
2573 | return smallbonus, x # single revisions are small |
|
|||
2574 | elif op == 'and': |
|
|||
2575 | wa, ta = _optimize(x[1], True) |
|
|||
2576 | wb, tb = _optimize(x[2], True) |
|
|||
2577 | order = x[3] |
|
|||
2578 | w = min(wa, wb) |
|
|||
2579 |
|
||||
2580 | # (::x and not ::y)/(not ::y and ::x) have a fast path |
|
|||
2581 | tm = _matchonly(ta, tb) or _matchonly(tb, ta) |
|
|||
2582 | if tm: |
|
|||
2583 | return w, ('func', ('symbol', 'only'), tm, order) |
|
|||
2584 |
|
||||
2585 | if tb is not None and tb[0] == 'not': |
|
|||
2586 | return wa, ('difference', ta, tb[1], order) |
|
|||
2587 |
|
||||
2588 | if wa > wb: |
|
|||
2589 | return w, (op, tb, ta, order) |
|
|||
2590 | return w, (op, ta, tb, order) |
|
|||
2591 | elif op == 'or': |
|
|||
2592 | # fast path for machine-generated expression, that is likely to have |
|
|||
2593 | # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()' |
|
|||
2594 | order = x[2] |
|
|||
2595 | ws, ts, ss = [], [], [] |
|
|||
2596 | def flushss(): |
|
|||
2597 | if not ss: |
|
|||
2598 | return |
|
|||
2599 | if len(ss) == 1: |
|
|||
2600 | w, t = ss[0] |
|
|||
2601 | else: |
|
|||
2602 | s = '\0'.join(t[1] for w, t in ss) |
|
|||
2603 | y = ('func', ('symbol', '_list'), ('string', s), order) |
|
|||
2604 | w, t = _optimize(y, False) |
|
|||
2605 | ws.append(w) |
|
|||
2606 | ts.append(t) |
|
|||
2607 | del ss[:] |
|
|||
2608 | for y in getlist(x[1]): |
|
|||
2609 | w, t = _optimize(y, False) |
|
|||
2610 | if t is not None and (t[0] == 'string' or t[0] == 'symbol'): |
|
|||
2611 | ss.append((w, t)) |
|
|||
2612 | continue |
|
|||
2613 | flushss() |
|
|||
2614 | ws.append(w) |
|
|||
2615 | ts.append(t) |
|
|||
2616 | flushss() |
|
|||
2617 | if len(ts) == 1: |
|
|||
2618 | return ws[0], ts[0] # 'or' operation is fully optimized out |
|
|||
2619 | # we can't reorder trees by weight because it would change the order. |
|
|||
2620 | # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a") |
|
|||
2621 | # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0])) |
|
|||
2622 | return max(ws), (op, ('list',) + tuple(ts), order) |
|
|||
2623 | elif op == 'not': |
|
|||
2624 | # Optimize not public() to _notpublic() because we have a fast version |
|
|||
2625 | if x[1][:3] == ('func', ('symbol', 'public'), None): |
|
|||
2626 | order = x[1][3] |
|
|||
2627 | newsym = ('func', ('symbol', '_notpublic'), None, order) |
|
|||
2628 | o = _optimize(newsym, not small) |
|
|||
2629 | return o[0], o[1] |
|
|||
2630 | else: |
|
|||
2631 | o = _optimize(x[1], not small) |
|
|||
2632 | order = x[2] |
|
|||
2633 | return o[0], (op, o[1], order) |
|
|||
2634 | elif op == 'rangeall': |
|
|||
2635 | return smallbonus, x |
|
|||
2636 | elif op in ('rangepre', 'rangepost', 'parentpost'): |
|
|||
2637 | o = _optimize(x[1], small) |
|
|||
2638 | order = x[2] |
|
|||
2639 | return o[0], (op, o[1], order) |
|
|||
2640 | elif op in ('dagrange', 'range', 'parent', 'ancestor'): |
|
|||
2641 | wa, ta = _optimize(x[1], small) |
|
|||
2642 | wb, tb = _optimize(x[2], small) |
|
|||
2643 | order = x[3] |
|
|||
2644 | return wa + wb, (op, ta, tb, order) |
|
|||
2645 | elif op == 'list': |
|
|||
2646 | ws, ts = zip(*(_optimize(y, small) for y in x[1:])) |
|
|||
2647 | return sum(ws), (op,) + ts |
|
|||
2648 | elif op == 'keyvalue': |
|
|||
2649 | w, t = _optimize(x[2], small) |
|
|||
2650 | return w, (op, x[1], t) |
|
|||
2651 | elif op == 'func': |
|
|||
2652 | f = getsymbol(x[1]) |
|
|||
2653 | wa, ta = _optimize(x[2], small) |
|
|||
2654 | if f in ('author', 'branch', 'closed', 'date', 'desc', 'file', 'grep', |
|
|||
2655 | 'keyword', 'outgoing', 'user', 'destination'): |
|
|||
2656 | w = 10 # slow |
|
|||
2657 | elif f in ('modifies', 'adds', 'removes'): |
|
|||
2658 | w = 30 # slower |
|
|||
2659 | elif f == "contains": |
|
|||
2660 | w = 100 # very slow |
|
|||
2661 | elif f == "ancestor": |
|
|||
2662 | w = 1 * smallbonus |
|
|||
2663 | elif f in ('reverse', 'limit', 'first', 'wdir', '_intlist'): |
|
|||
2664 | w = 0 |
|
|||
2665 | elif f == "sort": |
|
|||
2666 | w = 10 # assume most sorts look at changelog |
|
|||
2667 | else: |
|
|||
2668 | w = 1 |
|
|||
2669 | order = x[3] |
|
|||
2670 | return w + wa, (op, x[1], ta, order) |
|
|||
2671 | raise ValueError('invalid operator %r' % op) |
|
|||
2672 |
|
||||
2673 | def optimize(tree): |
|
|||
2674 | """Optimize evaluatable tree |
|
|||
2675 |
|
||||
2676 | All pseudo operations should be transformed beforehand. |
|
|||
2677 | """ |
|
|||
2678 | _weight, newtree = _optimize(tree, small=True) |
|
|||
2679 | return newtree |
|
|||
2680 |
|
||||
2681 | # the set of valid characters for the initial letter of symbols in |
|
|||
2682 | # alias declarations and definitions |
|
|||
2683 | _aliassyminitletters = _syminitletters | set(pycompat.sysstr('$')) |
|
|||
2684 |
|
||||
2685 | def _parsewith(spec, lookup=None, syminitletters=None): |
|
|||
2686 | """Generate a parse tree of given spec with given tokenizing options |
|
|||
2687 |
|
||||
2688 | >>> _parsewith('foo($1)', syminitletters=_aliassyminitletters) |
|
|||
2689 | ('func', ('symbol', 'foo'), ('symbol', '$1')) |
|
|||
2690 | >>> _parsewith('$1') |
|
|||
2691 | Traceback (most recent call last): |
|
|||
2692 | ... |
|
|||
2693 | ParseError: ("syntax error in revset '$1'", 0) |
|
|||
2694 | >>> _parsewith('foo bar') |
|
|||
2695 | Traceback (most recent call last): |
|
|||
2696 | ... |
|
|||
2697 | ParseError: ('invalid token', 4) |
|
|||
2698 | """ |
|
|||
2699 | p = parser.parser(elements) |
|
|||
2700 | tree, pos = p.parse(tokenize(spec, lookup=lookup, |
|
|||
2701 | syminitletters=syminitletters)) |
|
|||
2702 | if pos != len(spec): |
|
|||
2703 | raise error.ParseError(_('invalid token'), pos) |
|
|||
2704 | return _fixops(parser.simplifyinfixops(tree, ('list', 'or'))) |
|
|||
2705 |
|
||||
2706 | class _aliasrules(parser.basealiasrules): |
|
|||
2707 | """Parsing and expansion rule set of revset aliases""" |
|
|||
2708 | _section = _('revset alias') |
|
|||
2709 |
|
||||
2710 | @staticmethod |
|
|||
2711 | def _parse(spec): |
|
|||
2712 | """Parse alias declaration/definition ``spec`` |
|
|||
2713 |
|
||||
2714 | This allows symbol names to use also ``$`` as an initial letter |
|
|||
2715 | (for backward compatibility), and callers of this function should |
|
|||
2716 | examine whether ``$`` is used also for unexpected symbols or not. |
|
|||
2717 | """ |
|
|||
2718 | return _parsewith(spec, syminitletters=_aliassyminitletters) |
|
|||
2719 |
|
||||
2720 | @staticmethod |
|
|||
2721 | def _trygetfunc(tree): |
|
|||
2722 | if tree[0] == 'func' and tree[1][0] == 'symbol': |
|
|||
2723 | return tree[1][1], getlist(tree[2]) |
|
|||
2724 |
|
||||
2725 | def expandaliases(ui, tree): |
|
|||
2726 | aliases = _aliasrules.buildmap(ui.configitems('revsetalias')) |
|
|||
2727 | tree = _aliasrules.expand(aliases, tree) |
|
|||
2728 | # warn about problematic (but not referred) aliases |
|
|||
2729 | for name, alias in sorted(aliases.iteritems()): |
|
|||
2730 | if alias.error and not alias.warned: |
|
|||
2731 | ui.warn(_('warning: %s\n') % (alias.error)) |
|
|||
2732 | alias.warned = True |
|
|||
2733 | return tree |
|
|||
2734 |
|
||||
2735 | def foldconcat(tree): |
|
|||
2736 | """Fold elements to be concatenated by `##` |
|
|||
2737 | """ |
|
|||
2738 | if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): |
|
|||
2739 | return tree |
|
|||
2740 | if tree[0] == '_concat': |
|
|||
2741 | pending = [tree] |
|
|||
2742 | l = [] |
|
|||
2743 | while pending: |
|
|||
2744 | e = pending.pop() |
|
|||
2745 | if e[0] == '_concat': |
|
|||
2746 | pending.extend(reversed(e[1:])) |
|
|||
2747 | elif e[0] in ('string', 'symbol'): |
|
|||
2748 | l.append(e[1]) |
|
|||
2749 | else: |
|
|||
2750 | msg = _("\"##\" can't concatenate \"%s\" element") % (e[0]) |
|
|||
2751 | raise error.ParseError(msg) |
|
|||
2752 | return ('string', ''.join(l)) |
|
|||
2753 | else: |
|
|||
2754 | return tuple(foldconcat(t) for t in tree) |
|
|||
2755 |
|
||||
2756 | def parse(spec, lookup=None): |
|
|||
2757 | return _parsewith(spec, lookup=lookup) |
|
|||
2758 |
|
||||
2759 | def posttreebuilthook(tree, repo): |
|
2222 | def posttreebuilthook(tree, repo): | |
2760 | # hook for extensions to execute code on the optimized tree |
|
2223 | # hook for extensions to execute code on the optimized tree | |
2761 | pass |
|
2224 | pass | |
@@ -2785,15 +2248,16 b' def matchany(ui, specs, repo=None, order' | |||||
2785 | if repo: |
|
2248 | if repo: | |
2786 | lookup = repo.__contains__ |
|
2249 | lookup = repo.__contains__ | |
2787 | if len(specs) == 1: |
|
2250 | if len(specs) == 1: | |
2788 | tree = parse(specs[0], lookup) |
|
2251 | tree = revsetlang.parse(specs[0], lookup) | |
2789 | else: |
|
2252 | else: | |
2790 | tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs)) |
|
2253 | tree = ('or', | |
|
2254 | ('list',) + tuple(revsetlang.parse(s, lookup) for s in specs)) | |||
2791 |
|
2255 | |||
2792 | if ui: |
|
2256 | if ui: | |
2793 | tree = expandaliases(ui, tree) |
|
2257 | tree = revsetlang.expandaliases(ui, tree) | |
2794 | tree = foldconcat(tree) |
|
2258 | tree = revsetlang.foldconcat(tree) | |
2795 | tree = analyze(tree, order) |
|
2259 | tree = revsetlang.analyze(tree, order) | |
2796 | tree = optimize(tree) |
|
2260 | tree = revsetlang.optimize(tree) | |
2797 | posttreebuilthook(tree, repo) |
|
2261 | posttreebuilthook(tree, repo) | |
2798 | return makematcher(tree) |
|
2262 | return makematcher(tree) | |
2799 |
|
2263 | |||
@@ -2809,121 +2273,6 b' def makematcher(tree):' | |||||
2809 | return result |
|
2273 | return result | |
2810 | return mfunc |
|
2274 | return mfunc | |
2811 |
|
2275 | |||
2812 | def formatspec(expr, *args): |
|
|||
2813 | ''' |
|
|||
2814 | This is a convenience function for using revsets internally, and |
|
|||
2815 | escapes arguments appropriately. Aliases are intentionally ignored |
|
|||
2816 | so that intended expression behavior isn't accidentally subverted. |
|
|||
2817 |
|
||||
2818 | Supported arguments: |
|
|||
2819 |
|
||||
2820 | %r = revset expression, parenthesized |
|
|||
2821 | %d = int(arg), no quoting |
|
|||
2822 | %s = string(arg), escaped and single-quoted |
|
|||
2823 | %b = arg.branch(), escaped and single-quoted |
|
|||
2824 | %n = hex(arg), single-quoted |
|
|||
2825 | %% = a literal '%' |
|
|||
2826 |
|
||||
2827 | Prefixing the type with 'l' specifies a parenthesized list of that type. |
|
|||
2828 |
|
||||
2829 | >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()")) |
|
|||
2830 | '(10 or 11):: and ((this()) or (that()))' |
|
|||
2831 | >>> formatspec('%d:: and not %d::', 10, 20) |
|
|||
2832 | '10:: and not 20::' |
|
|||
2833 | >>> formatspec('%ld or %ld', [], [1]) |
|
|||
2834 | "_list('') or 1" |
|
|||
2835 | >>> formatspec('keyword(%s)', 'foo\\xe9') |
|
|||
2836 | "keyword('foo\\\\xe9')" |
|
|||
2837 | >>> b = lambda: 'default' |
|
|||
2838 | >>> b.branch = b |
|
|||
2839 | >>> formatspec('branch(%b)', b) |
|
|||
2840 | "branch('default')" |
|
|||
2841 | >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd']) |
|
|||
2842 | "root(_list('a\\x00b\\x00c\\x00d'))" |
|
|||
2843 | ''' |
|
|||
2844 |
|
||||
2845 | def quote(s): |
|
|||
2846 | return repr(str(s)) |
|
|||
2847 |
|
||||
2848 | def argtype(c, arg): |
|
|||
2849 | if c == 'd': |
|
|||
2850 | return str(int(arg)) |
|
|||
2851 | elif c == 's': |
|
|||
2852 | return quote(arg) |
|
|||
2853 | elif c == 'r': |
|
|||
2854 | parse(arg) # make sure syntax errors are confined |
|
|||
2855 | return '(%s)' % arg |
|
|||
2856 | elif c == 'n': |
|
|||
2857 | return quote(node.hex(arg)) |
|
|||
2858 | elif c == 'b': |
|
|||
2859 | return quote(arg.branch()) |
|
|||
2860 |
|
||||
2861 | def listexp(s, t): |
|
|||
2862 | l = len(s) |
|
|||
2863 | if l == 0: |
|
|||
2864 | return "_list('')" |
|
|||
2865 | elif l == 1: |
|
|||
2866 | return argtype(t, s[0]) |
|
|||
2867 | elif t == 'd': |
|
|||
2868 | return "_intlist('%s')" % "\0".join(str(int(a)) for a in s) |
|
|||
2869 | elif t == 's': |
|
|||
2870 | return "_list('%s')" % "\0".join(s) |
|
|||
2871 | elif t == 'n': |
|
|||
2872 | return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s) |
|
|||
2873 | elif t == 'b': |
|
|||
2874 | return "_list('%s')" % "\0".join(a.branch() for a in s) |
|
|||
2875 |
|
||||
2876 | m = l // 2 |
|
|||
2877 | return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t)) |
|
|||
2878 |
|
||||
2879 | ret = '' |
|
|||
2880 | pos = 0 |
|
|||
2881 | arg = 0 |
|
|||
2882 | while pos < len(expr): |
|
|||
2883 | c = expr[pos] |
|
|||
2884 | if c == '%': |
|
|||
2885 | pos += 1 |
|
|||
2886 | d = expr[pos] |
|
|||
2887 | if d == '%': |
|
|||
2888 | ret += d |
|
|||
2889 | elif d in 'dsnbr': |
|
|||
2890 | ret += argtype(d, args[arg]) |
|
|||
2891 | arg += 1 |
|
|||
2892 | elif d == 'l': |
|
|||
2893 | # a list of some type |
|
|||
2894 | pos += 1 |
|
|||
2895 | d = expr[pos] |
|
|||
2896 | ret += listexp(list(args[arg]), d) |
|
|||
2897 | arg += 1 |
|
|||
2898 | else: |
|
|||
2899 | raise error.Abort(_('unexpected revspec format character %s') |
|
|||
2900 | % d) |
|
|||
2901 | else: |
|
|||
2902 | ret += c |
|
|||
2903 | pos += 1 |
|
|||
2904 |
|
||||
2905 | return ret |
|
|||
2906 |
|
||||
2907 | def prettyformat(tree): |
|
|||
2908 | return parser.prettyformat(tree, ('string', 'symbol')) |
|
|||
2909 |
|
||||
2910 | def depth(tree): |
|
|||
2911 | if isinstance(tree, tuple): |
|
|||
2912 | return max(map(depth, tree)) + 1 |
|
|||
2913 | else: |
|
|||
2914 | return 0 |
|
|||
2915 |
|
||||
2916 | def funcsused(tree): |
|
|||
2917 | if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'): |
|
|||
2918 | return set() |
|
|||
2919 | else: |
|
|||
2920 | funcs = set() |
|
|||
2921 | for s in tree[1:]: |
|
|||
2922 | funcs |= funcsused(s) |
|
|||
2923 | if tree[0] == 'func': |
|
|||
2924 | funcs.add(tree[1][1]) |
|
|||
2925 | return funcs |
|
|||
2926 |
|
||||
2927 | def loadpredicate(ui, extname, registrarobj): |
|
2276 | def loadpredicate(ui, extname, registrarobj): | |
2928 | """Load revset predicates from specified registrarobj |
|
2277 | """Load revset predicates from specified registrarobj | |
2929 | """ |
|
2278 | """ |
This diff has been collapsed as it changes many lines, (2257 lines changed) Show them Hide them | |||||
@@ -1,4 +1,4 b'' | |||||
1 | # revset.py - revision set queries for mercurial |
|
1 | # revsetlang.py - parser, tokenizer and utility for revision set language | |
2 | # |
|
2 | # | |
3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
@@ -7,151 +7,16 b'' | |||||
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import heapq |
|
|||
11 | import re |
|
|||
12 | import string |
|
10 | import string | |
13 |
|
11 | |||
14 | from .i18n import _ |
|
12 | from .i18n import _ | |
15 | from . import ( |
|
13 | from . import ( | |
16 | destutil, |
|
|||
17 | encoding, |
|
|||
18 | error, |
|
14 | error, | |
19 | hbisect, |
|
|||
20 | match as matchmod, |
|
|||
21 | node, |
|
15 | node, | |
22 | obsolete as obsmod, |
|
|||
23 | parser, |
|
16 | parser, | |
24 | pathutil, |
|
|||
25 | phases, |
|
|||
26 | pycompat, |
|
17 | pycompat, | |
27 | registrar, |
|
|||
28 | repoview, |
|
|||
29 | smartset, |
|
|||
30 | util, |
|
|||
31 | ) |
|
18 | ) | |
32 |
|
19 | |||
33 | baseset = smartset.baseset |
|
|||
34 | generatorset = smartset.generatorset |
|
|||
35 | spanset = smartset.spanset |
|
|||
36 | fullreposet = smartset.fullreposet |
|
|||
37 |
|
||||
38 | def _revancestors(repo, revs, followfirst): |
|
|||
39 | """Like revlog.ancestors(), but supports followfirst.""" |
|
|||
40 | if followfirst: |
|
|||
41 | cut = 1 |
|
|||
42 | else: |
|
|||
43 | cut = None |
|
|||
44 | cl = repo.changelog |
|
|||
45 |
|
||||
46 | def iterate(): |
|
|||
47 | revs.sort(reverse=True) |
|
|||
48 | irevs = iter(revs) |
|
|||
49 | h = [] |
|
|||
50 |
|
||||
51 | inputrev = next(irevs, None) |
|
|||
52 | if inputrev is not None: |
|
|||
53 | heapq.heappush(h, -inputrev) |
|
|||
54 |
|
||||
55 | seen = set() |
|
|||
56 | while h: |
|
|||
57 | current = -heapq.heappop(h) |
|
|||
58 | if current == inputrev: |
|
|||
59 | inputrev = next(irevs, None) |
|
|||
60 | if inputrev is not None: |
|
|||
61 | heapq.heappush(h, -inputrev) |
|
|||
62 | if current not in seen: |
|
|||
63 | seen.add(current) |
|
|||
64 | yield current |
|
|||
65 | for parent in cl.parentrevs(current)[:cut]: |
|
|||
66 | if parent != node.nullrev: |
|
|||
67 | heapq.heappush(h, -parent) |
|
|||
68 |
|
||||
69 | return generatorset(iterate(), iterasc=False) |
|
|||
70 |
|
||||
71 | def _revdescendants(repo, revs, followfirst): |
|
|||
72 | """Like revlog.descendants() but supports followfirst.""" |
|
|||
73 | if followfirst: |
|
|||
74 | cut = 1 |
|
|||
75 | else: |
|
|||
76 | cut = None |
|
|||
77 |
|
||||
78 | def iterate(): |
|
|||
79 | cl = repo.changelog |
|
|||
80 | # XXX this should be 'parentset.min()' assuming 'parentset' is a |
|
|||
81 | # smartset (and if it is not, it should.) |
|
|||
82 | first = min(revs) |
|
|||
83 | nullrev = node.nullrev |
|
|||
84 | if first == nullrev: |
|
|||
85 | # Are there nodes with a null first parent and a non-null |
|
|||
86 | # second one? Maybe. Do we care? Probably not. |
|
|||
87 | for i in cl: |
|
|||
88 | yield i |
|
|||
89 | else: |
|
|||
90 | seen = set(revs) |
|
|||
91 | for i in cl.revs(first + 1): |
|
|||
92 | for x in cl.parentrevs(i)[:cut]: |
|
|||
93 | if x != nullrev and x in seen: |
|
|||
94 | seen.add(i) |
|
|||
95 | yield i |
|
|||
96 | break |
|
|||
97 |
|
||||
98 | return generatorset(iterate(), iterasc=True) |
|
|||
99 |
|
||||
100 | def _reachablerootspure(repo, minroot, roots, heads, includepath): |
|
|||
101 | """return (heads(::<roots> and ::<heads>)) |
|
|||
102 |
|
||||
103 | If includepath is True, return (<roots>::<heads>).""" |
|
|||
104 | if not roots: |
|
|||
105 | return [] |
|
|||
106 | parentrevs = repo.changelog.parentrevs |
|
|||
107 | roots = set(roots) |
|
|||
108 | visit = list(heads) |
|
|||
109 | reachable = set() |
|
|||
110 | seen = {} |
|
|||
111 | # prefetch all the things! (because python is slow) |
|
|||
112 | reached = reachable.add |
|
|||
113 | dovisit = visit.append |
|
|||
114 | nextvisit = visit.pop |
|
|||
115 | # open-code the post-order traversal due to the tiny size of |
|
|||
116 | # sys.getrecursionlimit() |
|
|||
117 | while visit: |
|
|||
118 | rev = nextvisit() |
|
|||
119 | if rev in roots: |
|
|||
120 | reached(rev) |
|
|||
121 | if not includepath: |
|
|||
122 | continue |
|
|||
123 | parents = parentrevs(rev) |
|
|||
124 | seen[rev] = parents |
|
|||
125 | for parent in parents: |
|
|||
126 | if parent >= minroot and parent not in seen: |
|
|||
127 | dovisit(parent) |
|
|||
128 | if not reachable: |
|
|||
129 | return baseset() |
|
|||
130 | if not includepath: |
|
|||
131 | return reachable |
|
|||
132 | for rev in sorted(seen): |
|
|||
133 | for parent in seen[rev]: |
|
|||
134 | if parent in reachable: |
|
|||
135 | reached(rev) |
|
|||
136 | return reachable |
|
|||
137 |
|
||||
138 | def reachableroots(repo, roots, heads, includepath=False): |
|
|||
139 | """return (heads(::<roots> and ::<heads>)) |
|
|||
140 |
|
||||
141 | If includepath is True, return (<roots>::<heads>).""" |
|
|||
142 | if not roots: |
|
|||
143 | return baseset() |
|
|||
144 | minroot = roots.min() |
|
|||
145 | roots = list(roots) |
|
|||
146 | heads = list(heads) |
|
|||
147 | try: |
|
|||
148 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) |
|
|||
149 | except AttributeError: |
|
|||
150 | revs = _reachablerootspure(repo, minroot, roots, heads, includepath) |
|
|||
151 | revs = baseset(revs) |
|
|||
152 | revs.sort() |
|
|||
153 | return revs |
|
|||
154 |
|
||||
155 | elements = { |
|
20 | elements = { | |
156 | # token-type: binding-strength, primary, prefix, infix, suffix |
|
21 | # token-type: binding-strength, primary, prefix, infix, suffix | |
157 | "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None), |
|
22 | "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None), | |
@@ -359,2059 +224,6 b' def getargsdict(x, funcname, keys):' | |||||
359 | return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys), |
|
224 | return parser.buildargsdict(getlist(x), funcname, parser.splitargspec(keys), | |
360 | keyvaluenode='keyvalue', keynode='symbol') |
|
225 | keyvaluenode='keyvalue', keynode='symbol') | |
361 |
|
226 | |||
362 | def getset(repo, subset, x): |
|
|||
363 | if not x: |
|
|||
364 | raise error.ParseError(_("missing argument")) |
|
|||
365 | s = methods[x[0]](repo, subset, *x[1:]) |
|
|||
366 | if util.safehasattr(s, 'isascending'): |
|
|||
367 | return s |
|
|||
368 | # else case should not happen, because all non-func are internal, |
|
|||
369 | # ignoring for now. |
|
|||
370 | if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols: |
|
|||
371 | repo.ui.deprecwarn('revset "%s" uses list instead of smartset' |
|
|||
372 | % x[1][1], |
|
|||
373 | '3.9') |
|
|||
374 | return baseset(s) |
|
|||
375 |
|
||||
376 | def _getrevsource(repo, r): |
|
|||
377 | extra = repo[r].extra() |
|
|||
378 | for label in ('source', 'transplant_source', 'rebase_source'): |
|
|||
379 | if label in extra: |
|
|||
380 | try: |
|
|||
381 | return repo[extra[label]].rev() |
|
|||
382 | except error.RepoLookupError: |
|
|||
383 | pass |
|
|||
384 | return None |
|
|||
385 |
|
||||
386 | # operator methods |
|
|||
387 |
|
||||
388 | def stringset(repo, subset, x): |
|
|||
389 | x = repo[x].rev() |
|
|||
390 | if (x in subset |
|
|||
391 | or x == node.nullrev and isinstance(subset, fullreposet)): |
|
|||
392 | return baseset([x]) |
|
|||
393 | return baseset() |
|
|||
394 |
|
||||
395 | def rangeset(repo, subset, x, y, order): |
|
|||
396 | m = getset(repo, fullreposet(repo), x) |
|
|||
397 | n = getset(repo, fullreposet(repo), y) |
|
|||
398 |
|
||||
399 | if not m or not n: |
|
|||
400 | return baseset() |
|
|||
401 | return _makerangeset(repo, subset, m.first(), n.last(), order) |
|
|||
402 |
|
||||
403 | def rangeall(repo, subset, x, order): |
|
|||
404 | assert x is None |
|
|||
405 | return _makerangeset(repo, subset, 0, len(repo) - 1, order) |
|
|||
406 |
|
||||
407 | def rangepre(repo, subset, y, order): |
|
|||
408 | # ':y' can't be rewritten to '0:y' since '0' may be hidden |
|
|||
409 | n = getset(repo, fullreposet(repo), y) |
|
|||
410 | if not n: |
|
|||
411 | return baseset() |
|
|||
412 | return _makerangeset(repo, subset, 0, n.last(), order) |
|
|||
413 |
|
||||
414 | def rangepost(repo, subset, x, order): |
|
|||
415 | m = getset(repo, fullreposet(repo), x) |
|
|||
416 | if not m: |
|
|||
417 | return baseset() |
|
|||
418 | return _makerangeset(repo, subset, m.first(), len(repo) - 1, order) |
|
|||
419 |
|
||||
420 | def _makerangeset(repo, subset, m, n, order): |
|
|||
421 | if m == n: |
|
|||
422 | r = baseset([m]) |
|
|||
423 | elif n == node.wdirrev: |
|
|||
424 | r = spanset(repo, m, len(repo)) + baseset([n]) |
|
|||
425 | elif m == node.wdirrev: |
|
|||
426 | r = baseset([m]) + spanset(repo, len(repo) - 1, n - 1) |
|
|||
427 | elif m < n: |
|
|||
428 | r = spanset(repo, m, n + 1) |
|
|||
429 | else: |
|
|||
430 | r = spanset(repo, m, n - 1) |
|
|||
431 |
|
||||
432 | if order == defineorder: |
|
|||
433 | return r & subset |
|
|||
434 | else: |
|
|||
435 | # carrying the sorting over when possible would be more efficient |
|
|||
436 | return subset & r |
|
|||
437 |
|
||||
438 | def dagrange(repo, subset, x, y, order): |
|
|||
439 | r = fullreposet(repo) |
|
|||
440 | xs = reachableroots(repo, getset(repo, r, x), getset(repo, r, y), |
|
|||
441 | includepath=True) |
|
|||
442 | return subset & xs |
|
|||
443 |
|
||||
444 | def andset(repo, subset, x, y, order): |
|
|||
445 | return getset(repo, getset(repo, subset, x), y) |
|
|||
446 |
|
||||
447 | def differenceset(repo, subset, x, y, order): |
|
|||
448 | return getset(repo, subset, x) - getset(repo, subset, y) |
|
|||
449 |
|
||||
450 | def _orsetlist(repo, subset, xs): |
|
|||
451 | assert xs |
|
|||
452 | if len(xs) == 1: |
|
|||
453 | return getset(repo, subset, xs[0]) |
|
|||
454 | p = len(xs) // 2 |
|
|||
455 | a = _orsetlist(repo, subset, xs[:p]) |
|
|||
456 | b = _orsetlist(repo, subset, xs[p:]) |
|
|||
457 | return a + b |
|
|||
458 |
|
||||
459 | def orset(repo, subset, x, order): |
|
|||
460 | xs = getlist(x) |
|
|||
461 | if order == followorder: |
|
|||
462 | # slow path to take the subset order |
|
|||
463 | return subset & _orsetlist(repo, fullreposet(repo), xs) |
|
|||
464 | else: |
|
|||
465 | return _orsetlist(repo, subset, xs) |
|
|||
466 |
|
||||
467 | def notset(repo, subset, x, order): |
|
|||
468 | return subset - getset(repo, subset, x) |
|
|||
469 |
|
||||
470 | def listset(repo, subset, *xs): |
|
|||
471 | raise error.ParseError(_("can't use a list in this context"), |
|
|||
472 | hint=_('see hg help "revsets.x or y"')) |
|
|||
473 |
|
||||
474 | def keyvaluepair(repo, subset, k, v): |
|
|||
475 | raise error.ParseError(_("can't use a key-value pair in this context")) |
|
|||
476 |
|
||||
477 | def func(repo, subset, a, b, order): |
|
|||
478 | f = getsymbol(a) |
|
|||
479 | if f in symbols: |
|
|||
480 | func = symbols[f] |
|
|||
481 | if getattr(func, '_takeorder', False): |
|
|||
482 | return func(repo, subset, b, order) |
|
|||
483 | return func(repo, subset, b) |
|
|||
484 |
|
||||
485 | keep = lambda fn: getattr(fn, '__doc__', None) is not None |
|
|||
486 |
|
||||
487 | syms = [s for (s, fn) in symbols.items() if keep(fn)] |
|
|||
488 | raise error.UnknownIdentifier(f, syms) |
|
|||
489 |
|
||||
490 | # functions |
|
|||
491 |
|
||||
492 | # symbols are callables like: |
|
|||
493 | # fn(repo, subset, x) |
|
|||
494 | # with: |
|
|||
495 | # repo - current repository instance |
|
|||
496 | # subset - of revisions to be examined |
|
|||
497 | # x - argument in tree form |
|
|||
498 | symbols = {} |
|
|||
499 |
|
||||
500 | # symbols which can't be used for a DoS attack for any given input |
|
|||
501 | # (e.g. those which accept regexes as plain strings shouldn't be included) |
|
|||
502 | # functions that just return a lot of changesets (like all) don't count here |
|
|||
503 | safesymbols = set() |
|
|||
504 |
|
||||
505 | predicate = registrar.revsetpredicate() |
|
|||
506 |
|
||||
507 | @predicate('_destupdate') |
|
|||
508 | def _destupdate(repo, subset, x): |
|
|||
509 | # experimental revset for update destination |
|
|||
510 | args = getargsdict(x, 'limit', 'clean') |
|
|||
511 | return subset & baseset([destutil.destupdate(repo, **args)[0]]) |
|
|||
512 |
|
||||
513 | @predicate('_destmerge') |
|
|||
514 | def _destmerge(repo, subset, x): |
|
|||
515 | # experimental revset for merge destination |
|
|||
516 | sourceset = None |
|
|||
517 | if x is not None: |
|
|||
518 | sourceset = getset(repo, fullreposet(repo), x) |
|
|||
519 | return subset & baseset([destutil.destmerge(repo, sourceset=sourceset)]) |
|
|||
520 |
|
||||
521 | @predicate('adds(pattern)', safe=True) |
|
|||
522 | def adds(repo, subset, x): |
|
|||
523 | """Changesets that add a file matching pattern. |
|
|||
524 |
|
||||
525 | The pattern without explicit kind like ``glob:`` is expected to be |
|
|||
526 | relative to the current directory and match against a file or a |
|
|||
527 | directory. |
|
|||
528 | """ |
|
|||
529 | # i18n: "adds" is a keyword |
|
|||
530 | pat = getstring(x, _("adds requires a pattern")) |
|
|||
531 | return checkstatus(repo, subset, pat, 1) |
|
|||
532 |
|
||||
533 | @predicate('ancestor(*changeset)', safe=True) |
|
|||
534 | def ancestor(repo, subset, x): |
|
|||
535 | """A greatest common ancestor of the changesets. |
|
|||
536 |
|
||||
537 | Accepts 0 or more changesets. |
|
|||
538 | Will return empty list when passed no args. |
|
|||
539 | Greatest common ancestor of a single changeset is that changeset. |
|
|||
540 | """ |
|
|||
541 | # i18n: "ancestor" is a keyword |
|
|||
542 | l = getlist(x) |
|
|||
543 | rl = fullreposet(repo) |
|
|||
544 | anc = None |
|
|||
545 |
|
||||
546 | # (getset(repo, rl, i) for i in l) generates a list of lists |
|
|||
547 | for revs in (getset(repo, rl, i) for i in l): |
|
|||
548 | for r in revs: |
|
|||
549 | if anc is None: |
|
|||
550 | anc = repo[r] |
|
|||
551 | else: |
|
|||
552 | anc = anc.ancestor(repo[r]) |
|
|||
553 |
|
||||
554 | if anc is not None and anc.rev() in subset: |
|
|||
555 | return baseset([anc.rev()]) |
|
|||
556 | return baseset() |
|
|||
557 |
|
||||
558 | def _ancestors(repo, subset, x, followfirst=False): |
|
|||
559 | heads = getset(repo, fullreposet(repo), x) |
|
|||
560 | if not heads: |
|
|||
561 | return baseset() |
|
|||
562 | s = _revancestors(repo, heads, followfirst) |
|
|||
563 | return subset & s |
|
|||
564 |
|
||||
565 | @predicate('ancestors(set)', safe=True) |
|
|||
566 | def ancestors(repo, subset, x): |
|
|||
567 | """Changesets that are ancestors of a changeset in set. |
|
|||
568 | """ |
|
|||
569 | return _ancestors(repo, subset, x) |
|
|||
570 |
|
||||
571 | @predicate('_firstancestors', safe=True) |
|
|||
572 | def _firstancestors(repo, subset, x): |
|
|||
573 | # ``_firstancestors(set)`` |
|
|||
574 | # Like ``ancestors(set)`` but follows only the first parents. |
|
|||
575 | return _ancestors(repo, subset, x, followfirst=True) |
|
|||
576 |
|
||||
577 | def ancestorspec(repo, subset, x, n, order): |
|
|||
578 | """``set~n`` |
|
|||
579 | Changesets that are the Nth ancestor (first parents only) of a changeset |
|
|||
580 | in set. |
|
|||
581 | """ |
|
|||
582 | n = getinteger(n, _("~ expects a number")) |
|
|||
583 | ps = set() |
|
|||
584 | cl = repo.changelog |
|
|||
585 | for r in getset(repo, fullreposet(repo), x): |
|
|||
586 | for i in range(n): |
|
|||
587 | r = cl.parentrevs(r)[0] |
|
|||
588 | ps.add(r) |
|
|||
589 | return subset & ps |
|
|||
590 |
|
||||
591 | @predicate('author(string)', safe=True) |
|
|||
592 | def author(repo, subset, x): |
|
|||
593 | """Alias for ``user(string)``. |
|
|||
594 | """ |
|
|||
595 | # i18n: "author" is a keyword |
|
|||
596 | n = getstring(x, _("author requires a string")) |
|
|||
597 | kind, pattern, matcher = _substringmatcher(n, casesensitive=False) |
|
|||
598 | return subset.filter(lambda x: matcher(repo[x].user()), |
|
|||
599 | condrepr=('<user %r>', n)) |
|
|||
600 |
|
||||
601 | @predicate('bisect(string)', safe=True) |
|
|||
602 | def bisect(repo, subset, x): |
|
|||
603 | """Changesets marked in the specified bisect status: |
|
|||
604 |
|
||||
605 | - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip |
|
|||
606 | - ``goods``, ``bads`` : csets topologically good/bad |
|
|||
607 | - ``range`` : csets taking part in the bisection |
|
|||
608 | - ``pruned`` : csets that are goods, bads or skipped |
|
|||
609 | - ``untested`` : csets whose fate is yet unknown |
|
|||
610 | - ``ignored`` : csets ignored due to DAG topology |
|
|||
611 | - ``current`` : the cset currently being bisected |
|
|||
612 | """ |
|
|||
613 | # i18n: "bisect" is a keyword |
|
|||
614 | status = getstring(x, _("bisect requires a string")).lower() |
|
|||
615 | state = set(hbisect.get(repo, status)) |
|
|||
616 | return subset & state |
|
|||
617 |
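For reference, these status names are what you would pass when querying from Python. A hedged usage sketch with ``repo.revs()``; it assumes the current directory is a Mercurial repository with an active bisection and is not part of this module:

from mercurial import hg, ui as uimod

# Assumption: '.' is a repository and a bisection is in progress.
repo = hg.repository(uimod.ui.load(), '.')
untested = repo.revs('bisect(untested)')   # revisions whose fate is yet unknown
pruned = repo.revs('bisect(pruned)')       # good + bad + skipped
print(list(untested))
print(list(pruned))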
|
||||
618 | # Backward-compatibility |
|
|||
619 | # - no help entry so that we do not advertise it any more |
|
|||
620 | @predicate('bisected', safe=True) |
|
|||
621 | def bisected(repo, subset, x): |
|
|||
622 | return bisect(repo, subset, x) |
|
|||
623 |
|
||||
624 | @predicate('bookmark([name])', safe=True) |
|
|||
625 | def bookmark(repo, subset, x): |
|
|||
626 | """The named bookmark or all bookmarks. |
|
|||
627 |
|
||||
628 | Pattern matching is supported for `name`. See :hg:`help revisions.patterns`. |
|
|||
629 | """ |
|
|||
630 | # i18n: "bookmark" is a keyword |
|
|||
631 | args = getargs(x, 0, 1, _('bookmark takes one or no arguments')) |
|
|||
632 | if args: |
|
|||
633 | bm = getstring(args[0], |
|
|||
634 | # i18n: "bookmark" is a keyword |
|
|||
635 | _('the argument to bookmark must be a string')) |
|
|||
636 | kind, pattern, matcher = util.stringmatcher(bm) |
|
|||
637 | bms = set() |
|
|||
638 | if kind == 'literal': |
|
|||
639 | bmrev = repo._bookmarks.get(pattern, None) |
|
|||
640 | if not bmrev: |
|
|||
641 | raise error.RepoLookupError(_("bookmark '%s' does not exist") |
|
|||
642 | % pattern) |
|
|||
643 | bms.add(repo[bmrev].rev()) |
|
|||
644 | else: |
|
|||
645 | matchrevs = set() |
|
|||
646 | for name, bmrev in repo._bookmarks.iteritems(): |
|
|||
647 | if matcher(name): |
|
|||
648 | matchrevs.add(bmrev) |
|
|||
649 | if not matchrevs: |
|
|||
650 | raise error.RepoLookupError(_("no bookmarks exist" |
|
|||
651 | " that match '%s'") % pattern) |
|
|||
652 | for bmrev in matchrevs: |
|
|||
653 | bms.add(repo[bmrev].rev()) |
|
|||
654 | else: |
|
|||
655 | bms = set([repo[r].rev() |
|
|||
656 | for r in repo._bookmarks.values()]) |
|
|||
657 | bms -= set([node.nullrev]) |
|
|||
658 | return subset & bms |
|
|||
659 |
|
||||
660 | @predicate('branch(string or set)', safe=True) |
|
|||
661 | def branch(repo, subset, x): |
|
|||
662 | """ |
|
|||
663 | All changesets belonging to the given branch or the branches of the given |
|
|||
664 | changesets. |
|
|||
665 |
|
||||
666 | Pattern matching is supported for `string`. See |
|
|||
667 | :hg:`help revisions.patterns`. |
|
|||
668 | """ |
|
|||
669 | getbi = repo.revbranchcache().branchinfo |
|
|||
670 |
|
||||
671 | try: |
|
|||
672 | b = getstring(x, '') |
|
|||
673 | except error.ParseError: |
|
|||
674 | # not a string, but another revspec, e.g. tip() |
|
|||
675 | pass |
|
|||
676 | else: |
|
|||
677 | kind, pattern, matcher = util.stringmatcher(b) |
|
|||
678 | if kind == 'literal': |
|
|||
679 | # note: falls through to the revspec case if no branch with |
|
|||
680 | # this name exists and pattern kind is not specified explicitly |
|
|||
681 | if pattern in repo.branchmap(): |
|
|||
682 | return subset.filter(lambda r: matcher(getbi(r)[0]), |
|
|||
683 | condrepr=('<branch %r>', b)) |
|
|||
684 | if b.startswith('literal:'): |
|
|||
685 | raise error.RepoLookupError(_("branch '%s' does not exist") |
|
|||
686 | % pattern) |
|
|||
687 | else: |
|
|||
688 | return subset.filter(lambda r: matcher(getbi(r)[0]), |
|
|||
689 | condrepr=('<branch %r>', b)) |
|
|||
690 |
|
||||
691 | s = getset(repo, fullreposet(repo), x) |
|
|||
692 | b = set() |
|
|||
693 | for r in s: |
|
|||
694 | b.add(getbi(r)[0]) |
|
|||
695 | c = s.__contains__ |
|
|||
696 | return subset.filter(lambda r: c(r) or getbi(r)[0] in b, |
|
|||
697 | condrepr=lambda: '<branch %r>' % sorted(b)) |
|
|||
698 |
|
||||
699 | @predicate('bumped()', safe=True) |
|
|||
700 | def bumped(repo, subset, x): |
|
|||
701 | """Mutable changesets marked as successors of public changesets. |
|
|||
702 |
|
||||
703 | Only non-public and non-obsolete changesets can be `bumped`. |
|
|||
704 | """ |
|
|||
705 | # i18n: "bumped" is a keyword |
|
|||
706 | getargs(x, 0, 0, _("bumped takes no arguments")) |
|
|||
707 | bumped = obsmod.getrevs(repo, 'bumped') |
|
|||
708 | return subset & bumped |
|
|||
709 |
|
||||
710 | @predicate('bundle()', safe=True) |
|
|||
711 | def bundle(repo, subset, x): |
|
|||
712 | """Changesets in the bundle. |
|
|||
713 |
|
||||
714 | Bundle must be specified by the -R option.""" |
|
|||
715 |
|
||||
716 | try: |
|
|||
717 | bundlerevs = repo.changelog.bundlerevs |
|
|||
718 | except AttributeError: |
|
|||
719 | raise error.Abort(_("no bundle provided - specify with -R")) |
|
|||
720 | return subset & bundlerevs |
|
|||
721 |
|
||||
722 | def checkstatus(repo, subset, pat, field): |
|
|||
723 | hasset = matchmod.patkind(pat) == 'set' |
|
|||
724 |
|
||||
725 | mcache = [None] |
|
|||
726 | def matches(x): |
|
|||
727 | c = repo[x] |
|
|||
728 | if not mcache[0] or hasset: |
|
|||
729 | mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c) |
|
|||
730 | m = mcache[0] |
|
|||
731 | fname = None |
|
|||
732 | if not m.anypats() and len(m.files()) == 1: |
|
|||
733 | fname = m.files()[0] |
|
|||
734 | if fname is not None: |
|
|||
735 | if fname not in c.files(): |
|
|||
736 | return False |
|
|||
737 | else: |
|
|||
738 | for f in c.files(): |
|
|||
739 | if m(f): |
|
|||
740 | break |
|
|||
741 | else: |
|
|||
742 | return False |
|
|||
743 | files = repo.status(c.p1().node(), c.node())[field] |
|
|||
744 | if fname is not None: |
|
|||
745 | if fname in files: |
|
|||
746 | return True |
|
|||
747 | else: |
|
|||
748 | for f in files: |
|
|||
749 | if m(f): |
|
|||
750 | return True |
|
|||
751 |
|
||||
752 | return subset.filter(matches, condrepr=('<status[%r] %r>', field, pat)) |
|
|||
753 |
|
||||
754 | def _children(repo, subset, parentset): |
|
|||
755 | if not parentset: |
|
|||
756 | return baseset() |
|
|||
757 | cs = set() |
|
|||
758 | pr = repo.changelog.parentrevs |
|
|||
759 | minrev = parentset.min() |
|
|||
760 | nullrev = node.nullrev |
|
|||
761 | for r in subset: |
|
|||
762 | if r <= minrev: |
|
|||
763 | continue |
|
|||
764 | p1, p2 = pr(r) |
|
|||
765 | if p1 in parentset: |
|
|||
766 | cs.add(r) |
|
|||
767 | if p2 != nullrev and p2 in parentset: |
|
|||
768 | cs.add(r) |
|
|||
769 | return baseset(cs) |
|
|||
770 |
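``_children()`` avoids walking the whole graph: it scans only the candidate subset, skips anything at or below the smallest parent revision (a child always has a higher revision number than its parents), and keeps revisions whose p1 or p2 lies in the parent set. A standalone sketch with toy data (illustrative, not the real changelog API):

# Illustrative only: parent pairs as {rev: (p1, p2)}, -1 meaning "no parent".
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}

def children_of(subset, parentset):
    if not parentset:
        return set()
    minrev = min(parentset)
    out = set()
    for r in subset:
        if r <= minrev:          # cannot be a child of anything in parentset
            continue
        p1, p2 = parentrevs[r]
        if p1 in parentset or (p2 != -1 and p2 in parentset):
            out.add(r)
    return out

print(children_of({0, 1, 2, 3}, {0}))  # -> {1, 2}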
|
||||
771 | @predicate('children(set)', safe=True) |
|
|||
772 | def children(repo, subset, x): |
|
|||
773 | """Child changesets of changesets in set. |
|
|||
774 | """ |
|
|||
775 | s = getset(repo, fullreposet(repo), x) |
|
|||
776 | cs = _children(repo, subset, s) |
|
|||
777 | return subset & cs |
|
|||
778 |
|
||||
779 | @predicate('closed()', safe=True) |
|
|||
780 | def closed(repo, subset, x): |
|
|||
781 | """Changeset is closed. |
|
|||
782 | """ |
|
|||
783 | # i18n: "closed" is a keyword |
|
|||
784 | getargs(x, 0, 0, _("closed takes no arguments")) |
|
|||
785 | return subset.filter(lambda r: repo[r].closesbranch(), |
|
|||
786 | condrepr='<branch closed>') |
|
|||
787 |
|
||||
788 | @predicate('contains(pattern)') |
|
|||
789 | def contains(repo, subset, x): |
|
|||
790 | """The revision's manifest contains a file matching pattern (but might not |
|
|||
791 | modify it). See :hg:`help patterns` for information about file patterns. |
|
|||
792 |
|
||||
793 | The pattern without explicit kind like ``glob:`` is expected to be |
|
|||
794 | relative to the current directory and match against a file exactly |
|
|||
795 | for efficiency. |
|
|||
796 | """ |
|
|||
797 | # i18n: "contains" is a keyword |
|
|||
798 | pat = getstring(x, _("contains requires a pattern")) |
|
|||
799 |
|
||||
800 | def matches(x): |
|
|||
801 | if not matchmod.patkind(pat): |
|
|||
802 | pats = pathutil.canonpath(repo.root, repo.getcwd(), pat) |
|
|||
803 | if pats in repo[x]: |
|
|||
804 | return True |
|
|||
805 | else: |
|
|||
806 | c = repo[x] |
|
|||
807 | m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c) |
|
|||
808 | for f in c.manifest(): |
|
|||
809 | if m(f): |
|
|||
810 | return True |
|
|||
811 | return False |
|
|||
812 |
|
||||
813 | return subset.filter(matches, condrepr=('<contains %r>', pat)) |
|
|||
814 |
|
||||
815 | @predicate('converted([id])', safe=True) |
|
|||
816 | def converted(repo, subset, x): |
|
|||
817 | """Changesets converted from the given identifier in the old repository if |
|
|||
818 | present, or all converted changesets if no identifier is specified. |
|
|||
819 | """ |
|
|||
820 |
|
||||
821 | # There is exactly no chance of resolving the revision, so do a simple |
|
|||
822 | # string compare and hope for the best |
|
|||
823 |
|
||||
824 | rev = None |
|
|||
825 | # i18n: "converted" is a keyword |
|
|||
826 | l = getargs(x, 0, 1, _('converted takes one or no arguments')) |
|
|||
827 | if l: |
|
|||
828 | # i18n: "converted" is a keyword |
|
|||
829 | rev = getstring(l[0], _('converted requires a revision')) |
|
|||
830 |
|
||||
831 | def _matchvalue(r): |
|
|||
832 | source = repo[r].extra().get('convert_revision', None) |
|
|||
833 | return source is not None and (rev is None or source.startswith(rev)) |
|
|||
834 |
|
||||
835 | return subset.filter(lambda r: _matchvalue(r), |
|
|||
836 | condrepr=('<converted %r>', rev)) |
|
|||
837 |
|
||||
838 | @predicate('date(interval)', safe=True) |
|
|||
839 | def date(repo, subset, x): |
|
|||
840 | """Changesets within the interval, see :hg:`help dates`. |
|
|||
841 | """ |
|
|||
842 | # i18n: "date" is a keyword |
|
|||
843 | ds = getstring(x, _("date requires a string")) |
|
|||
844 | dm = util.matchdate(ds) |
|
|||
845 | return subset.filter(lambda x: dm(repo[x].date()[0]), |
|
|||
846 | condrepr=('<date %r>', ds)) |
|
|||
847 |
|
||||
848 | @predicate('desc(string)', safe=True) |
|
|||
849 | def desc(repo, subset, x): |
|
|||
850 | """Search commit message for string. The match is case-insensitive. |
|
|||
851 |
|
||||
852 | Pattern matching is supported for `string`. See |
|
|||
853 | :hg:`help revisions.patterns`. |
|
|||
854 | """ |
|
|||
855 | # i18n: "desc" is a keyword |
|
|||
856 | ds = getstring(x, _("desc requires a string")) |
|
|||
857 |
|
||||
858 | kind, pattern, matcher = _substringmatcher(ds, casesensitive=False) |
|
|||
859 |
|
||||
860 | return subset.filter(lambda r: matcher(repo[r].description()), |
|
|||
861 | condrepr=('<desc %r>', ds)) |
|
|||
862 |
|
||||
863 | def _descendants(repo, subset, x, followfirst=False): |
|
|||
864 | roots = getset(repo, fullreposet(repo), x) |
|
|||
865 | if not roots: |
|
|||
866 | return baseset() |
|
|||
867 | s = _revdescendants(repo, roots, followfirst) |
|
|||
868 |
|
||||
869 | # Both sets need to be ascending in order to lazily return the union |
|
|||
870 | # in the correct order. |
|
|||
871 | base = subset & roots |
|
|||
872 | desc = subset & s |
|
|||
873 | result = base + desc |
|
|||
874 | if subset.isascending(): |
|
|||
875 | result.sort() |
|
|||
876 | elif subset.isdescending(): |
|
|||
877 | result.sort(reverse=True) |
|
|||
878 | else: |
|
|||
879 | result = subset & result |
|
|||
880 | return result |
|
|||
881 |
|
||||
882 | @predicate('descendants(set)', safe=True) |
|
|||
883 | def descendants(repo, subset, x): |
|
|||
884 | """Changesets which are descendants of changesets in set. |
|
|||
885 | """ |
|
|||
886 | return _descendants(repo, subset, x) |
|
|||
887 |
|
||||
888 | @predicate('_firstdescendants', safe=True) |
|
|||
889 | def _firstdescendants(repo, subset, x): |
|
|||
890 | # ``_firstdescendants(set)`` |
|
|||
891 | # Like ``descendants(set)`` but follows only the first parents. |
|
|||
892 | return _descendants(repo, subset, x, followfirst=True) |
|
|||
893 |
|
||||
894 | @predicate('destination([set])', safe=True) |
|
|||
895 | def destination(repo, subset, x): |
|
|||
896 | """Changesets that were created by a graft, transplant or rebase operation, |
|
|||
897 | with the given revisions specified as the source. Omitting the optional set |
|
|||
898 | is the same as passing all(). |
|
|||
899 | """ |
|
|||
900 | if x is not None: |
|
|||
901 | sources = getset(repo, fullreposet(repo), x) |
|
|||
902 | else: |
|
|||
903 | sources = fullreposet(repo) |
|
|||
904 |
|
||||
905 | dests = set() |
|
|||
906 |
|
||||
907 | # subset contains all of the possible destinations that can be returned, so |
|
|||
908 | # iterate over them and see if their source(s) were provided in the arg set. |
|
|||
909 | # Even if the immediate src of r is not in the arg set, src's source (or |
|
|||
910 | # further back) may be. Scanning back further than the immediate src allows |
|
|||
911 | # transitive transplants and rebases to yield the same results as transitive |
|
|||
912 | # grafts. |
|
|||
913 | for r in subset: |
|
|||
914 | src = _getrevsource(repo, r) |
|
|||
915 | lineage = None |
|
|||
916 |
|
||||
917 | while src is not None: |
|
|||
918 | if lineage is None: |
|
|||
919 | lineage = list() |
|
|||
920 |
|
||||
921 | lineage.append(r) |
|
|||
922 |
|
||||
923 | # The visited lineage is a match if the current source is in the arg |
|
|||
924 | # set. Since every candidate dest is visited by way of iterating |
|
|||
925 | # subset, any dests further back in the lineage will be tested by a |
|
|||
926 | # different iteration over subset. Likewise, if the src was already |
|
|||
927 | # selected, the current lineage can be selected without going back |
|
|||
928 | # further. |
|
|||
929 | if src in sources or src in dests: |
|
|||
930 | dests.update(lineage) |
|
|||
931 | break |
|
|||
932 |
|
||||
933 | r = src |
|
|||
934 | src = _getrevsource(repo, r) |
|
|||
935 |
|
||||
936 | return subset.filter(dests.__contains__, |
|
|||
937 | condrepr=lambda: '<destination %r>' % sorted(dests)) |
|
|||
938 |
|
||||
939 | @predicate('divergent()', safe=True) |
|
|||
940 | def divergent(repo, subset, x): |
|
|||
941 | """ |
|
|||
942 | Final successors of changesets with an alternative set of final successors. |
|
|||
943 | """ |
|
|||
944 | # i18n: "divergent" is a keyword |
|
|||
945 | getargs(x, 0, 0, _("divergent takes no arguments")) |
|
|||
946 | divergent = obsmod.getrevs(repo, 'divergent') |
|
|||
947 | return subset & divergent |
|
|||
948 |
|
||||
949 | @predicate('extinct()', safe=True) |
|
|||
950 | def extinct(repo, subset, x): |
|
|||
951 | """Obsolete changesets with obsolete descendants only. |
|
|||
952 | """ |
|
|||
953 | # i18n: "extinct" is a keyword |
|
|||
954 | getargs(x, 0, 0, _("extinct takes no arguments")) |
|
|||
955 | extincts = obsmod.getrevs(repo, 'extinct') |
|
|||
956 | return subset & extincts |
|
|||
957 |
|
||||
958 | @predicate('extra(label, [value])', safe=True) |
|
|||
959 | def extra(repo, subset, x): |
|
|||
960 | """Changesets with the given label in the extra metadata, with the given |
|
|||
961 | optional value. |
|
|||
962 |
|
||||
963 | Pattern matching is supported for `value`. See |
|
|||
964 | :hg:`help revisions.patterns`. |
|
|||
965 | """ |
|
|||
966 | args = getargsdict(x, 'extra', 'label value') |
|
|||
967 | if 'label' not in args: |
|
|||
968 | # i18n: "extra" is a keyword |
|
|||
969 | raise error.ParseError(_('extra takes at least 1 argument')) |
|
|||
970 | # i18n: "extra" is a keyword |
|
|||
971 | label = getstring(args['label'], _('first argument to extra must be ' |
|
|||
972 | 'a string')) |
|
|||
973 | value = None |
|
|||
974 |
|
||||
975 | if 'value' in args: |
|
|||
976 | # i18n: "extra" is a keyword |
|
|||
977 | value = getstring(args['value'], _('second argument to extra must be ' |
|
|||
978 | 'a string')) |
|
|||
979 | kind, value, matcher = util.stringmatcher(value) |
|
|||
980 |
|
||||
981 | def _matchvalue(r): |
|
|||
982 | extra = repo[r].extra() |
|
|||
983 | return label in extra and (value is None or matcher(extra[label])) |
|
|||
984 |
|
||||
985 | return subset.filter(lambda r: _matchvalue(r), |
|
|||
986 | condrepr=('<extra[%r] %r>', label, value)) |
|
|||
987 |
|
||||
988 | @predicate('filelog(pattern)', safe=True) |
|
|||
989 | def filelog(repo, subset, x): |
|
|||
990 | """Changesets connected to the specified filelog. |
|
|||
991 |
|
||||
992 | For performance reasons, visits only revisions mentioned in the file-level |
|
|||
993 | filelog, rather than filtering through all changesets (much faster, but |
|
|||
994 | doesn't include deletes or duplicate changes). For a slower, more accurate |
|
|||
995 | result, use ``file()``. |
|
|||
996 |
|
||||
997 | The pattern without explicit kind like ``glob:`` is expected to be |
|
|||
998 | relative to the current directory and match against a file exactly |
|
|||
999 | for efficiency. |
|
|||
1000 |
|
||||
1001 | If some linkrev points to revisions filtered by the current repoview, we'll |
|
|||
1002 | work around it to return a non-filtered value. |
|
|||
1003 | """ |
|
|||
1004 |
|
||||
1005 | # i18n: "filelog" is a keyword |
|
|||
1006 | pat = getstring(x, _("filelog requires a pattern")) |
|
|||
1007 | s = set() |
|
|||
1008 | cl = repo.changelog |
|
|||
1009 |
|
||||
1010 | if not matchmod.patkind(pat): |
|
|||
1011 | f = pathutil.canonpath(repo.root, repo.getcwd(), pat) |
|
|||
1012 | files = [f] |
|
|||
1013 | else: |
|
|||
1014 | m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None]) |
|
|||
1015 | files = (f for f in repo[None] if m(f)) |
|
|||
1016 |
|
||||
1017 | for f in files: |
|
|||
1018 | fl = repo.file(f) |
|
|||
1019 | known = {} |
|
|||
1020 | scanpos = 0 |
|
|||
1021 | for fr in list(fl): |
|
|||
1022 | fn = fl.node(fr) |
|
|||
1023 | if fn in known: |
|
|||
1024 | s.add(known[fn]) |
|
|||
1025 | continue |
|
|||
1026 |
|
||||
1027 | lr = fl.linkrev(fr) |
|
|||
1028 | if lr in cl: |
|
|||
1029 | s.add(lr) |
|
|||
1030 | elif scanpos is not None: |
|
|||
1031 | # lowest matching changeset is filtered, scan further |
|
|||
1032 | # ahead in changelog |
|
|||
1033 | start = max(lr, scanpos) + 1 |
|
|||
1034 | scanpos = None |
|
|||
1035 | for r in cl.revs(start): |
|
|||
1036 | # minimize parsing of non-matching entries |
|
|||
1037 | if f in cl.revision(r) and f in cl.readfiles(r): |
|
|||
1038 | try: |
|
|||
1039 | # try to use manifest delta fastpath |
|
|||
1040 | n = repo[r].filenode(f) |
|
|||
1041 | if n not in known: |
|
|||
1042 | if n == fn: |
|
|||
1043 | s.add(r) |
|
|||
1044 | scanpos = r |
|
|||
1045 | break |
|
|||
1046 | else: |
|
|||
1047 | known[n] = r |
|
|||
1048 | except error.ManifestLookupError: |
|
|||
1049 | # deletion in changelog |
|
|||
1050 | continue |
|
|||
1051 |
|
||||
1052 | return subset & s |
|
|||
1053 |
|
||||
1054 | @predicate('first(set, [n])', safe=True) |
|
|||
1055 | def first(repo, subset, x): |
|
|||
1056 | """An alias for limit(). |
|
|||
1057 | """ |
|
|||
1058 | return limit(repo, subset, x) |
|
|||
1059 |
|
||||
1060 | def _follow(repo, subset, x, name, followfirst=False): |
|
|||
1061 | l = getargs(x, 0, 2, _("%s takes no arguments or a pattern " |
|
|||
1062 | "and an optional revset") % name) |
|
|||
1063 | c = repo['.'] |
|
|||
1064 | if l: |
|
|||
1065 | x = getstring(l[0], _("%s expected a pattern") % name) |
|
|||
1066 | rev = None |
|
|||
1067 | if len(l) >= 2: |
|
|||
1068 | revs = getset(repo, fullreposet(repo), l[1]) |
|
|||
1069 | if len(revs) != 1: |
|
|||
1070 | raise error.RepoLookupError( |
|
|||
1071 | _("%s expected one starting revision") % name) |
|
|||
1072 | rev = revs.last() |
|
|||
1073 | c = repo[rev] |
|
|||
1074 | matcher = matchmod.match(repo.root, repo.getcwd(), [x], |
|
|||
1075 | ctx=repo[rev], default='path') |
|
|||
1076 |
|
||||
1077 | files = c.manifest().walk(matcher) |
|
|||
1078 |
|
||||
1079 | s = set() |
|
|||
1080 | for fname in files: |
|
|||
1081 | fctx = c[fname] |
|
|||
1082 | s = s.union(set(c.rev() for c in fctx.ancestors(followfirst))) |
|
|||
1083 | # include the revision responsible for the most recent version |
|
|||
1084 | s.add(fctx.introrev()) |
|
|||
1085 | else: |
|
|||
1086 | s = _revancestors(repo, baseset([c.rev()]), followfirst) |
|
|||
1087 |
|
||||
1088 | return subset & s |
|
|||
1089 |
|
||||
1090 | @predicate('follow([pattern[, startrev]])', safe=True) |
|
|||
1091 | def follow(repo, subset, x): |
|
|||
1092 | """ |
|
|||
1093 | An alias for ``::.`` (ancestors of the working directory's first parent). |
|
|||
1094 | If pattern is specified, the histories of files matching given |
|
|||
1095 | pattern in the revision given by startrev are followed, including copies. |
|
|||
1096 | """ |
|
|||
1097 | return _follow(repo, subset, x, 'follow') |
|
|||
1098 |
|
||||
1099 | @predicate('_followfirst', safe=True) |
|
|||
1100 | def _followfirst(repo, subset, x): |
|
|||
1101 | # ``followfirst([pattern[, startrev]])`` |
|
|||
1102 | # Like ``follow([pattern[, startrev]])`` but follows only the first parent |
|
|||
1103 | # of every revision or file revision. |
|
|||
1104 | return _follow(repo, subset, x, '_followfirst', followfirst=True) |
|
|||
1105 |
|
||||
1106 | @predicate('followlines(file, fromline:toline[, startrev=.])', safe=True) |
|
|||
1107 | def followlines(repo, subset, x): |
|
|||
1108 | """Changesets modifying `file` in line range ('fromline', 'toline'). |
|
|||
1109 |
|
||||
1110 | Line range corresponds to 'file' content at 'startrev' and should hence be |
|
|||
1111 | consistent with file size. If startrev is not specified, the working directory's |
|
|||
1112 | parent is used. |
|
|||
1113 | """ |
|
|||
1114 | from . import context # avoid circular import issues |
|
|||
1115 |
|
||||
1116 | args = getargsdict(x, 'followlines', 'file *lines startrev') |
|
|||
1117 | if len(args['lines']) != 1: |
|
|||
1118 | raise error.ParseError(_("followlines requires a line range")) |
|
|||
1119 |
|
||||
1120 | rev = '.' |
|
|||
1121 | if 'startrev' in args: |
|
|||
1122 | revs = getset(repo, fullreposet(repo), args['startrev']) |
|
|||
1123 | if len(revs) != 1: |
|
|||
1124 | raise error.ParseError( |
|
|||
1125 | _("followlines expects exactly one revision")) |
|
|||
1126 | rev = revs.last() |
|
|||
1127 |
|
||||
1128 | pat = getstring(args['file'], _("followlines requires a pattern")) |
|
|||
1129 | if not matchmod.patkind(pat): |
|
|||
1130 | fname = pathutil.canonpath(repo.root, repo.getcwd(), pat) |
|
|||
1131 | else: |
|
|||
1132 | m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[rev]) |
|
|||
1133 | files = [f for f in repo[rev] if m(f)] |
|
|||
1134 | if len(files) != 1: |
|
|||
1135 | raise error.ParseError(_("followlines expects exactly one file")) |
|
|||
1136 | fname = files[0] |
|
|||
1137 |
|
||||
1138 | lr = getrange(args['lines'][0], _("followlines expects a line range")) |
|
|||
1139 | fromline, toline = [getinteger(a, _("line range bounds must be integers")) |
|
|||
1140 | for a in lr] |
|
|||
1141 | if toline - fromline < 0: |
|
|||
1142 | raise error.ParseError(_("line range must be positive")) |
|
|||
1143 | if fromline < 1: |
|
|||
1144 | raise error.ParseError(_("fromline must be strictly positive")) |
|
|||
1145 | fromline -= 1 |
|
|||
1146 |
|
||||
1147 | fctx = repo[rev].filectx(fname) |
|
|||
1148 | revs = (c.rev() for c in context.blockancestors(fctx, fromline, toline)) |
|
|||
1149 | return subset & generatorset(revs, iterasc=False) |
|
|||
1150 |
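The bounds handling above turns the user-facing 1-based, inclusive ``fromline:toline`` range into the 0-based, half-open range used internally. A standalone sketch of just that conversion (illustrative):

# Illustrative only: mirrors the bounds checking done by followlines().
def parse_linerange(fromline, toline):
    if toline - fromline < 0:
        raise ValueError("line range must be positive")
    if fromline < 1:
        raise ValueError("fromline must be strictly positive")
    # 1-based inclusive user input -> 0-based half-open internal range
    return fromline - 1, toline

print(parse_linerange(3, 5))  # -> (2, 5), i.e. lines 3..5 of the file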
|
||||
1151 | @predicate('all()', safe=True) |
|
|||
1152 | def getall(repo, subset, x): |
|
|||
1153 | """All changesets, the same as ``0:tip``. |
|
|||
1154 | """ |
|
|||
1155 | # i18n: "all" is a keyword |
|
|||
1156 | getargs(x, 0, 0, _("all takes no arguments")) |
|
|||
1157 | return subset & spanset(repo) # drop "null" if any |
|
|||
1158 |
|
||||
1159 | @predicate('grep(regex)') |
|
|||
1160 | def grep(repo, subset, x): |
|
|||
1161 | """Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` |
|
|||
1162 | to ensure special escape characters are handled correctly. Unlike |
|
|||
1163 | ``keyword(string)``, the match is case-sensitive. |
|
|||
1164 | """ |
|
|||
1165 | try: |
|
|||
1166 | # i18n: "grep" is a keyword |
|
|||
1167 | gr = re.compile(getstring(x, _("grep requires a string"))) |
|
|||
1168 | except re.error as e: |
|
|||
1169 | raise error.ParseError(_('invalid match pattern: %s') % e) |
|
|||
1170 |
|
||||
1171 | def matches(x): |
|
|||
1172 | c = repo[x] |
|
|||
1173 | for e in c.files() + [c.user(), c.description()]: |
|
|||
1174 | if gr.search(e): |
|
|||
1175 | return True |
|
|||
1176 | return False |
|
|||
1177 |
|
||||
1178 | return subset.filter(matches, condrepr=('<grep %r>', gr.pattern)) |
|
|||
1179 |
|
||||
1180 | @predicate('_matchfiles', safe=True) |
|
|||
1181 | def _matchfiles(repo, subset, x): |
|
|||
1182 | # _matchfiles takes a revset list of prefixed arguments: |
|
|||
1183 | # |
|
|||
1184 | # [p:foo, i:bar, x:baz] |
|
|||
1185 | # |
|
|||
1186 | # builds a match object from them and filters subset. Allowed |
|
|||
1187 | # prefixes are 'p:' for regular patterns, 'i:' for include |
|
|||
1188 | # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass |
|
|||
1189 | # a revision identifier, or the empty string to reference the |
|
|||
1190 | # working directory, from which the match object is |
|
|||
1191 | # initialized. Use 'd:' to set the default matching mode, default |
|
|||
1192 | # to 'glob'. At most one 'r:' and 'd:' argument can be passed. |
|
|||
1193 |
|
||||
1194 | l = getargs(x, 1, -1, "_matchfiles requires at least one argument") |
|
|||
1195 | pats, inc, exc = [], [], [] |
|
|||
1196 | rev, default = None, None |
|
|||
1197 | for arg in l: |
|
|||
1198 | s = getstring(arg, "_matchfiles requires string arguments") |
|
|||
1199 | prefix, value = s[:2], s[2:] |
|
|||
1200 | if prefix == 'p:': |
|
|||
1201 | pats.append(value) |
|
|||
1202 | elif prefix == 'i:': |
|
|||
1203 | inc.append(value) |
|
|||
1204 | elif prefix == 'x:': |
|
|||
1205 | exc.append(value) |
|
|||
1206 | elif prefix == 'r:': |
|
|||
1207 | if rev is not None: |
|
|||
1208 | raise error.ParseError('_matchfiles expected at most one ' |
|
|||
1209 | 'revision') |
|
|||
1210 | if value != '': # empty means working directory; leave rev as None |
|
|||
1211 | rev = value |
|
|||
1212 | elif prefix == 'd:': |
|
|||
1213 | if default is not None: |
|
|||
1214 | raise error.ParseError('_matchfiles expected at most one ' |
|
|||
1215 | 'default mode') |
|
|||
1216 | default = value |
|
|||
1217 | else: |
|
|||
1218 | raise error.ParseError('invalid _matchfiles prefix: %s' % prefix) |
|
|||
1219 | if not default: |
|
|||
1220 | default = 'glob' |
|
|||
1221 |
|
||||
1222 | m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc, |
|
|||
1223 | exclude=exc, ctx=repo[rev], default=default) |
|
|||
1224 |
|
||||
1225 | # This directly reads the changelog data, as creating a changectx for all |
|
|||
1226 | # revisions is quite expensive. |
|
|||
1227 | getfiles = repo.changelog.readfiles |
|
|||
1228 | wdirrev = node.wdirrev |
|
|||
1229 | def matches(x): |
|
|||
1230 | if x == wdirrev: |
|
|||
1231 | files = repo[x].files() |
|
|||
1232 | else: |
|
|||
1233 | files = getfiles(x) |
|
|||
1234 | for f in files: |
|
|||
1235 | if m(f): |
|
|||
1236 | return True |
|
|||
1237 | return False |
|
|||
1238 |
|
||||
1239 | return subset.filter(matches, |
|
|||
1240 | condrepr=('<matchfiles patterns=%r, include=%r ' |
|
|||
1241 | 'exclude=%r, default=%r, rev=%r>', |
|
|||
1242 | pats, inc, exc, default, rev)) |
|
|||
1243 |
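A standalone sketch of the prefix parsing described in the comment above ('p:', 'i:', 'x:', 'r:', 'd:'), including the at-most-one rule for 'r:' and 'd:' (illustrative, not the real implementation):

# Illustrative only: parse ['p:foo', 'i:bar', 'x:baz', 'd:glob'] style arguments.
def parse_matchfiles(args):
    pats, inc, exc = [], [], []
    rev = default = None
    for s in args:
        prefix, value = s[:2], s[2:]
        if prefix == 'p:':
            pats.append(value)
        elif prefix == 'i:':
            inc.append(value)
        elif prefix == 'x:':
            exc.append(value)
        elif prefix == 'r:':
            if rev is not None:
                raise ValueError('at most one revision')
            rev = value or None        # '' means the working directory
        elif prefix == 'd:':
            if default is not None:
                raise ValueError('at most one default mode')
            default = value
        else:
            raise ValueError('invalid prefix: %s' % prefix)
    return pats, inc, exc, rev, default or 'glob'

print(parse_matchfiles(['p:foo', 'i:src/**', 'x:tests/**']))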
|
||||
1244 | @predicate('file(pattern)', safe=True) |
|
|||
1245 | def hasfile(repo, subset, x): |
|
|||
1246 | """Changesets affecting files matched by pattern. |
|
|||
1247 |
|
||||
1248 | For a faster but less accurate result, consider using ``filelog()`` |
|
|||
1249 | instead. |
|
|||
1250 |
|
||||
1251 | This predicate uses ``glob:`` as the default kind of pattern. |
|
|||
1252 | """ |
|
|||
1253 | # i18n: "file" is a keyword |
|
|||
1254 | pat = getstring(x, _("file requires a pattern")) |
|
|||
1255 | return _matchfiles(repo, subset, ('string', 'p:' + pat)) |
|
|||
1256 |
|
||||
1257 | @predicate('head()', safe=True) |
|
|||
1258 | def head(repo, subset, x): |
|
|||
1259 | """Changeset is a named branch head. |
|
|||
1260 | """ |
|
|||
1261 | # i18n: "head" is a keyword |
|
|||
1262 | getargs(x, 0, 0, _("head takes no arguments")) |
|
|||
1263 | hs = set() |
|
|||
1264 | cl = repo.changelog |
|
|||
1265 | for ls in repo.branchmap().itervalues(): |
|
|||
1266 | hs.update(cl.rev(h) for h in ls) |
|
|||
1267 | return subset & baseset(hs) |
|
|||
1268 |
|
||||
1269 | @predicate('heads(set)', safe=True) |
|
|||
1270 | def heads(repo, subset, x): |
|
|||
1271 | """Members of set with no children in set. |
|
|||
1272 | """ |
|
|||
1273 | s = getset(repo, subset, x) |
|
|||
1274 | ps = parents(repo, subset, x) |
|
|||
1275 | return s - ps |
|
|||
1276 |
|
||||
1277 | @predicate('hidden()', safe=True) |
|
|||
1278 | def hidden(repo, subset, x): |
|
|||
1279 | """Hidden changesets. |
|
|||
1280 | """ |
|
|||
1281 | # i18n: "hidden" is a keyword |
|
|||
1282 | getargs(x, 0, 0, _("hidden takes no arguments")) |
|
|||
1283 | hiddenrevs = repoview.filterrevs(repo, 'visible') |
|
|||
1284 | return subset & hiddenrevs |
|
|||
1285 |
|
||||
1286 | @predicate('keyword(string)', safe=True) |
|
|||
1287 | def keyword(repo, subset, x): |
|
|||
1288 | """Search commit message, user name, and names of changed files for |
|
|||
1289 | string. The match is case-insensitive. |
|
|||
1290 |
|
||||
1291 | For a regular expression or case-sensitive search of these fields, use |
|
|||
1292 | ``grep(regex)``. |
|
|||
1293 | """ |
|
|||
1294 | # i18n: "keyword" is a keyword |
|
|||
1295 | kw = encoding.lower(getstring(x, _("keyword requires a string"))) |
|
|||
1296 |
|
||||
1297 | def matches(r): |
|
|||
1298 | c = repo[r] |
|
|||
1299 | return any(kw in encoding.lower(t) |
|
|||
1300 | for t in c.files() + [c.user(), c.description()]) |
|
|||
1301 |
|
||||
1302 | return subset.filter(matches, condrepr=('<keyword %r>', kw)) |
|
|||
1303 |
|
||||
1304 | @predicate('limit(set[, n[, offset]])', safe=True) |
|
|||
1305 | def limit(repo, subset, x): |
|
|||
1306 | """First n members of set, defaulting to 1, starting from offset. |
|
|||
1307 | """ |
|
|||
1308 | args = getargsdict(x, 'limit', 'set n offset') |
|
|||
1309 | if 'set' not in args: |
|
|||
1310 | # i18n: "limit" is a keyword |
|
|||
1311 | raise error.ParseError(_("limit requires one to three arguments")) |
|
|||
1312 | # i18n: "limit" is a keyword |
|
|||
1313 | lim = getinteger(args.get('n'), _("limit expects a number"), default=1) |
|
|||
1314 | # i18n: "limit" is a keyword |
|
|||
1315 | ofs = getinteger(args.get('offset'), _("limit expects a number"), default=0) |
|
|||
1316 | if ofs < 0: |
|
|||
1317 | raise error.ParseError(_("negative offset")) |
|
|||
1318 | os = getset(repo, fullreposet(repo), args['set']) |
|
|||
1319 | result = [] |
|
|||
1320 | it = iter(os) |
|
|||
1321 | for x in xrange(ofs): |
|
|||
1322 | y = next(it, None) |
|
|||
1323 | if y is None: |
|
|||
1324 | break |
|
|||
1325 | for x in xrange(lim): |
|
|||
1326 | y = next(it, None) |
|
|||
1327 | if y is None: |
|
|||
1328 | break |
|
|||
1329 | elif y in subset: |
|
|||
1330 | result.append(y) |
|
|||
1331 | return baseset(result, datarepr=('<limit n=%d, offset=%d, %r, %r>', |
|
|||
1332 | lim, ofs, subset, os)) |
|
|||
1333 |
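The offset handling above simply discards the first ``offset`` members and then collects up to ``n`` members that are also in the subset. A standalone sketch over a plain iterable (illustrative):

from itertools import islice

# Illustrative only: mirror limit(set, n, offset) over a plain iterable.
def limit(revs, n=1, offset=0, subset=None):
    window = islice(revs, offset, offset + n)
    if subset is None:
        return list(window)
    return [r for r in window if r in subset]

print(limit([10, 11, 12, 13, 14], n=2, offset=1))        # -> [11, 12]
print(limit([10, 11, 12, 13], n=3, subset={10, 12, 99}))  # -> [10, 12]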
|
||||
1334 | @predicate('last(set, [n])', safe=True) |
|
|||
1335 | def last(repo, subset, x): |
|
|||
1336 | """Last n members of set, defaulting to 1. |
|
|||
1337 | """ |
|
|||
1338 | # i18n: "last" is a keyword |
|
|||
1339 | l = getargs(x, 1, 2, _("last requires one or two arguments")) |
|
|||
1340 | lim = 1 |
|
|||
1341 | if len(l) == 2: |
|
|||
1342 | # i18n: "last" is a keyword |
|
|||
1343 | lim = getinteger(l[1], _("last expects a number")) |
|
|||
1344 | os = getset(repo, fullreposet(repo), l[0]) |
|
|||
1345 | os.reverse() |
|
|||
1346 | result = [] |
|
|||
1347 | it = iter(os) |
|
|||
1348 | for x in xrange(lim): |
|
|||
1349 | y = next(it, None) |
|
|||
1350 | if y is None: |
|
|||
1351 | break |
|
|||
1352 | elif y in subset: |
|
|||
1353 | result.append(y) |
|
|||
1354 | return baseset(result, datarepr=('<last n=%d, %r, %r>', lim, subset, os)) |
|
|||
1355 |
|
||||
1356 | @predicate('max(set)', safe=True) |
|
|||
1357 | def maxrev(repo, subset, x): |
|
|||
1358 | """Changeset with highest revision number in set. |
|
|||
1359 | """ |
|
|||
1360 | os = getset(repo, fullreposet(repo), x) |
|
|||
1361 | try: |
|
|||
1362 | m = os.max() |
|
|||
1363 | if m in subset: |
|
|||
1364 | return baseset([m], datarepr=('<max %r, %r>', subset, os)) |
|
|||
1365 | except ValueError: |
|
|||
1366 | # os.max() throws a ValueError when the collection is empty. |
|
|||
1367 | # Same as python's max(). |
|
|||
1368 | pass |
|
|||
1369 | return baseset(datarepr=('<max %r, %r>', subset, os)) |
|
|||
1370 |
|
||||
1371 | @predicate('merge()', safe=True) |
|
|||
1372 | def merge(repo, subset, x): |
|
|||
1373 | """Changeset is a merge changeset. |
|
|||
1374 | """ |
|
|||
1375 | # i18n: "merge" is a keyword |
|
|||
1376 | getargs(x, 0, 0, _("merge takes no arguments")) |
|
|||
1377 | cl = repo.changelog |
|
|||
1378 | return subset.filter(lambda r: cl.parentrevs(r)[1] != -1, |
|
|||
1379 | condrepr='<merge>') |
|
|||
1380 |
|
||||
1381 | @predicate('branchpoint()', safe=True) |
|
|||
1382 | def branchpoint(repo, subset, x): |
|
|||
1383 | """Changesets with more than one child. |
|
|||
1384 | """ |
|
|||
1385 | # i18n: "branchpoint" is a keyword |
|
|||
1386 | getargs(x, 0, 0, _("branchpoint takes no arguments")) |
|
|||
1387 | cl = repo.changelog |
|
|||
1388 | if not subset: |
|
|||
1389 | return baseset() |
|
|||
1390 | # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset |
|
|||
1391 | # (and if it is not, it should be.) |
|
|||
1392 | baserev = min(subset) |
|
|||
1393 | parentscount = [0]*(len(repo) - baserev) |
|
|||
1394 | for r in cl.revs(start=baserev + 1): |
|
|||
1395 | for p in cl.parentrevs(r): |
|
|||
1396 | if p >= baserev: |
|
|||
1397 | parentscount[p - baserev] += 1 |
|
|||
1398 | return subset.filter(lambda r: parentscount[r - baserev] > 1, |
|
|||
1399 | condrepr='<branchpoint>') |
|
|||
1400 |
|
||||
1401 | @predicate('min(set)', safe=True) |
|
|||
1402 | def minrev(repo, subset, x): |
|
|||
1403 | """Changeset with lowest revision number in set. |
|
|||
1404 | """ |
|
|||
1405 | os = getset(repo, fullreposet(repo), x) |
|
|||
1406 | try: |
|
|||
1407 | m = os.min() |
|
|||
1408 | if m in subset: |
|
|||
1409 | return baseset([m], datarepr=('<min %r, %r>', subset, os)) |
|
|||
1410 | except ValueError: |
|
|||
1411 | # os.min() throws a ValueError when the collection is empty. |
|
|||
1412 | # Same as python's min(). |
|
|||
1413 | pass |
|
|||
1414 | return baseset(datarepr=('<min %r, %r>', subset, os)) |
|
|||
1415 |
|
||||
1416 | @predicate('modifies(pattern)', safe=True) |
|
|||
1417 | def modifies(repo, subset, x): |
|
|||
1418 | """Changesets modifying files matched by pattern. |
|
|||
1419 |
|
||||
1420 | The pattern without explicit kind like ``glob:`` is expected to be |
|
|||
1421 | relative to the current directory and match against a file or a |
|
|||
1422 | directory. |
|
|||
1423 | """ |
|
|||
1424 | # i18n: "modifies" is a keyword |
|
|||
1425 | pat = getstring(x, _("modifies requires a pattern")) |
|
|||
1426 | return checkstatus(repo, subset, pat, 0) |
|
|||
1427 |
|
||||
1428 | @predicate('named(namespace)') |
|
|||
1429 | def named(repo, subset, x): |
|
|||
1430 | """The changesets in a given namespace. |
|
|||
1431 |
|
||||
1432 | Pattern matching is supported for `namespace`. See |
|
|||
1433 | :hg:`help revisions.patterns`. |
|
|||
1434 | """ |
|
|||
1435 | # i18n: "named" is a keyword |
|
|||
1436 | args = getargs(x, 1, 1, _('named requires a namespace argument')) |
|
|||
1437 |
|
||||
1438 | ns = getstring(args[0], |
|
|||
1439 | # i18n: "named" is a keyword |
|
|||
1440 | _('the argument to named must be a string')) |
|
|||
1441 | kind, pattern, matcher = util.stringmatcher(ns) |
|
|||
1442 | namespaces = set() |
|
|||
1443 | if kind == 'literal': |
|
|||
1444 | if pattern not in repo.names: |
|
|||
1445 | raise error.RepoLookupError(_("namespace '%s' does not exist") |
|
|||
1446 | % ns) |
|
|||
1447 | namespaces.add(repo.names[pattern]) |
|
|||
1448 | else: |
|
|||
1449 | for name, ns in repo.names.iteritems(): |
|
|||
1450 | if matcher(name): |
|
|||
1451 | namespaces.add(ns) |
|
|||
1452 | if not namespaces: |
|
|||
1453 | raise error.RepoLookupError(_("no namespace exists" |
|
|||
1454 | " that match '%s'") % pattern) |
|
|||
1455 |
|
||||
1456 | names = set() |
|
|||
1457 | for ns in namespaces: |
|
|||
1458 | for name in ns.listnames(repo): |
|
|||
1459 | if name not in ns.deprecated: |
|
|||
1460 | names.update(repo[n].rev() for n in ns.nodes(repo, name)) |
|
|||
1461 |
|
||||
1462 | names -= set([node.nullrev]) |
|
|||
1463 | return subset & names |
|
|||
1464 |
|
||||
1465 | @predicate('id(string)', safe=True) |
|
|||
1466 | def node_(repo, subset, x): |
|
|||
1467 | """Revision non-ambiguously specified by the given hex string prefix. |
|
|||
1468 | """ |
|
|||
1469 | # i18n: "id" is a keyword |
|
|||
1470 | l = getargs(x, 1, 1, _("id requires one argument")) |
|
|||
1471 | # i18n: "id" is a keyword |
|
|||
1472 | n = getstring(l[0], _("id requires a string")) |
|
|||
1473 | if len(n) == 40: |
|
|||
1474 | try: |
|
|||
1475 | rn = repo.changelog.rev(node.bin(n)) |
|
|||
1476 | except (LookupError, TypeError): |
|
|||
1477 | rn = None |
|
|||
1478 | else: |
|
|||
1479 | rn = None |
|
|||
1480 | pm = repo.changelog._partialmatch(n) |
|
|||
1481 | if pm is not None: |
|
|||
1482 | rn = repo.changelog.rev(pm) |
|
|||
1483 |
|
||||
1484 | if rn is None: |
|
|||
1485 | return baseset() |
|
|||
1486 | result = baseset([rn]) |
|
|||
1487 | return result & subset |
|
|||
1488 |
|
||||
1489 | @predicate('obsolete()', safe=True) |
|
|||
1490 | def obsolete(repo, subset, x): |
|
|||
1491 | """Mutable changeset with a newer version.""" |
|
|||
1492 | # i18n: "obsolete" is a keyword |
|
|||
1493 | getargs(x, 0, 0, _("obsolete takes no arguments")) |
|
|||
1494 | obsoletes = obsmod.getrevs(repo, 'obsolete') |
|
|||
1495 | return subset & obsoletes |
|
|||
1496 |
|
||||
1497 | @predicate('only(set, [set])', safe=True) |
|
|||
1498 | def only(repo, subset, x): |
|
|||
1499 | """Changesets that are ancestors of the first set that are not ancestors |
|
|||
1500 | of any other head in the repo. If a second set is specified, the result |
|
|||
1501 | is ancestors of the first set that are not ancestors of the second set |
|
|||
1502 | (i.e. ::<set1> - ::<set2>). |
|
|||
1503 | """ |
|
|||
1504 | cl = repo.changelog |
|
|||
1505 | # i18n: "only" is a keyword |
|
|||
1506 | args = getargs(x, 1, 2, _('only takes one or two arguments')) |
|
|||
1507 | include = getset(repo, fullreposet(repo), args[0]) |
|
|||
1508 | if len(args) == 1: |
|
|||
1509 | if not include: |
|
|||
1510 | return baseset() |
|
|||
1511 |
|
||||
1512 | descendants = set(_revdescendants(repo, include, False)) |
|
|||
1513 | exclude = [rev for rev in cl.headrevs() |
|
|||
1514 | if not rev in descendants and not rev in include] |
|
|||
1515 | else: |
|
|||
1516 | exclude = getset(repo, fullreposet(repo), args[1]) |
|
|||
1517 |
|
||||
1518 | results = set(cl.findmissingrevs(common=exclude, heads=include)) |
|
|||
1519 | # XXX we should turn this into a baseset instead of a set, smartset may do |
|
|||
1520 | # some optimizations from the fact this is a baseset. |
|
|||
1521 | return subset & results |
|
|||
1522 |
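Since ``only(a, b)`` is equivalent to ``::a - ::b``, the two hedged queries below should select the same revisions. The snippet assumes the current directory is a repository with a head named 'default'; it is a usage sketch, not part of this module:

from mercurial import hg, ui as uimod

# Assumption: '.' is a repository and 'default' resolves to a head.
repo = hg.repository(uimod.ui.load(), '.')
a = repo.revs('only(., default)')
b = repo.revs('::. - ::default')
assert set(a) == set(b)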
|
||||
1523 | @predicate('origin([set])', safe=True) |
|
|||
1524 | def origin(repo, subset, x): |
|
|||
1525 | """ |
|
|||
1526 | Changesets that were specified as a source for the grafts, transplants or |
|
|||
1527 | rebases that created the given revisions. Omitting the optional set is the |
|
|||
1528 | same as passing all(). If a changeset created by these operations is itself |
|
|||
1529 | specified as a source for one of these operations, only the source changeset |
|
|||
1530 | for the first operation is selected. |
|
|||
1531 | """ |
|
|||
1532 | if x is not None: |
|
|||
1533 | dests = getset(repo, fullreposet(repo), x) |
|
|||
1534 | else: |
|
|||
1535 | dests = fullreposet(repo) |
|
|||
1536 |
|
||||
1537 | def _firstsrc(rev): |
|
|||
1538 | src = _getrevsource(repo, rev) |
|
|||
1539 | if src is None: |
|
|||
1540 | return None |
|
|||
1541 |
|
||||
1542 | while True: |
|
|||
1543 | prev = _getrevsource(repo, src) |
|
|||
1544 |
|
||||
1545 | if prev is None: |
|
|||
1546 | return src |
|
|||
1547 | src = prev |
|
|||
1548 |
|
||||
1549 | o = set([_firstsrc(r) for r in dests]) |
|
|||
1550 | o -= set([None]) |
|
|||
1551 | # XXX we should turn this into a baseset instead of a set, smartset may do |
|
|||
1552 | # some optimizations from the fact this is a baseset. |
|
|||
1553 | return subset & o |
|
|||
1554 |
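``_firstsrc()`` keeps following the recorded source of a source until none is left, so a chain of graft-of-a-graft collapses to the original changeset. A standalone sketch over a toy source map (illustrative):

# Illustrative only: {rev: rev it was grafted/transplanted/rebased from}
source = {7: 5, 5: 2}   # 7 was grafted from 5, which was itself grafted from 2

def firstsrc(rev):
    src = source.get(rev)
    if src is None:
        return None
    while True:
        prev = source.get(src)
        if prev is None:
            return src
        src = prev

print(firstsrc(7))  # -> 2, the original changeset
print(firstsrc(2))  # -> None, 2 was not created by any such operation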
|
||||
1555 | @predicate('outgoing([path])', safe=False) |
|
|||
1556 | def outgoing(repo, subset, x): |
|
|||
1557 | """Changesets not found in the specified destination repository, or the |
|
|||
1558 | default push location. |
|
|||
1559 | """ |
|
|||
1560 | # Avoid cycles. |
|
|||
1561 | from . import ( |
|
|||
1562 | discovery, |
|
|||
1563 | hg, |
|
|||
1564 | ) |
|
|||
1565 | # i18n: "outgoing" is a keyword |
|
|||
1566 | l = getargs(x, 0, 1, _("outgoing takes one or no arguments")) |
|
|||
1567 | # i18n: "outgoing" is a keyword |
|
|||
1568 | dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' |
|
|||
1569 | dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') |
|
|||
1570 | dest, branches = hg.parseurl(dest) |
|
|||
1571 | revs, checkout = hg.addbranchrevs(repo, repo, branches, []) |
|
|||
1572 | if revs: |
|
|||
1573 | revs = [repo.lookup(rev) for rev in revs] |
|
|||
1574 | other = hg.peer(repo, {}, dest) |
|
|||
1575 | repo.ui.pushbuffer() |
|
|||
1576 | outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs) |
|
|||
1577 | repo.ui.popbuffer() |
|
|||
1578 | cl = repo.changelog |
|
|||
1579 | o = set([cl.rev(r) for r in outgoing.missing]) |
|
|||
1580 | return subset & o |
|
|||
1581 |
|
||||
1582 | @predicate('p1([set])', safe=True) |
|
|||
1583 | def p1(repo, subset, x): |
|
|||
1584 | """First parent of changesets in set, or the working directory. |
|
|||
1585 | """ |
|
|||
1586 | if x is None: |
|
|||
1587 | p = repo[x].p1().rev() |
|
|||
1588 | if p >= 0: |
|
|||
1589 | return subset & baseset([p]) |
|
|||
1590 | return baseset() |
|
|||
1591 |
|
||||
1592 | ps = set() |
|
|||
1593 | cl = repo.changelog |
|
|||
1594 | for r in getset(repo, fullreposet(repo), x): |
|
|||
1595 | ps.add(cl.parentrevs(r)[0]) |
|
|||
1596 | ps -= set([node.nullrev]) |
|
|||
1597 | # XXX we should turn this into a baseset instead of a set, smartset may do |
|
|||
1598 | # some optimizations from the fact this is a baseset. |
|
|||
1599 | return subset & ps |
|
|||
1600 |
|
||||
1601 | @predicate('p2([set])', safe=True) |
|
|||
1602 | def p2(repo, subset, x): |
|
|||
1603 | """Second parent of changesets in set, or the working directory. |
|
|||
1604 | """ |
|
|||
1605 | if x is None: |
|
|||
1606 | ps = repo[x].parents() |
|
|||
1607 | try: |
|
|||
1608 | p = ps[1].rev() |
|
|||
1609 | if p >= 0: |
|
|||
1610 | return subset & baseset([p]) |
|
|||
1611 | return baseset() |
|
|||
1612 | except IndexError: |
|
|||
1613 | return baseset() |
|
|||
1614 |
|
||||
1615 | ps = set() |
|
|||
1616 | cl = repo.changelog |
|
|||
1617 | for r in getset(repo, fullreposet(repo), x): |
|
|||
1618 | ps.add(cl.parentrevs(r)[1]) |
|
|||
1619 | ps -= set([node.nullrev]) |
|
|||
1620 | # XXX we should turn this into a baseset instead of a set, smartset may do |
|
|||
1621 | # some optimizations from the fact this is a baseset. |
|
|||
1622 | return subset & ps |
|
|||
1623 |
|
||||
1624 | def parentpost(repo, subset, x, order): |
|
|||
1625 | return p1(repo, subset, x) |
|
|||
1626 |
|
||||
1627 | @predicate('parents([set])', safe=True) |
|
|||
1628 | def parents(repo, subset, x): |
|
|||
1629 | """ |
|
|||
1630 | The set of all parents for all changesets in set, or the working directory. |
|
|||
1631 | """ |
|
|||
1632 | if x is None: |
|
|||
1633 | ps = set(p.rev() for p in repo[x].parents()) |
|
|||
1634 | else: |
|
|||
1635 | ps = set() |
|
|||
1636 | cl = repo.changelog |
|
|||
1637 | up = ps.update |
|
|||
1638 | parentrevs = cl.parentrevs |
|
|||
1639 | for r in getset(repo, fullreposet(repo), x): |
|
|||
1640 | if r == node.wdirrev: |
|
|||
1641 | up(p.rev() for p in repo[r].parents()) |
|
|||
1642 | else: |
|
|||
1643 | up(parentrevs(r)) |
|
|||
1644 | ps -= set([node.nullrev]) |
|
|||
1645 | return subset & ps |
|
|||
1646 |
|
||||
1647 | def _phase(repo, subset, *targets): |
|
|||
1648 | """helper to select all rev in <targets> phases""" |
|
|||
1649 | s = repo._phasecache.getrevset(repo, targets) |
|
|||
1650 | return subset & s |
|
|||
1651 |
|
||||
1652 | @predicate('draft()', safe=True) |
|
|||
1653 | def draft(repo, subset, x): |
|
|||
1654 | """Changeset in draft phase.""" |
|
|||
1655 | # i18n: "draft" is a keyword |
|
|||
1656 | getargs(x, 0, 0, _("draft takes no arguments")) |
|
|||
1657 | target = phases.draft |
|
|||
1658 | return _phase(repo, subset, target) |
|
|||
1659 |
|
||||
1660 | @predicate('secret()', safe=True) |
|
|||
1661 | def secret(repo, subset, x): |
|
|||
1662 | """Changeset in secret phase.""" |
|
|||
1663 | # i18n: "secret" is a keyword |
|
|||
1664 | getargs(x, 0, 0, _("secret takes no arguments")) |
|
|||
1665 | target = phases.secret |
|
|||
1666 | return _phase(repo, subset, target) |
|
|||
1667 |
|
||||
1668 | def parentspec(repo, subset, x, n, order): |
|
|||
1669 | """``set^0`` |
|
|||
1670 | The set. |
|
|||
1671 | ``set^1`` (or ``set^``), ``set^2`` |
|
|||
1672 | First or second parent, respectively, of all changesets in set. |
|
|||
1673 | """ |
|
|||
1674 | try: |
|
|||
1675 | n = int(n[1]) |
|
|||
1676 | if n not in (0, 1, 2): |
|
|||
1677 | raise ValueError |
|
|||
1678 | except (TypeError, ValueError): |
|
|||
1679 | raise error.ParseError(_("^ expects a number 0, 1, or 2")) |
|
|||
1680 | ps = set() |
|
|||
1681 | cl = repo.changelog |
|
|||
1682 | for r in getset(repo, fullreposet(repo), x): |
|
|||
1683 | if n == 0: |
|
|||
1684 | ps.add(r) |
|
|||
1685 | elif n == 1: |
|
|||
1686 | ps.add(cl.parentrevs(r)[0]) |
|
|||
1687 | elif n == 2: |
|
|||
1688 | parents = cl.parentrevs(r) |
|
|||
1689 | if parents[1] != node.nullrev: |
|
|||
1690 | ps.add(parents[1]) |
|
|||
1691 | return subset & ps |
|
|||
1692 |
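``set^n`` accepts only 0, 1 or 2. A standalone sketch of the selection performed for each value, over a toy parent table (illustrative):

# Illustrative only: parent pairs as {rev: (p1, p2)}, -1 meaning "no parent".
parentrevs = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}

def parentspec(revs, n):
    if n not in (0, 1, 2):
        raise ValueError("^ expects a number 0, 1, or 2")
    ps = set()
    for r in revs:
        p1, p2 = parentrevs[r]
        if n == 0:
            ps.add(r)          # set^0 is the set itself
        elif n == 1:
            ps.add(p1)         # set^1 (or set^): first parents
        elif p2 != -1:
            ps.add(p2)         # set^2: second parents of merges only
    return ps

print(parentspec({3}, 1))  # -> {1}
print(parentspec({3}, 2))  # -> {2}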
|
||||
1693 | @predicate('present(set)', safe=True) |
|
|||
1694 | def present(repo, subset, x): |
|
|||
1695 | """An empty set, if any revision in set isn't found; otherwise, |
|
|||
1696 | all revisions in set. |
|
|||
1697 |
|
||||
1698 | If any of the specified revisions is not present in the local repository, |
|
|||
1699 | the query is normally aborted. But this predicate allows the query |
|
|||
1700 | to continue even in such cases. |
|
|||
1701 | """ |
|
|||
1702 | try: |
|
|||
1703 | return getset(repo, subset, x) |
|
|||
1704 | except error.RepoLookupError: |
|
|||
1705 | return baseset() |
|
|||
1706 |
|
||||
1707 | # for internal use |
|
|||
1708 | @predicate('_notpublic', safe=True) |
|
|||
1709 | def _notpublic(repo, subset, x): |
|
|||
1710 | getargs(x, 0, 0, "_notpublic takes no arguments") |
|
|||
1711 | return _phase(repo, subset, phases.draft, phases.secret) |
|
|||
1712 |
|
||||
1713 | @predicate('public()', safe=True) |
|
|||
1714 | def public(repo, subset, x): |
|
|||
1715 | """Changeset in public phase.""" |
|
|||
1716 | # i18n: "public" is a keyword |
|
|||
1717 | getargs(x, 0, 0, _("public takes no arguments")) |
|
|||
1718 | phase = repo._phasecache.phase |
|
|||
1719 | target = phases.public |
|
|||
1720 | condition = lambda r: phase(repo, r) == target |
|
|||
1721 | return subset.filter(condition, condrepr=('<phase %r>', target), |
|
|||
1722 | cache=False) |
|
|||
1723 |
|
||||
1724 | @predicate('remote([id [,path]])', safe=False) |
|
|||
1725 | def remote(repo, subset, x): |
|
|||
1726 | """Local revision that corresponds to the given identifier in a |
|
|||
1727 | remote repository, if present. Here, the '.' identifier is a |
|
|||
1728 | synonym for the current local branch. |
|
|||
1729 | """ |
|
|||
1730 |
|
||||
1731 | from . import hg # avoid start-up nasties |
|
|||
1732 | # i18n: "remote" is a keyword |
|
|||
1733 | l = getargs(x, 0, 2, _("remote takes zero, one, or two arguments")) |
|
|||
1734 |
|
||||
1735 | q = '.' |
|
|||
1736 | if len(l) > 0: |
|
|||
1737 | # i18n: "remote" is a keyword |
|
|||
1738 | q = getstring(l[0], _("remote requires a string id")) |
|
|||
1739 | if q == '.': |
|
|||
1740 | q = repo['.'].branch() |
|
|||
1741 |
|
||||
1742 | dest = '' |
|
|||
1743 | if len(l) > 1: |
|
|||
1744 | # i18n: "remote" is a keyword |
|
|||
1745 | dest = getstring(l[1], _("remote requires a repository path")) |
|
|||
1746 | dest = repo.ui.expandpath(dest or 'default') |
|
|||
1747 | dest, branches = hg.parseurl(dest) |
|
|||
1748 | revs, checkout = hg.addbranchrevs(repo, repo, branches, []) |
|
|||
1749 | if revs: |
|
|||
1750 | revs = [repo.lookup(rev) for rev in revs] |
|
|||
1751 | other = hg.peer(repo, {}, dest) |
|
|||
1752 | n = other.lookup(q) |
|
|||
1753 | if n in repo: |
|
|||
1754 | r = repo[n].rev() |
|
|||
1755 | if r in subset: |
|
|||
1756 | return baseset([r]) |
|
|||
1757 | return baseset() |
|
|||
1758 |
|
||||
1759 | @predicate('removes(pattern)', safe=True) |
|
|||
1760 | def removes(repo, subset, x): |
|
|||
1761 | """Changesets which remove files matching pattern. |
|
|||
1762 |
|
||||
1763 | The pattern without explicit kind like ``glob:`` is expected to be |
|
|||
1764 | relative to the current directory and match against a file or a |
|
|||
1765 | directory. |
|
|||
1766 | """ |
|
|||
1767 | # i18n: "removes" is a keyword |
|
|||
1768 | pat = getstring(x, _("removes requires a pattern")) |
|
|||
1769 | return checkstatus(repo, subset, pat, 2) |
|
|||
1770 |
|
||||
1771 | @predicate('rev(number)', safe=True) |
|
|||
1772 | def rev(repo, subset, x): |
|
|||
1773 | """Revision with the given numeric identifier. |
|
|||
1774 | """ |
|
|||
1775 | # i18n: "rev" is a keyword |
|
|||
1776 | l = getargs(x, 1, 1, _("rev requires one argument")) |
|
|||
1777 | try: |
|
|||
1778 | # i18n: "rev" is a keyword |
|
|||
1779 | l = int(getstring(l[0], _("rev requires a number"))) |
|
|||
1780 | except (TypeError, ValueError): |
|
|||
1781 | # i18n: "rev" is a keyword |
|
|||
1782 | raise error.ParseError(_("rev expects a number")) |
|
|||
1783 | if l not in repo.changelog and l != node.nullrev: |
|
|||
1784 | return baseset() |
|
|||
1785 | return subset & baseset([l]) |
|
|||
1786 |
|
||||
1787 | @predicate('matching(revision [, field])', safe=True) |
|
|||
1788 | def matching(repo, subset, x): |
|
|||
1789 | """Changesets in which a given set of fields match the set of fields in the |
|
|||
1790 | selected revision or set. |
|
|||
1791 |
|
||||
1792 | To match more than one field pass the list of fields to match separated |
|
|||
1793 | by spaces (e.g. ``author description``). |
|
|||
1794 |
|
||||
1795 | Valid fields are most regular revision fields and some special fields. |
|
|||
1796 |
|
||||
1797 | Regular revision fields are ``description``, ``author``, ``branch``, |
|
|||
1798 | ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user`` |
|
|||
1799 | and ``diff``. |
|
|||
1800 | Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the |
|
|||
1801 | contents of the revision. Two revisions matching their ``diff`` will |
|
|||
1802 | also match their ``files``. |
|
|||
1803 |
|
||||
1804 | Special fields are ``summary`` and ``metadata``: |
|
|||
1805 | ``summary`` matches the first line of the description. |
|
|||
1806 | ``metadata`` is equivalent to matching ``description user date`` |
|
|||
1807 | (i.e. it matches the main metadata fields). |
|
|||
1808 |
|
||||
1809 | ``metadata`` is the default field which is used when no fields are |
|
|||
1810 | specified. You can match more than one field at a time. |
|
|||
1811 | """ |
|
|||
1812 | # i18n: "matching" is a keyword |
|
|||
1813 | l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments")) |
|
|||
1814 |
|
||||
1815 | revs = getset(repo, fullreposet(repo), l[0]) |
|
|||
1816 |
|
||||
1817 | fieldlist = ['metadata'] |
|
|||
1818 | if len(l) > 1: |
|
|||
1819 | fieldlist = getstring(l[1], |
|
|||
1820 | # i18n: "matching" is a keyword |
|
|||
1821 | _("matching requires a string " |
|
|||
1822 | "as its second argument")).split() |
|
|||
1823 |
|
||||
1824 | # Make sure that there are no repeated fields, |
|
|||
1825 | # expand the 'special' 'metadata' field type |
|
|||
1826 | # and check the 'files' whenever we check the 'diff' |
|
|||
1827 | fields = [] |
|
|||
1828 | for field in fieldlist: |
|
|||
1829 | if field == 'metadata': |
|
|||
1830 | fields += ['user', 'description', 'date'] |
|
|||
1831 | elif field == 'diff': |
|
|||
1832 | # a revision matching the diff must also match the files |
|
|||
1833 | # since matching the diff is very costly, make sure to |
|
|||
1834 | # also match the files first |
|
|||
1835 | fields += ['files', 'diff'] |
|
|||
1836 | else: |
|
|||
1837 | if field == 'author': |
|
|||
1838 | field = 'user' |
|
|||
1839 | fields.append(field) |
|
|||
1840 | fields = set(fields) |
|
|||
1841 | if 'summary' in fields and 'description' in fields: |
|
|||
1842 | # If a revision matches its description it also matches its summary |
|
|||
1843 | fields.discard('summary') |
|
|||
1844 |
|
||||
1845 | # We may want to match more than one field |
|
|||
1846 | # Not all fields take the same amount of time to be matched |
|
|||
1847 | # Sort the selected fields in order of increasing matching cost |
|
|||
1848 | fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary', |
|
|||
1849 | 'files', 'description', 'substate', 'diff'] |
|
|||
1850 | def fieldkeyfunc(f): |
|
|||
1851 | try: |
|
|||
1852 | return fieldorder.index(f) |
|
|||
1853 | except ValueError: |
|
|||
1854 | # assume an unknown field is very costly |
|
|||
1855 | return len(fieldorder) |
|
|||
1856 | fields = list(fields) |
|
|||
1857 | fields.sort(key=fieldkeyfunc) |
|
|||
1858 |
|
||||
1859 | # Each field will be matched with its own "getfield" function |
|
|||
1860 | # which will be added to the getfieldfuncs array of functions |
|
|||
1861 | getfieldfuncs = [] |
|
|||
1862 | _funcs = { |
|
|||
1863 | 'user': lambda r: repo[r].user(), |
|
|||
1864 | 'branch': lambda r: repo[r].branch(), |
|
|||
1865 | 'date': lambda r: repo[r].date(), |
|
|||
1866 | 'description': lambda r: repo[r].description(), |
|
|||
1867 | 'files': lambda r: repo[r].files(), |
|
|||
1868 | 'parents': lambda r: repo[r].parents(), |
|
|||
1869 | 'phase': lambda r: repo[r].phase(), |
|
|||
1870 | 'substate': lambda r: repo[r].substate, |
|
|||
1871 | 'summary': lambda r: repo[r].description().splitlines()[0], |
|
|||
1872 | 'diff': lambda r: list(repo[r].diff(git=True),) |
|
|||
1873 | } |
|
|||
1874 | for info in fields: |
|
|||
1875 | getfield = _funcs.get(info, None) |
|
|||
1876 | if getfield is None: |
|
|||
1877 | raise error.ParseError( |
|
|||
1878 | # i18n: "matching" is a keyword |
|
|||
1879 | _("unexpected field name passed to matching: %s") % info) |
|
|||
1880 | getfieldfuncs.append(getfield) |
|
|||
1881 | # convert the getfield array of functions into a "getinfo" function |
|
|||
1882 | # which returns an array of field values (or a single value if there |
|
|||
1883 | # is only one field to match) |
|
|||
1884 | getinfo = lambda r: [f(r) for f in getfieldfuncs] |
|
|||
1885 |
|
||||
1886 | def matches(x): |
|
|||
1887 | for rev in revs: |
|
|||
1888 | target = getinfo(rev) |
|
|||
1889 | match = True |
|
|||
1890 | for n, f in enumerate(getfieldfuncs): |
|
|||
1891 | if target[n] != f(x): |
|
|||
1892 | match = False |
|
|||
1893 | if match: |
|
|||
1894 | return True |
|
|||
1895 | return False |
|
|||
1896 |
|
||||
1897 | return subset.filter(matches, condrepr=('<matching%r %r>', fields, revs)) |
|
|||
1898 |
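The core of ``matching()`` is: build one extractor per requested field, snapshot those fields for each reference revision, and accept a candidate only when every field compares equal to some snapshot. A standalone sketch over plain dictionaries (illustrative; the real extractors read changectx objects):

# Illustrative only: "revisions" as plain dicts instead of changectx objects.
revisions = {
    0: {'user': 'alice', 'branch': 'default'},
    1: {'user': 'bob',   'branch': 'default'},
    2: {'user': 'alice', 'branch': 'stable'},
}

def matching(reference_revs, fields):
    getters = [lambda r, f=f: revisions[r][f] for f in fields]
    targets = [[g(r) for g in getters] for r in reference_revs]
    def matches(candidate):
        snapshot = [g(candidate) for g in getters]
        return any(snapshot == t for t in targets)
    return [r for r in sorted(revisions) if matches(r)]

print(matching([0], ['user']))             # -> [0, 2]  same author as rev 0
print(matching([0], ['user', 'branch']))   # -> [0]     author and branch must both match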
|
||||
1899 | @predicate('reverse(set)', safe=True, takeorder=True) |
1900 | def reverse(repo, subset, x, order): |
1901 | """Reverse order of set. |
1902 | """ |
1903 | l = getset(repo, subset, x) |
1904 | if order == defineorder: |
1905 | l.reverse() |
1906 | return l |
1907 |
1908 | @predicate('roots(set)', safe=True) |
1909 | def roots(repo, subset, x): |
1910 | """Changesets in set with no parent changeset in set. |
1911 | """ |
1912 | s = getset(repo, fullreposet(repo), x) |
1913 | parents = repo.changelog.parentrevs |
1914 | def filter(r): |
1915 | for p in parents(r): |
1916 | if 0 <= p and p in s: |
1917 | return False |
1918 | return True |
1919 | return subset & s.filter(filter, condrepr='<roots>') |
1920 |
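roots() keeps a revision only when none of its parents is also in the set. A self-contained sketch of that filter over a toy parent map (not Mercurial's changelog API, just an illustration):

# Toy DAG: rev -> (p1, p2); -1 stands in for the null revision.
parentmap = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (1, -1), 4: (2, 3)}

def rootsof(s):
    # keep a rev only if none of its parents is also in the set
    return {r for r in s if not any(0 <= p and p in s for p in parentmap[r])}

print(sorted(rootsof({2, 3, 4})))   # [2, 3]; both parents of 4 are in the set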
1921 | _sortkeyfuncs = { |
1922 | 'rev': lambda c: c.rev(), |
1923 | 'branch': lambda c: c.branch(), |
1924 | 'desc': lambda c: c.description(), |
1925 | 'user': lambda c: c.user(), |
1926 | 'author': lambda c: c.user(), |
1927 | 'date': lambda c: c.date()[0], |
1928 | } |
1929 |
1930 | def _getsortargs(x): |
1931 | """Parse sort options into (set, [(key, reverse)], opts)""" |
1932 | args = getargsdict(x, 'sort', 'set keys topo.firstbranch') |
1933 | if 'set' not in args: |
1934 | # i18n: "sort" is a keyword |
1935 | raise error.ParseError(_('sort requires one or two arguments')) |
1936 | keys = "rev" |
1937 | if 'keys' in args: |
1938 | # i18n: "sort" is a keyword |
1939 | keys = getstring(args['keys'], _("sort spec must be a string")) |
1940 |
1941 | keyflags = [] |
1942 | for k in keys.split(): |
1943 | fk = k |
1944 | reverse = (k[0] == '-') |
1945 | if reverse: |
1946 | k = k[1:] |
1947 | if k not in _sortkeyfuncs and k != 'topo': |
1948 | raise error.ParseError(_("unknown sort key %r") % fk) |
1949 | keyflags.append((k, reverse)) |
1950 |
1951 | if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags): |
1952 | # i18n: "topo" is a keyword |
1953 | raise error.ParseError(_('topo sort order cannot be combined ' |
1954 | 'with other sort keys')) |
1955 |
1956 | opts = {} |
1957 | if 'topo.firstbranch' in args: |
1958 | if any(k == 'topo' for k, reverse in keyflags): |
1959 | opts['topo.firstbranch'] = args['topo.firstbranch'] |
1960 | else: |
1961 | # i18n: "topo" and "topo.firstbranch" are keywords |
1962 | raise error.ParseError(_('topo.firstbranch can only be used ' |
1963 | 'when using the topo sort key')) |
1964 |
1965 | return args['set'], keyflags, opts |
1966 |
1967 | @predicate('sort(set[, [-]key... [, ...]])', safe=True, takeorder=True) |
1968 | def sort(repo, subset, x, order): |
1969 | """Sort set by keys. The default sort order is ascending, specify a key |
1970 | as ``-key`` to sort in descending order. |
1971 |
1972 | The keys can be: |
1973 |
1974 | - ``rev`` for the revision number, |
1975 | - ``branch`` for the branch name, |
1976 | - ``desc`` for the commit message (description), |
1977 | - ``user`` for user name (``author`` can be used as an alias), |
1978 | - ``date`` for the commit date |
1979 | - ``topo`` for a reverse topographical sort |
1980 |
1981 | The ``topo`` sort order cannot be combined with other sort keys. This sort |
1982 | takes one optional argument, ``topo.firstbranch``, which takes a revset that |
1983 | specifies what topographical branches to prioritize in the sort. |
1984 |
1985 | """ |
1986 | s, keyflags, opts = _getsortargs(x) |
1987 | revs = getset(repo, subset, s) |
1988 |
1989 | if not keyflags or order != defineorder: |
1990 | return revs |
1991 | if len(keyflags) == 1 and keyflags[0][0] == "rev": |
1992 | revs.sort(reverse=keyflags[0][1]) |
1993 | return revs |
1994 | elif keyflags[0][0] == "topo": |
1995 | firstbranch = () |
1996 | if 'topo.firstbranch' in opts: |
1997 | firstbranch = getset(repo, subset, opts['topo.firstbranch']) |
1998 | revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch), |
1999 | istopo=True) |
2000 | if keyflags[0][1]: |
2001 | revs.reverse() |
2002 | return revs |
2003 |
2004 | # sort() is guaranteed to be stable |
2005 | ctxs = [repo[r] for r in revs] |
2006 | for k, reverse in reversed(keyflags): |
2007 | ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse) |
2008 | return baseset([c.rev() for c in ctxs]) |
2009 |
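The fallback branch of sort() leans on the fact that Python's list.sort() is stable: applying one single-key sort per key, from the least significant key to the most significant, yields the combined ordering. A standalone sketch on plain tuples (hypothetical data):

# (user, date) records; sort by user ascending, then by date descending.
commits = [('bob', 3), ('alice', 1), ('bob', 1), ('alice', 2)]
keyflags = [('user', False), ('date', True)]   # (key, reverse) pairs
keyfuncs = {'user': lambda c: c[0], 'date': lambda c: c[1]}

for k, reverse in reversed(keyflags):          # least significant key first
    commits.sort(key=keyfuncs[k], reverse=reverse)
print(commits)
# [('alice', 2), ('alice', 1), ('bob', 3), ('bob', 1)]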
2010 | def _toposort(revs, parentsfunc, firstbranch=()): |
2011 | """Yield revisions from heads to roots one (topo) branch at a time. |
2012 |
2013 | This function aims to be used by a graph generator that wishes to minimize |
2014 | the number of parallel branches and their interleaving. |
2015 |
2016 | Example iteration order (numbers show the "true" order in a changelog): |
2017 |
2018 | o 4 |
2019 | | |
2020 | o 1 |
2021 | | |
2022 | | o 3 |
2023 | | | |
2024 | | o 2 |
2025 | |/ |
2026 | o 0 |
2027 |
2028 | Note that the ancestors of merges are understood by the current |
2029 | algorithm to be on the same branch. This means no reordering will |
2030 | occur behind a merge. |
2031 | """ |
2032 |
2033 | ### Quick summary of the algorithm |
2034 | # |
2035 | # This function is based around a "retention" principle. We keep revisions |
2036 | # in memory until we are ready to emit a whole branch that immediately |
2037 | # "merges" into an existing one. This reduces the number of parallel |
2038 | # branches with interleaved revisions. |
2039 | # |
2040 | # During iteration revs are split into two groups: |
2041 | # A) revision already emitted |
2042 | # B) revision in "retention". They are stored as different subgroups. |
2043 | # |
2044 | # for each REV, we do the following logic: |
2045 | # |
2046 | # 1) if REV is a parent of (A), we will emit it. If there is a |
2047 | # retention group ((B) above) that is blocked on REV being |
2048 | # available, we emit all the revisions out of that retention |
2049 | # group first. |
2050 | # |
2051 | # 2) else, we'll search for a subgroup in (B) awaiting for REV to be |
2052 | # available, if such subgroup exist, we add REV to it and the subgroup is |
2053 | # now awaiting for REV.parents() to be available. |
2054 | # |
2055 | # 3) finally if no such group existed in (B), we create a new subgroup. |
2056 | # |
2057 | # |
2058 | # To bootstrap the algorithm, we emit the tipmost revision (which |
2059 | # puts it in group (A) from above). |
2060 |
2061 | revs.sort(reverse=True) |
2062 |
2063 | # Set of parents of revision that have been emitted. They can be considered |
2064 | # unblocked as the graph generator is already aware of them so there is no |
2065 | # need to delay the revisions that reference them. |
2066 | # |
2067 | # If someone wants to prioritize a branch over the others, pre-filling this |
2068 | # set will force all other branches to wait until this branch is ready to be |
2069 | # emitted. |
2070 | unblocked = set(firstbranch) |
2071 |
2072 | # list of groups waiting to be displayed, each group is defined by: |
2073 | # |
2074 | # (revs: lists of revs waiting to be displayed, |
2075 | # blocked: set of that cannot be displayed before those in 'revs') |
2076 | # |
2077 | # The second value ('blocked') correspond to parents of any revision in the |
2078 | # group ('revs') that is not itself contained in the group. The main idea |
2079 | # of this algorithm is to delay as much as possible the emission of any |
2080 | # revision. This means waiting for the moment we are about to display |
2081 | # these parents to display the revs in a group. |
2082 | # |
2083 | # This first implementation is smart until it encounters a merge: it will |
2084 | # emit revs as soon as any parent is about to be emitted and can grow an |
2085 | # arbitrary number of revs in 'blocked'. In practice this mean we properly |
2086 | # retains new branches but gives up on any special ordering for ancestors |
2087 | # of merges. The implementation can be improved to handle this better. |
2088 | # |
2089 | # The first subgroup is special. It corresponds to all the revision that |
2090 | # were already emitted. The 'revs' lists is expected to be empty and the |
2091 | # 'blocked' set contains the parents revisions of already emitted revision. |
2092 | # |
2093 | # You could pre-seed the <parents> set of groups[0] to a specific |
2094 | # changesets to select what the first emitted branch should be. |
2095 | groups = [([], unblocked)] |
2096 | pendingheap = [] |
2097 | pendingset = set() |
2098 |
2099 | heapq.heapify(pendingheap) |
2100 | heappop = heapq.heappop |
2101 | heappush = heapq.heappush |
2102 | for currentrev in revs: |
2103 | # Heap works with smallest element, we want highest so we invert |
2104 | if currentrev not in pendingset: |
2105 | heappush(pendingheap, -currentrev) |
2106 | pendingset.add(currentrev) |
2107 | # iterates on pending rev until after the current rev have been |
2108 | # processed. |
2109 | rev = None |
2110 | while rev != currentrev: |
2111 | rev = -heappop(pendingheap) |
2112 | pendingset.remove(rev) |
2113 |
2114 | # Seek for a subgroup blocked, waiting for the current revision. |
2115 | matching = [i for i, g in enumerate(groups) if rev in g[1]] |
2116 |
2117 | if matching: |
2118 | # The main idea is to gather together all sets that are blocked |
2119 | # on the same revision. |
2120 | # |
2121 | # Groups are merged when a common blocking ancestor is |
2122 | # observed. For example, given two groups: |
2123 | # |
2124 | # revs [5, 4] waiting for 1 |
2125 | # revs [3, 2] waiting for 1 |
2126 | # |
2127 | # These two groups will be merged when we process |
2128 | # 1. In theory, we could have merged the groups when |
2129 | # we added 2 to the group it is now in (we could have |
2130 | # noticed the groups were both blocked on 1 then), but |
2131 | # the way it works now makes the algorithm simpler. |
2132 | # |
2133 | # We also always keep the oldest subgroup first. We can |
2134 | # probably improve the behavior by having the longest set |
2135 | # first. That way, graph algorithms could minimise the length |
2136 | # of parallel lines their drawing. This is currently not done. |
2137 | targetidx = matching.pop(0) |
2138 | trevs, tparents = groups[targetidx] |
2139 | for i in matching: |
2140 | gr = groups[i] |
2141 | trevs.extend(gr[0]) |
2142 | tparents |= gr[1] |
2143 | # delete all merged subgroups (except the one we kept) |
2144 | # (starting from the last subgroup for performance and |
2145 | # sanity reasons) |
2146 | for i in reversed(matching): |
2147 | del groups[i] |
2148 | else: |
2149 | # This is a new head. We create a new subgroup for it. |
2150 | targetidx = len(groups) |
2151 | groups.append(([], set([rev]))) |
2152 |
2153 | gr = groups[targetidx] |
2154 |
2155 | # We now add the current nodes to this subgroups. This is done |
2156 | # after the subgroup merging because all elements from a subgroup |
2157 | # that relied on this rev must precede it. |
2158 | # |
2159 | # we also update the <parents> set to include the parents of the |
2160 | # new nodes. |
2161 | if rev == currentrev: # only display stuff in rev |
2162 | gr[0].append(rev) |
2163 | gr[1].remove(rev) |
2164 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] |
2165 | gr[1].update(parents) |
2166 | for p in parents: |
2167 | if p not in pendingset: |
2168 | pendingset.add(p) |
2169 | heappush(pendingheap, -p) |
2170 |
2171 | # Look for a subgroup to display |
2172 | # |
2173 | # When unblocked is empty (if clause), we were not waiting for any |
2174 | # revisions during the first iteration (if no priority was given) or |
2175 | # if we emitted a whole disconnected set of the graph (reached a |
2176 | # root). In that case we arbitrarily take the oldest known |
2177 | # subgroup. The heuristic could probably be better. |
2178 | # |
2179 | # Otherwise (elif clause) if the subgroup is blocked on |
2180 | # a revision we just emitted, we can safely emit it as |
2181 | # well. |
2182 | if not unblocked: |
2183 | if len(groups) > 1: # display other subset |
2184 | targetidx = 1 |
2185 | gr = groups[1] |
2186 | elif not gr[1] & unblocked: |
2187 | gr = None |
2188 |
2189 | if gr is not None: |
2190 | # update the set of awaited revisions with the one from the |
2191 | # subgroup |
2192 | unblocked |= gr[1] |
2193 | # output all revisions in the subgroup |
2194 | for r in gr[0]: |
2195 | yield r |
2196 | # delete the subgroup that you just output |
2197 | # unless it is groups[0] in which case you just empty it. |
2198 | if targetidx: |
2199 | del groups[targetidx] |
2200 | else: |
2201 | gr[0][:] = [] |
2202 | # Check if we have some subgroup waiting for revisions we are not going to |
2203 | # iterate over |
2204 | for g in groups: |
2205 | for r in g[0]: |
2206 | yield r |
2207 |
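_toposort needs the highest pending revision first, but heapq only implements a min-heap, hence the negation when revisions are pushed onto and popped off pendingheap. The trick in isolation:

import heapq

# heapq is a min-heap; pushing negated values turns it into a max-heap.
pending = []
for rev in (2, 7, 5):
    heapq.heappush(pending, -rev)
while pending:
    print(-heapq.heappop(pending))   # prints 7, 5, 2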
2208 | @predicate('subrepo([pattern])') |
2209 | def subrepo(repo, subset, x): |
2210 | """Changesets that add, modify or remove the given subrepo. If no subrepo |
2211 | pattern is named, any subrepo changes are returned. |
2212 | """ |
2213 | # i18n: "subrepo" is a keyword |
2214 | args = getargs(x, 0, 1, _('subrepo takes at most one argument')) |
2215 | pat = None |
2216 | if len(args) != 0: |
2217 | pat = getstring(args[0], _("subrepo requires a pattern")) |
2218 |
2219 | m = matchmod.exact(repo.root, repo.root, ['.hgsubstate']) |
2220 |
2221 | def submatches(names): |
2222 | k, p, m = util.stringmatcher(pat) |
2223 | for name in names: |
2224 | if m(name): |
2225 | yield name |
2226 |
2227 | def matches(x): |
2228 | c = repo[x] |
2229 | s = repo.status(c.p1().node(), c.node(), match=m) |
2230 |
2231 | if pat is None: |
2232 | return s.added or s.modified or s.removed |
2233 |
2234 | if s.added: |
2235 | return any(submatches(c.substate.keys())) |
2236 |
2237 | if s.modified: |
2238 | subs = set(c.p1().substate.keys()) |
2239 | subs.update(c.substate.keys()) |
2240 |
2241 | for path in submatches(subs): |
2242 | if c.p1().substate.get(path) != c.substate.get(path): |
2243 | return True |
2244 |
2245 | if s.removed: |
2246 | return any(submatches(c.p1().substate.keys())) |
2247 |
2248 | return False |
2249 |
2250 | return subset.filter(matches, condrepr=('<subrepo %r>', pat)) |
2251 |
2252 | def _substringmatcher(pattern, casesensitive=True): |
2253 | kind, pattern, matcher = util.stringmatcher(pattern, |
2254 | casesensitive=casesensitive) |
2255 | if kind == 'literal': |
2256 | if not casesensitive: |
2257 | pattern = encoding.lower(pattern) |
2258 | matcher = lambda s: pattern in encoding.lower(s) |
2259 | else: |
2260 | matcher = lambda s: pattern in s |
2261 | return kind, pattern, matcher |
2262 |
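For the case-insensitive literal case, both the pattern and each candidate are lowered before the containment test. A standalone sketch using str.lower(); the real code goes through encoding.lower() so locale-aware byte strings fold correctly:

# Sketch of the case-insensitive literal branch of _substringmatcher().
def substringmatcher(pattern, casesensitive=True):
    if not casesensitive:
        pattern = pattern.lower()
        return lambda s: pattern in s.lower()
    return lambda s: pattern in s

m = substringmatcher('Alice', casesensitive=False)
print(m('alice@example.com'), m('Bob <bob@example.com>'))   # True False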
2263 | @predicate('tag([name])', safe=True) |
2264 | def tag(repo, subset, x): |
2265 | """The specified tag by name, or all tagged revisions if no name is given. |
2266 |
2267 | Pattern matching is supported for `name`. See |
2268 | :hg:`help revisions.patterns`. |
2269 | """ |
2270 | # i18n: "tag" is a keyword |
2271 | args = getargs(x, 0, 1, _("tag takes one or no arguments")) |
2272 | cl = repo.changelog |
2273 | if args: |
2274 | pattern = getstring(args[0], |
2275 | # i18n: "tag" is a keyword |
2276 | _('the argument to tag must be a string')) |
2277 | kind, pattern, matcher = util.stringmatcher(pattern) |
2278 | if kind == 'literal': |
2279 | # avoid resolving all tags |
2280 | tn = repo._tagscache.tags.get(pattern, None) |
2281 | if tn is None: |
2282 | raise error.RepoLookupError(_("tag '%s' does not exist") |
2283 | % pattern) |
2284 | s = set([repo[tn].rev()]) |
2285 | else: |
2286 | s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)]) |
2287 | else: |
2288 | s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip']) |
2289 | return subset & s |
2290 |
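The literal branch above is a fast path: a plain tag name is a single lookup in the tags cache, while a pattern forces scanning the whole tag list. A toy sketch of the two branches over an ordinary dict, handling only the 're:' prefix; Mercurial's util.stringmatcher() supports more kinds than this:

import re

# Toy tag table: tag name -> node (both made up for illustration).
tags = {'1.0': 'rev10', 'release-2.0': 'rev20', 'tip': 'rev99'}

def tagrevs(pattern):
    if pattern.startswith('re:'):                 # pattern: scan everything
        rx = re.compile(pattern[3:])
        return {n for t, n in tags.items() if rx.search(t)}
    if pattern not in tags:                       # mirrors the lookup error case
        raise LookupError("tag '%s' does not exist" % pattern)
    return {tags[pattern]}                        # literal: one dict lookup

print(tagrevs('1.0'), tagrevs('re:^release'))     # {'rev10'} {'rev20'}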
2291 | @predicate('tagged', safe=True) |
2292 | def tagged(repo, subset, x): |
2293 | return tag(repo, subset, x) |
2294 |
2295 | @predicate('unstable()', safe=True) |
2296 | def unstable(repo, subset, x): |
2297 | """Non-obsolete changesets with obsolete ancestors. |
2298 | """ |
2299 | # i18n: "unstable" is a keyword |
2300 | getargs(x, 0, 0, _("unstable takes no arguments")) |
2301 | unstables = obsmod.getrevs(repo, 'unstable') |
2302 | return subset & unstables |
2303 |
2304 |
2305 | @predicate('user(string)', safe=True) |
2306 | def user(repo, subset, x): |
2307 | """User name contains string. The match is case-insensitive. |
2308 |
2309 | Pattern matching is supported for `string`. See |
2310 | :hg:`help revisions.patterns`. |
2311 | """ |
2312 | return author(repo, subset, x) |
2313 |
2314 | @predicate('wdir', safe=True) |
2315 | def wdir(repo, subset, x): |
2316 | """Working directory. (EXPERIMENTAL)""" |
2317 | # i18n: "wdir" is a keyword |
2318 | getargs(x, 0, 0, _("wdir takes no arguments")) |
2319 | if node.wdirrev in subset or isinstance(subset, fullreposet): |
2320 | return baseset([node.wdirrev]) |
2321 | return baseset() |
2322 |
2323 | def _orderedlist(repo, subset, x): |
2324 | s = getstring(x, "internal error") |
2325 | if not s: |
2326 | return baseset() |
2327 | # remove duplicates here. it's difficult for caller to deduplicate sets |
2328 | # because different symbols can point to the same rev. |
2329 | cl = repo.changelog |
2330 | ls = [] |
2331 | seen = set() |
2332 | for t in s.split('\0'): |
2333 | try: |
2334 | # fast path for integer revision |
2335 | r = int(t) |
2336 | if str(r) != t or r not in cl: |
2337 | raise ValueError |
2338 | revs = [r] |
2339 | except ValueError: |
2340 | revs = stringset(repo, subset, t) |
2341 |
2342 | for r in revs: |
2343 | if r in seen: |
2344 | continue |
2345 | if (r in subset |
2346 | or r == node.nullrev and isinstance(subset, fullreposet)): |
2347 | ls.append(r) |
2348 | seen.add(r) |
2349 | return baseset(ls) |
2350 |
2351 | # for internal use |
2352 | @predicate('_list', safe=True, takeorder=True) |
2353 | def _list(repo, subset, x, order): |
2354 | if order == followorder: |
2355 | # slow path to take the subset order |
2356 | return subset & _orderedlist(repo, fullreposet(repo), x) |
2357 | else: |
2358 | return _orderedlist(repo, subset, x) |
2359 |
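_orderedlist receives its revisions as one '\0'-separated string (a test hunk near the end of this change notes that these internal predicates use '\0' as a separator) and drops duplicates while keeping the first occurrence. The core of that loop, minus the repository lookups:

# Sketch of the dedup-preserving-order loop in _orderedlist(), over a
# '\0'-joined payload like the one the list-expanding formatspec forms build.
payload = '\0'.join(['4', 'tip', '4', '0'])

seen, ls = set(), []
for token in payload.split('\0'):
    if token in seen:
        continue
    seen.add(token)
    ls.append(token)
print(ls)   # ['4', 'tip', '0']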
2360 | def _orderedintlist(repo, subset, x): |
2361 | s = getstring(x, "internal error") |
2362 | if not s: |
2363 | return baseset() |
2364 | ls = [int(r) for r in s.split('\0')] |
2365 | s = subset |
2366 | return baseset([r for r in ls if r in s]) |
2367 |
2368 | # for internal use |
2369 | @predicate('_intlist', safe=True, takeorder=True) |
2370 | def _intlist(repo, subset, x, order): |
2371 | if order == followorder: |
2372 | # slow path to take the subset order |
2373 | return subset & _orderedintlist(repo, fullreposet(repo), x) |
2374 | else: |
2375 | return _orderedintlist(repo, subset, x) |
2376 |
2377 | def _orderedhexlist(repo, subset, x): |
2378 | s = getstring(x, "internal error") |
2379 | if not s: |
2380 | return baseset() |
2381 | cl = repo.changelog |
2382 | ls = [cl.rev(node.bin(r)) for r in s.split('\0')] |
2383 | s = subset |
2384 | return baseset([r for r in ls if r in s]) |
2385 |
2386 | # for internal use |
2387 | @predicate('_hexlist', safe=True, takeorder=True) |
2388 | def _hexlist(repo, subset, x, order): |
2389 | if order == followorder: |
2390 | # slow path to take the subset order |
2391 | return subset & _orderedhexlist(repo, fullreposet(repo), x) |
2392 | else: |
2393 | return _orderedhexlist(repo, subset, x) |
2394 |
2395 | methods = { |
2396 | "range": rangeset, |
2397 | "rangeall": rangeall, |
2398 | "rangepre": rangepre, |
2399 | "rangepost": rangepost, |
2400 | "dagrange": dagrange, |
2401 | "string": stringset, |
2402 | "symbol": stringset, |
2403 | "and": andset, |
2404 | "or": orset, |
2405 | "not": notset, |
2406 | "difference": differenceset, |
2407 | "list": listset, |
2408 | "keyvalue": keyvaluepair, |
2409 | "func": func, |
2410 | "ancestor": ancestorspec, |
2411 | "parent": parentspec, |
2412 | "parentpost": parentpost, |
2413 | } |
2414 |
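Evaluation is a dispatch on the first element of each parse-tree node through this methods table. A toy evaluator with the same shape, for two hypothetical node types (the real functions take repo and subset arguments):

# Toy dispatch-on-node-type evaluator: tree[0] picks the handler,
# the remaining elements are its operands.
def symbol(env, name):
    return {env[name]}

def orset(env, a, b):
    return evaluate(env, a) | evaluate(env, b)

methods = {'symbol': symbol, 'or': orset}

def evaluate(env, tree):
    return methods[tree[0]](env, *tree[1:])

tree = ('or', ('symbol', 'x'), ('symbol', 'y'))
print(evaluate({'x': 1, 'y': 2}, tree))   # {1, 2}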
2415 | # Constants for ordering requirement, used in _analyze(): |
2416 | # |
2417 | # If 'define', any nested functions and operations can change the ordering of |
@@ -2756,59 +568,6 b' def foldconcat(tree):' | |||||
2756 | def parse(spec, lookup=None): |
2757 | return _parsewith(spec, lookup=lookup) |
2758 |
2759 | def posttreebuilthook(tree, repo): |
2760 | # hook for extensions to execute code on the optimized tree |
2761 | pass |
2762 |
2763 | def match(ui, spec, repo=None, order=defineorder): |
2764 | """Create a matcher for a single revision spec |
2765 |
2766 | If order=followorder, a matcher takes the ordering specified by the input |
2767 | set. |
2768 | """ |
2769 | return matchany(ui, [spec], repo=repo, order=order) |
2770 |
2771 | def matchany(ui, specs, repo=None, order=defineorder): |
2772 | """Create a matcher that will include any revisions matching one of the |
2773 | given specs |
2774 |
2775 | If order=followorder, a matcher takes the ordering specified by the input |
2776 | set. |
2777 | """ |
2778 | if not specs: |
2779 | def mfunc(repo, subset=None): |
2780 | return baseset() |
2781 | return mfunc |
2782 | if not all(specs): |
2783 | raise error.ParseError(_("empty query")) |
2784 | lookup = None |
2785 | if repo: |
2786 | lookup = repo.__contains__ |
2787 | if len(specs) == 1: |
2788 | tree = parse(specs[0], lookup) |
2789 | else: |
2790 | tree = ('or', ('list',) + tuple(parse(s, lookup) for s in specs)) |
2791 |
2792 | if ui: |
2793 | tree = expandaliases(ui, tree) |
2794 | tree = foldconcat(tree) |
2795 | tree = analyze(tree, order) |
2796 | tree = optimize(tree) |
2797 | posttreebuilthook(tree, repo) |
2798 | return makematcher(tree) |
2799 |
2800 | def makematcher(tree): |
2801 | """Create a matcher from an evaluatable tree""" |
2802 | def mfunc(repo, subset=None): |
2803 | if subset is None: |
2804 | subset = fullreposet(repo) |
2805 | if util.safehasattr(subset, 'isascending'): |
2806 | result = getset(repo, subset, tree) |
2807 | else: |
2808 | result = getset(repo, baseset(subset), tree) |
2809 | return result |
2810 | return mfunc |
2811 |
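The matcher built by match()/matchany() is just a callable: hand it a repo (and optionally a subset) and it evaluates the compiled tree. A hedged usage sketch, assuming a ui and repo are already at hand and using an arbitrary example spec:

# Minimal usage sketch of the matcher pipeline kept in the revset module.
from mercurial import revset

def headsof(ui, repo):
    m = revset.match(ui, 'heads(all())', repo)   # parse, expand, optimize once
    return [repo[r].hex() for r in m(repo)]      # evaluate against the repo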
2812 | def formatspec(expr, *args): |
2813 | ''' |
2814 | This is a convenience function for using revsets internally, and |
@@ -2923,17 +682,3 b' def funcsused(tree):' | |||||
2923 | if tree[0] == 'func': |
2924 | funcs.add(tree[1][1]) |
2925 | return funcs |
2926 |
2927 | def loadpredicate(ui, extname, registrarobj): |
2928 | """Load revset predicates from specified registrarobj |
2929 | """ |
2930 | for name, func in registrarobj._table.iteritems(): |
2931 | symbols[name] = func |
2932 | if func._safe: |
2933 | safesymbols.add(name) |
2934 |
2935 | # load built-in predicates explicitly to setup safesymbols |
2936 | loadpredicate(None, None, predicate) |
2937 |
2938 | # tell hggettext to extract docstrings from these functions: |
2939 | i18nfunctions = symbols.values() |
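loadpredicate() is what copies an extension's decorated functions into the symbols table (and into safesymbols when marked safe). A hedged sketch of the extension side, following the same registrar pattern as the built-in predicates; the predicate name is made up:

# Hypothetical extension-side predicate; the extension loader ends up
# feeding 'revsetpredicate' to loadpredicate() so 'onlydrafts()' becomes
# a usable revset symbol.
from mercurial import phases, registrar, smartset

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('onlydrafts()', safe=True)
def onlydrafts(repo, subset, x):
    """Changesets in the draft phase (illustrative only)."""
    return smartset.baseset([r for r in subset
                             if repo[r].phase() == phases.draft])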
@@ -30,6 +30,7 b' from . import (' | |||||
30 | phases, |
31 | pycompat, |
32 | revset, |
33 | revsetlang, |
33 | similar, |
34 | util, |
35 | ) |
@@ -890,7 +891,7 b" def revsingle(repo, revspec, default='.'" | |||||
890 | return repo[l.last()] |
891 |
892 | def _pairspec(revspec): |
893 | tree = revset.parse(revspec) |
894 | tree = revsetlang.parse(revspec) |
894 | return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall') |
895 |
896 | def revpair(repo, revs): |
@@ -936,7 +937,7 b' def revrange(repo, specs):' | |||||
936 | revision numbers. |
937 |
938 | It is assumed the revsets are already formatted. If you have arguments |
939 | that need to be expanded in the revset, call ``revset.formatspec()`` |
940 | that need to be expanded in the revset, call ``revsetlang.formatspec()`` |
940 | and pass the result as an element of ``specs``. |
941 |
942 | Specifying a single revset is allowed. |
@@ -947,7 +948,7 b' def revrange(repo, specs):' | |||||
947 | allspecs = [] |
948 | for spec in specs: |
949 | if isinstance(spec, int): |
950 | spec = revset.formatspec('rev(%d)', spec) |
951 | spec = revsetlang.formatspec('rev(%d)', spec) |
951 | allspecs.append(spec) |
952 | m = revset.matchany(repo.ui, allspecs, repo) |
953 | return m(repo) |
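This is the pattern the revrange() docstring above describes: expand arguments with revsetlang.formatspec() first, then pass the finished spec to scmutil.revrange() for evaluation. A hedged sketch with a hypothetical helper:

# Hypothetical helper showing formatspec() feeding revrange(); 'draft()'
# and 'user()' are ordinary revset predicates.
from mercurial import revsetlang, scmutil

def draftsof(repo, username):
    spec = revsetlang.formatspec('draft() and user(%s)', username)
    return scmutil.revrange(repo, [spec])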
@@ -20,6 +20,7 b' from . import (' | |||||
20 | pycompat, |
21 | registrar, |
22 | revset as revsetmod, |
23 | revsetlang, |
23 | templatefilters, |
24 | templatekw, |
25 | util, |
@@ -778,7 +779,7 b' def revset(context, mapping, args):' | |||||
778 |
779 | if len(args) > 1: |
780 | formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]] |
781 | revs = query(revset.formatspec(raw, *formatargs)) |
782 | revs = query(revsetlang.formatspec(raw, *formatargs)) |
782 | revs = list(revs) |
783 | else: |
784 | revsetcache = mapping['cache'].setdefault("revsetcache", {}) |
@@ -28,7 +28,7 b" testmod('mercurial.minirst')" | |||||
28 | testmod('mercurial.patch') |
29 | testmod('mercurial.pathutil') |
30 | testmod('mercurial.parser') |
31 | testmod('mercurial.revset') |
31 | testmod('mercurial.revsetlang') |
32 | testmod('mercurial.smartset') |
33 | testmod('mercurial.store') |
34 | testmod('mercurial.subrepo') |
@@ -82,18 +82,18 b' o (0) root' | |||||
82 | > } |
83 |
84 | $ cat > printrevset.py <<EOF |
85 | > from mercurial import extensions, revset, commands, cmdutil |
85 | > from mercurial import extensions, revsetlang, commands, cmdutil |
86 | > |
87 | > def uisetup(ui): |
88 | > def printrevset(orig, ui, repo, *pats, **opts): |
89 | > if opts.get('print_revset'): |
90 | > expr = cmdutil.getgraphlogrevs(repo, pats, opts)[1] |
91 | > if expr: |
92 | > tree = revset.parse(expr) |
92 | > tree = revsetlang.parse(expr) |
93 | > else: |
94 | > tree = [] |
95 | > ui.write('%r\n' % (opts.get('rev', []),)) |
96 | > ui.write(revset.prettyformat(tree) + '\n') |
96 | > ui.write(revsetlang.prettyformat(tree) + '\n') |
97 | > return 0 |
98 | > return orig(ui, repo, *pats, **opts) |
99 | > entry = extensions.wrapcommand(commands.table, 'log', printrevset) |
@@ -40,6 +40,7 b" these predicates use '\\0' as a separator" | |||||
40 | > cmdutil, |
41 | > node as nodemod, |
42 | > revset, |
43 | > revsetlang, |
43 | > smartset, |
44 | > ) |
45 | > cmdtable = {} |
@@ -50,13 +51,14 b" these predicates use '\\0' as a separator" | |||||
50 | > def debugrevlistspec(ui, repo, fmt, *args, **opts): |
51 | > if opts['bin']: |
52 | > args = map(nodemod.bin, args) |
53 | > expr = revset.formatspec(fmt, list(args)) |
54 | > expr = revsetlang.formatspec(fmt, list(args)) |
54 | > if ui.verbose: |
55 | > tree = revset.parse(expr, lookup=repo.__contains__) |
56 | > tree = revsetlang.parse(expr, lookup=repo.__contains__) |
56 | > ui.note(revset.prettyformat(tree), "\n") |
57 | > ui.note(revsetlang.prettyformat(tree), "\n") |
57 | > if opts["optimize"]: |
58 | > opttree = revset.optimize(revset.analyze(tree)) |
59 | > opttree = revsetlang.optimize(revsetlang.analyze(tree)) |
59 | > ui.note("* optimized:\n", revset.prettyformat(opttree), "\n") |
60 | > ui.note("* optimized:\n", revsetlang.prettyformat(opttree), |
61 | > "\n") |
60 | > func = revset.match(ui, expr, repo) |
61 | > revs = func(repo) |
62 | > if ui.verbose: |