parser: accept iterator of tokens instead of tokenizer function and program...
Yuya Nishihara
r25654:af329a84 default
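The changeset moves tokenization out of the parser class: the constructor now takes only the grammar elements table, and parse() consumes an iterator of (type, value, pos) tokens supplied by the caller. The sketch below is illustrative only; it mirrors the updated fileset.parse() in the first hunk and assumes the modules are importable as mercurial.fileset, mercurial.parser and mercurial.error.

# Hedged sketch of the calling convention before and after this revision;
# not part of the changeset itself.
#
# Before: the parser owned the tokenizer and received the raw program text.
#     p = parser.parser(tokenize, elements)
#     tree, pos = p.parse(expr)
#
# After: the caller tokenizes and hands over an iterator of tokens.
from mercurial import error, fileset, parser

def parse_fileset(expr):
    # Build the parser from the grammar table alone ...
    p = parser.parser(fileset.elements)
    # ... and feed it the token stream; parse() returns (tree, pos).
    tree, pos = p.parse(fileset.tokenize(expr))
    if pos != len(expr):
        raise error.ParseError("invalid token", pos)
    return tree

tree = parse_fileset("modified() or added()")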
@@ -1,528 +1,528 @@
1 # fileset.py - file set queries for mercurial
1 # fileset.py - file set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, error, util, merge
9 import parser, error, util, merge
10 from i18n import _
10 from i18n import _
11
11
12 elements = {
12 elements = {
13 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
13 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
14 "-": (5, ("negate", 19), ("minus", 5)),
14 "-": (5, ("negate", 19), ("minus", 5)),
15 "not": (10, ("not", 10)),
15 "not": (10, ("not", 10)),
16 "!": (10, ("not", 10)),
16 "!": (10, ("not", 10)),
17 "and": (5, None, ("and", 5)),
17 "and": (5, None, ("and", 5)),
18 "&": (5, None, ("and", 5)),
18 "&": (5, None, ("and", 5)),
19 "or": (4, None, ("or", 4)),
19 "or": (4, None, ("or", 4)),
20 "|": (4, None, ("or", 4)),
20 "|": (4, None, ("or", 4)),
21 "+": (4, None, ("or", 4)),
21 "+": (4, None, ("or", 4)),
22 ",": (2, None, ("list", 2)),
22 ",": (2, None, ("list", 2)),
23 ")": (0, None, None),
23 ")": (0, None, None),
24 "symbol": (0, ("symbol",), None),
24 "symbol": (0, ("symbol",), None),
25 "string": (0, ("string",), None),
25 "string": (0, ("string",), None),
26 "end": (0, None, None),
26 "end": (0, None, None),
27 }
27 }
28
28
29 keywords = set(['and', 'or', 'not'])
29 keywords = set(['and', 'or', 'not'])
30
30
31 globchars = ".*{}[]?/\\_"
31 globchars = ".*{}[]?/\\_"
32
32
33 def tokenize(program):
33 def tokenize(program):
34 pos, l = 0, len(program)
34 pos, l = 0, len(program)
35 while pos < l:
35 while pos < l:
36 c = program[pos]
36 c = program[pos]
37 if c.isspace(): # skip inter-token whitespace
37 if c.isspace(): # skip inter-token whitespace
38 pass
38 pass
39 elif c in "(),-|&+!": # handle simple operators
39 elif c in "(),-|&+!": # handle simple operators
40 yield (c, None, pos)
40 yield (c, None, pos)
41 elif (c in '"\'' or c == 'r' and
41 elif (c in '"\'' or c == 'r' and
42 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
42 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
43 if c == 'r':
43 if c == 'r':
44 pos += 1
44 pos += 1
45 c = program[pos]
45 c = program[pos]
46 decode = lambda x: x
46 decode = lambda x: x
47 else:
47 else:
48 decode = lambda x: x.decode('string-escape')
48 decode = lambda x: x.decode('string-escape')
49 pos += 1
49 pos += 1
50 s = pos
50 s = pos
51 while pos < l: # find closing quote
51 while pos < l: # find closing quote
52 d = program[pos]
52 d = program[pos]
53 if d == '\\': # skip over escaped characters
53 if d == '\\': # skip over escaped characters
54 pos += 2
54 pos += 2
55 continue
55 continue
56 if d == c:
56 if d == c:
57 yield ('string', decode(program[s:pos]), s)
57 yield ('string', decode(program[s:pos]), s)
58 break
58 break
59 pos += 1
59 pos += 1
60 else:
60 else:
61 raise error.ParseError(_("unterminated string"), s)
61 raise error.ParseError(_("unterminated string"), s)
62 elif c.isalnum() or c in globchars or ord(c) > 127:
62 elif c.isalnum() or c in globchars or ord(c) > 127:
63 # gather up a symbol/keyword
63 # gather up a symbol/keyword
64 s = pos
64 s = pos
65 pos += 1
65 pos += 1
66 while pos < l: # find end of symbol
66 while pos < l: # find end of symbol
67 d = program[pos]
67 d = program[pos]
68 if not (d.isalnum() or d in globchars or ord(d) > 127):
68 if not (d.isalnum() or d in globchars or ord(d) > 127):
69 break
69 break
70 pos += 1
70 pos += 1
71 sym = program[s:pos]
71 sym = program[s:pos]
72 if sym in keywords: # operator keywords
72 if sym in keywords: # operator keywords
73 yield (sym, None, s)
73 yield (sym, None, s)
74 else:
74 else:
75 yield ('symbol', sym, s)
75 yield ('symbol', sym, s)
76 pos -= 1
76 pos -= 1
77 else:
77 else:
78 raise error.ParseError(_("syntax error"), pos)
78 raise error.ParseError(_("syntax error"), pos)
79 pos += 1
79 pos += 1
80 yield ('end', None, pos)
80 yield ('end', None, pos)
81
81
82 def parse(expr):
82 def parse(expr):
83 p = parser.parser(tokenize, elements)
83 p = parser.parser(elements)
84 tree, pos = p.parse(expr)
84 tree, pos = p.parse(tokenize(expr))
85 if pos != len(expr):
85 if pos != len(expr):
86 raise error.ParseError(_("invalid token"), pos)
86 raise error.ParseError(_("invalid token"), pos)
87 return tree
87 return tree
88
88
89 def getstring(x, err):
89 def getstring(x, err):
90 if x and (x[0] == 'string' or x[0] == 'symbol'):
90 if x and (x[0] == 'string' or x[0] == 'symbol'):
91 return x[1]
91 return x[1]
92 raise error.ParseError(err)
92 raise error.ParseError(err)
93
93
94 def getset(mctx, x):
94 def getset(mctx, x):
95 if not x:
95 if not x:
96 raise error.ParseError(_("missing argument"))
96 raise error.ParseError(_("missing argument"))
97 return methods[x[0]](mctx, *x[1:])
97 return methods[x[0]](mctx, *x[1:])
98
98
99 def stringset(mctx, x):
99 def stringset(mctx, x):
100 m = mctx.matcher([x])
100 m = mctx.matcher([x])
101 return [f for f in mctx.subset if m(f)]
101 return [f for f in mctx.subset if m(f)]
102
102
103 def andset(mctx, x, y):
103 def andset(mctx, x, y):
104 return getset(mctx.narrow(getset(mctx, x)), y)
104 return getset(mctx.narrow(getset(mctx, x)), y)
105
105
106 def orset(mctx, x, y):
106 def orset(mctx, x, y):
107 # needs optimizing
107 # needs optimizing
108 xl = getset(mctx, x)
108 xl = getset(mctx, x)
109 yl = getset(mctx, y)
109 yl = getset(mctx, y)
110 return xl + [f for f in yl if f not in xl]
110 return xl + [f for f in yl if f not in xl]
111
111
112 def notset(mctx, x):
112 def notset(mctx, x):
113 s = set(getset(mctx, x))
113 s = set(getset(mctx, x))
114 return [r for r in mctx.subset if r not in s]
114 return [r for r in mctx.subset if r not in s]
115
115
116 def minusset(mctx, x, y):
116 def minusset(mctx, x, y):
117 xl = getset(mctx, x)
117 xl = getset(mctx, x)
118 yl = set(getset(mctx, y))
118 yl = set(getset(mctx, y))
119 return [f for f in xl if f not in yl]
119 return [f for f in xl if f not in yl]
120
120
121 def listset(mctx, a, b):
121 def listset(mctx, a, b):
122 raise error.ParseError(_("can't use a list in this context"))
122 raise error.ParseError(_("can't use a list in this context"))
123
123
124 def modified(mctx, x):
124 def modified(mctx, x):
125 """``modified()``
125 """``modified()``
126 File that is modified according to status.
126 File that is modified according to status.
127 """
127 """
128 # i18n: "modified" is a keyword
128 # i18n: "modified" is a keyword
129 getargs(x, 0, 0, _("modified takes no arguments"))
129 getargs(x, 0, 0, _("modified takes no arguments"))
130 s = mctx.status().modified
130 s = mctx.status().modified
131 return [f for f in mctx.subset if f in s]
131 return [f for f in mctx.subset if f in s]
132
132
133 def added(mctx, x):
133 def added(mctx, x):
134 """``added()``
134 """``added()``
135 File that is added according to status.
135 File that is added according to status.
136 """
136 """
137 # i18n: "added" is a keyword
137 # i18n: "added" is a keyword
138 getargs(x, 0, 0, _("added takes no arguments"))
138 getargs(x, 0, 0, _("added takes no arguments"))
139 s = mctx.status().added
139 s = mctx.status().added
140 return [f for f in mctx.subset if f in s]
140 return [f for f in mctx.subset if f in s]
141
141
142 def removed(mctx, x):
142 def removed(mctx, x):
143 """``removed()``
143 """``removed()``
144 File that is removed according to status.
144 File that is removed according to status.
145 """
145 """
146 # i18n: "removed" is a keyword
146 # i18n: "removed" is a keyword
147 getargs(x, 0, 0, _("removed takes no arguments"))
147 getargs(x, 0, 0, _("removed takes no arguments"))
148 s = mctx.status().removed
148 s = mctx.status().removed
149 return [f for f in mctx.subset if f in s]
149 return [f for f in mctx.subset if f in s]
150
150
151 def deleted(mctx, x):
151 def deleted(mctx, x):
152 """``deleted()``
152 """``deleted()``
153 File that is deleted according to status.
153 File that is deleted according to status.
154 """
154 """
155 # i18n: "deleted" is a keyword
155 # i18n: "deleted" is a keyword
156 getargs(x, 0, 0, _("deleted takes no arguments"))
156 getargs(x, 0, 0, _("deleted takes no arguments"))
157 s = mctx.status().deleted
157 s = mctx.status().deleted
158 return [f for f in mctx.subset if f in s]
158 return [f for f in mctx.subset if f in s]
159
159
160 def unknown(mctx, x):
160 def unknown(mctx, x):
161 """``unknown()``
161 """``unknown()``
162 File that is unknown according to status. These files will only be
162 File that is unknown according to status. These files will only be
163 considered if this predicate is used.
163 considered if this predicate is used.
164 """
164 """
165 # i18n: "unknown" is a keyword
165 # i18n: "unknown" is a keyword
166 getargs(x, 0, 0, _("unknown takes no arguments"))
166 getargs(x, 0, 0, _("unknown takes no arguments"))
167 s = mctx.status().unknown
167 s = mctx.status().unknown
168 return [f for f in mctx.subset if f in s]
168 return [f for f in mctx.subset if f in s]
169
169
170 def ignored(mctx, x):
170 def ignored(mctx, x):
171 """``ignored()``
171 """``ignored()``
172 File that is ignored according to status. These files will only be
172 File that is ignored according to status. These files will only be
173 considered if this predicate is used.
173 considered if this predicate is used.
174 """
174 """
175 # i18n: "ignored" is a keyword
175 # i18n: "ignored" is a keyword
176 getargs(x, 0, 0, _("ignored takes no arguments"))
176 getargs(x, 0, 0, _("ignored takes no arguments"))
177 s = mctx.status().ignored
177 s = mctx.status().ignored
178 return [f for f in mctx.subset if f in s]
178 return [f for f in mctx.subset if f in s]
179
179
180 def clean(mctx, x):
180 def clean(mctx, x):
181 """``clean()``
181 """``clean()``
182 File that is clean according to status.
182 File that is clean according to status.
183 """
183 """
184 # i18n: "clean" is a keyword
184 # i18n: "clean" is a keyword
185 getargs(x, 0, 0, _("clean takes no arguments"))
185 getargs(x, 0, 0, _("clean takes no arguments"))
186 s = mctx.status().clean
186 s = mctx.status().clean
187 return [f for f in mctx.subset if f in s]
187 return [f for f in mctx.subset if f in s]
188
188
189 def func(mctx, a, b):
189 def func(mctx, a, b):
190 if a[0] == 'symbol' and a[1] in symbols:
190 if a[0] == 'symbol' and a[1] in symbols:
191 return symbols[a[1]](mctx, b)
191 return symbols[a[1]](mctx, b)
192
192
193 keep = lambda fn: getattr(fn, '__doc__', None) is not None
193 keep = lambda fn: getattr(fn, '__doc__', None) is not None
194
194
195 syms = [s for (s, fn) in symbols.items() if keep(fn)]
195 syms = [s for (s, fn) in symbols.items() if keep(fn)]
196 raise error.UnknownIdentifier(a[1], syms)
196 raise error.UnknownIdentifier(a[1], syms)
197
197
198 def getlist(x):
198 def getlist(x):
199 if not x:
199 if not x:
200 return []
200 return []
201 if x[0] == 'list':
201 if x[0] == 'list':
202 return getlist(x[1]) + [x[2]]
202 return getlist(x[1]) + [x[2]]
203 return [x]
203 return [x]
204
204
205 def getargs(x, min, max, err):
205 def getargs(x, min, max, err):
206 l = getlist(x)
206 l = getlist(x)
207 if len(l) < min or len(l) > max:
207 if len(l) < min or len(l) > max:
208 raise error.ParseError(err)
208 raise error.ParseError(err)
209 return l
209 return l
210
210
211 def binary(mctx, x):
211 def binary(mctx, x):
212 """``binary()``
212 """``binary()``
213 File that appears to be binary (contains NUL bytes).
213 File that appears to be binary (contains NUL bytes).
214 """
214 """
215 # i18n: "binary" is a keyword
215 # i18n: "binary" is a keyword
216 getargs(x, 0, 0, _("binary takes no arguments"))
216 getargs(x, 0, 0, _("binary takes no arguments"))
217 return [f for f in mctx.existing() if util.binary(mctx.ctx[f].data())]
217 return [f for f in mctx.existing() if util.binary(mctx.ctx[f].data())]
218
218
219 def exec_(mctx, x):
219 def exec_(mctx, x):
220 """``exec()``
220 """``exec()``
221 File that is marked as executable.
221 File that is marked as executable.
222 """
222 """
223 # i18n: "exec" is a keyword
223 # i18n: "exec" is a keyword
224 getargs(x, 0, 0, _("exec takes no arguments"))
224 getargs(x, 0, 0, _("exec takes no arguments"))
225 return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'x']
225 return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'x']
226
226
227 def symlink(mctx, x):
227 def symlink(mctx, x):
228 """``symlink()``
228 """``symlink()``
229 File that is marked as a symlink.
229 File that is marked as a symlink.
230 """
230 """
231 # i18n: "symlink" is a keyword
231 # i18n: "symlink" is a keyword
232 getargs(x, 0, 0, _("symlink takes no arguments"))
232 getargs(x, 0, 0, _("symlink takes no arguments"))
233 return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'l']
233 return [f for f in mctx.existing() if mctx.ctx.flags(f) == 'l']
234
234
235 def resolved(mctx, x):
235 def resolved(mctx, x):
236 """``resolved()``
236 """``resolved()``
237 File that is marked resolved according to the resolve state.
237 File that is marked resolved according to the resolve state.
238 """
238 """
239 # i18n: "resolved" is a keyword
239 # i18n: "resolved" is a keyword
240 getargs(x, 0, 0, _("resolved takes no arguments"))
240 getargs(x, 0, 0, _("resolved takes no arguments"))
241 if mctx.ctx.rev() is not None:
241 if mctx.ctx.rev() is not None:
242 return []
242 return []
243 ms = merge.mergestate(mctx.ctx.repo())
243 ms = merge.mergestate(mctx.ctx.repo())
244 return [f for f in mctx.subset if f in ms and ms[f] == 'r']
244 return [f for f in mctx.subset if f in ms and ms[f] == 'r']
245
245
246 def unresolved(mctx, x):
246 def unresolved(mctx, x):
247 """``unresolved()``
247 """``unresolved()``
248 File that is marked unresolved according to the resolve state.
248 File that is marked unresolved according to the resolve state.
249 """
249 """
250 # i18n: "unresolved" is a keyword
250 # i18n: "unresolved" is a keyword
251 getargs(x, 0, 0, _("unresolved takes no arguments"))
251 getargs(x, 0, 0, _("unresolved takes no arguments"))
252 if mctx.ctx.rev() is not None:
252 if mctx.ctx.rev() is not None:
253 return []
253 return []
254 ms = merge.mergestate(mctx.ctx.repo())
254 ms = merge.mergestate(mctx.ctx.repo())
255 return [f for f in mctx.subset if f in ms and ms[f] == 'u']
255 return [f for f in mctx.subset if f in ms and ms[f] == 'u']
256
256
257 def hgignore(mctx, x):
257 def hgignore(mctx, x):
258 """``hgignore()``
258 """``hgignore()``
259 File that matches the active .hgignore pattern.
259 File that matches the active .hgignore pattern.
260 """
260 """
261 # i18n: "hgignore" is a keyword
261 # i18n: "hgignore" is a keyword
262 getargs(x, 0, 0, _("hgignore takes no arguments"))
262 getargs(x, 0, 0, _("hgignore takes no arguments"))
263 ignore = mctx.ctx.repo().dirstate._ignore
263 ignore = mctx.ctx.repo().dirstate._ignore
264 return [f for f in mctx.subset if ignore(f)]
264 return [f for f in mctx.subset if ignore(f)]
265
265
266 def portable(mctx, x):
266 def portable(mctx, x):
267 """``portable()``
267 """``portable()``
268 File that has a portable name. (This doesn't include filenames with case
268 File that has a portable name. (This doesn't include filenames with case
269 collisions.)
269 collisions.)
270 """
270 """
271 # i18n: "portable" is a keyword
271 # i18n: "portable" is a keyword
272 getargs(x, 0, 0, _("portable takes no arguments"))
272 getargs(x, 0, 0, _("portable takes no arguments"))
273 checkwinfilename = util.checkwinfilename
273 checkwinfilename = util.checkwinfilename
274 return [f for f in mctx.subset if checkwinfilename(f) is None]
274 return [f for f in mctx.subset if checkwinfilename(f) is None]
275
275
276 def grep(mctx, x):
276 def grep(mctx, x):
277 """``grep(regex)``
277 """``grep(regex)``
278 File contains the given regular expression.
278 File contains the given regular expression.
279 """
279 """
280 try:
280 try:
281 # i18n: "grep" is a keyword
281 # i18n: "grep" is a keyword
282 r = re.compile(getstring(x, _("grep requires a pattern")))
282 r = re.compile(getstring(x, _("grep requires a pattern")))
283 except re.error, e:
283 except re.error, e:
284 raise error.ParseError(_('invalid match pattern: %s') % e)
284 raise error.ParseError(_('invalid match pattern: %s') % e)
285 return [f for f in mctx.existing() if r.search(mctx.ctx[f].data())]
285 return [f for f in mctx.existing() if r.search(mctx.ctx[f].data())]
286
286
287 def _sizetomax(s):
287 def _sizetomax(s):
288 try:
288 try:
289 s = s.strip()
289 s = s.strip()
290 for k, v in util._sizeunits:
290 for k, v in util._sizeunits:
291 if s.endswith(k):
291 if s.endswith(k):
292 # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1
292 # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1
293 n = s[:-len(k)]
293 n = s[:-len(k)]
294 inc = 1.0
294 inc = 1.0
295 if "." in n:
295 if "." in n:
296 inc /= 10 ** len(n.split(".")[1])
296 inc /= 10 ** len(n.split(".")[1])
297 return int((float(n) + inc) * v) - 1
297 return int((float(n) + inc) * v) - 1
298 # no extension, this is a precise value
298 # no extension, this is a precise value
299 return int(s)
299 return int(s)
300 except ValueError:
300 except ValueError:
301 raise error.ParseError(_("couldn't parse size: %s") % s)
301 raise error.ParseError(_("couldn't parse size: %s") % s)
302
302
303 def size(mctx, x):
303 def size(mctx, x):
304 """``size(expression)``
304 """``size(expression)``
305 File size matches the given expression. Examples:
305 File size matches the given expression. Examples:
306
306
307 - 1k (files from 1024 to 2047 bytes)
307 - 1k (files from 1024 to 2047 bytes)
308 - < 20k (files less than 20480 bytes)
308 - < 20k (files less than 20480 bytes)
309 - >= .5MB (files at least 524288 bytes)
309 - >= .5MB (files at least 524288 bytes)
310 - 4k - 1MB (files from 4096 bytes to 1048576 bytes)
310 - 4k - 1MB (files from 4096 bytes to 1048576 bytes)
311 """
311 """
312
312
313 # i18n: "size" is a keyword
313 # i18n: "size" is a keyword
314 expr = getstring(x, _("size requires an expression")).strip()
314 expr = getstring(x, _("size requires an expression")).strip()
315 if '-' in expr: # do we have a range?
315 if '-' in expr: # do we have a range?
316 a, b = expr.split('-', 1)
316 a, b = expr.split('-', 1)
317 a = util.sizetoint(a)
317 a = util.sizetoint(a)
318 b = util.sizetoint(b)
318 b = util.sizetoint(b)
319 m = lambda x: x >= a and x <= b
319 m = lambda x: x >= a and x <= b
320 elif expr.startswith("<="):
320 elif expr.startswith("<="):
321 a = util.sizetoint(expr[2:])
321 a = util.sizetoint(expr[2:])
322 m = lambda x: x <= a
322 m = lambda x: x <= a
323 elif expr.startswith("<"):
323 elif expr.startswith("<"):
324 a = util.sizetoint(expr[1:])
324 a = util.sizetoint(expr[1:])
325 m = lambda x: x < a
325 m = lambda x: x < a
326 elif expr.startswith(">="):
326 elif expr.startswith(">="):
327 a = util.sizetoint(expr[2:])
327 a = util.sizetoint(expr[2:])
328 m = lambda x: x >= a
328 m = lambda x: x >= a
329 elif expr.startswith(">"):
329 elif expr.startswith(">"):
330 a = util.sizetoint(expr[1:])
330 a = util.sizetoint(expr[1:])
331 m = lambda x: x > a
331 m = lambda x: x > a
332 elif expr[0].isdigit or expr[0] == '.':
332 elif expr[0].isdigit or expr[0] == '.':
333 a = util.sizetoint(expr)
333 a = util.sizetoint(expr)
334 b = _sizetomax(expr)
334 b = _sizetomax(expr)
335 m = lambda x: x >= a and x <= b
335 m = lambda x: x >= a and x <= b
336 else:
336 else:
337 raise error.ParseError(_("couldn't parse size: %s") % expr)
337 raise error.ParseError(_("couldn't parse size: %s") % expr)
338
338
339 return [f for f in mctx.existing() if m(mctx.ctx[f].size())]
339 return [f for f in mctx.existing() if m(mctx.ctx[f].size())]
340
340
341 def encoding(mctx, x):
341 def encoding(mctx, x):
342 """``encoding(name)``
342 """``encoding(name)``
343 File can be successfully decoded with the given character
343 File can be successfully decoded with the given character
344 encoding. May not be useful for encodings other than ASCII and
344 encoding. May not be useful for encodings other than ASCII and
345 UTF-8.
345 UTF-8.
346 """
346 """
347
347
348 # i18n: "encoding" is a keyword
348 # i18n: "encoding" is a keyword
349 enc = getstring(x, _("encoding requires an encoding name"))
349 enc = getstring(x, _("encoding requires an encoding name"))
350
350
351 s = []
351 s = []
352 for f in mctx.existing():
352 for f in mctx.existing():
353 d = mctx.ctx[f].data()
353 d = mctx.ctx[f].data()
354 try:
354 try:
355 d.decode(enc)
355 d.decode(enc)
356 except LookupError:
356 except LookupError:
357 raise util.Abort(_("unknown encoding '%s'") % enc)
357 raise util.Abort(_("unknown encoding '%s'") % enc)
358 except UnicodeDecodeError:
358 except UnicodeDecodeError:
359 continue
359 continue
360 s.append(f)
360 s.append(f)
361
361
362 return s
362 return s
363
363
364 def eol(mctx, x):
364 def eol(mctx, x):
365 """``eol(style)``
365 """``eol(style)``
366 File contains newlines of the given style (dos, unix, mac). Binary
366 File contains newlines of the given style (dos, unix, mac). Binary
367 files are excluded, files with mixed line endings match multiple
367 files are excluded, files with mixed line endings match multiple
368 styles.
368 styles.
369 """
369 """
370
370
371 # i18n: "encoding" is a keyword
371 # i18n: "encoding" is a keyword
372 enc = getstring(x, _("encoding requires an encoding name"))
372 enc = getstring(x, _("encoding requires an encoding name"))
373
373
374 s = []
374 s = []
375 for f in mctx.existing():
375 for f in mctx.existing():
376 d = mctx.ctx[f].data()
376 d = mctx.ctx[f].data()
377 if util.binary(d):
377 if util.binary(d):
378 continue
378 continue
379 if (enc == 'dos' or enc == 'win') and '\r\n' in d:
379 if (enc == 'dos' or enc == 'win') and '\r\n' in d:
380 s.append(f)
380 s.append(f)
381 elif enc == 'unix' and re.search('(?<!\r)\n', d):
381 elif enc == 'unix' and re.search('(?<!\r)\n', d):
382 s.append(f)
382 s.append(f)
383 elif enc == 'mac' and re.search('\r(?!\n)', d):
383 elif enc == 'mac' and re.search('\r(?!\n)', d):
384 s.append(f)
384 s.append(f)
385 return s
385 return s
386
386
387 def copied(mctx, x):
387 def copied(mctx, x):
388 """``copied()``
388 """``copied()``
389 File that is recorded as being copied.
389 File that is recorded as being copied.
390 """
390 """
391 # i18n: "copied" is a keyword
391 # i18n: "copied" is a keyword
392 getargs(x, 0, 0, _("copied takes no arguments"))
392 getargs(x, 0, 0, _("copied takes no arguments"))
393 s = []
393 s = []
394 for f in mctx.subset:
394 for f in mctx.subset:
395 p = mctx.ctx[f].parents()
395 p = mctx.ctx[f].parents()
396 if p and p[0].path() != f:
396 if p and p[0].path() != f:
397 s.append(f)
397 s.append(f)
398 return s
398 return s
399
399
400 def subrepo(mctx, x):
400 def subrepo(mctx, x):
401 """``subrepo([pattern])``
401 """``subrepo([pattern])``
402 Subrepositories whose paths match the given pattern.
402 Subrepositories whose paths match the given pattern.
403 """
403 """
404 # i18n: "subrepo" is a keyword
404 # i18n: "subrepo" is a keyword
405 getargs(x, 0, 1, _("subrepo takes at most one argument"))
405 getargs(x, 0, 1, _("subrepo takes at most one argument"))
406 ctx = mctx.ctx
406 ctx = mctx.ctx
407 sstate = sorted(ctx.substate)
407 sstate = sorted(ctx.substate)
408 if x:
408 if x:
409 # i18n: "subrepo" is a keyword
409 # i18n: "subrepo" is a keyword
410 pat = getstring(x, _("subrepo requires a pattern or no arguments"))
410 pat = getstring(x, _("subrepo requires a pattern or no arguments"))
411
411
412 import match as matchmod # avoid circular import issues
412 import match as matchmod # avoid circular import issues
413 fast = not matchmod.patkind(pat)
413 fast = not matchmod.patkind(pat)
414 if fast:
414 if fast:
415 def m(s):
415 def m(s):
416 return (s == pat)
416 return (s == pat)
417 else:
417 else:
418 m = matchmod.match(ctx.repo().root, '', [pat], ctx=ctx)
418 m = matchmod.match(ctx.repo().root, '', [pat], ctx=ctx)
419 return [sub for sub in sstate if m(sub)]
419 return [sub for sub in sstate if m(sub)]
420 else:
420 else:
421 return [sub for sub in sstate]
421 return [sub for sub in sstate]
422
422
423 symbols = {
423 symbols = {
424 'added': added,
424 'added': added,
425 'binary': binary,
425 'binary': binary,
426 'clean': clean,
426 'clean': clean,
427 'copied': copied,
427 'copied': copied,
428 'deleted': deleted,
428 'deleted': deleted,
429 'encoding': encoding,
429 'encoding': encoding,
430 'eol': eol,
430 'eol': eol,
431 'exec': exec_,
431 'exec': exec_,
432 'grep': grep,
432 'grep': grep,
433 'ignored': ignored,
433 'ignored': ignored,
434 'hgignore': hgignore,
434 'hgignore': hgignore,
435 'modified': modified,
435 'modified': modified,
436 'portable': portable,
436 'portable': portable,
437 'removed': removed,
437 'removed': removed,
438 'resolved': resolved,
438 'resolved': resolved,
439 'size': size,
439 'size': size,
440 'symlink': symlink,
440 'symlink': symlink,
441 'unknown': unknown,
441 'unknown': unknown,
442 'unresolved': unresolved,
442 'unresolved': unresolved,
443 'subrepo': subrepo,
443 'subrepo': subrepo,
444 }
444 }
445
445
446 methods = {
446 methods = {
447 'string': stringset,
447 'string': stringset,
448 'symbol': stringset,
448 'symbol': stringset,
449 'and': andset,
449 'and': andset,
450 'or': orset,
450 'or': orset,
451 'minus': minusset,
451 'minus': minusset,
452 'list': listset,
452 'list': listset,
453 'group': getset,
453 'group': getset,
454 'not': notset,
454 'not': notset,
455 'func': func,
455 'func': func,
456 }
456 }
457
457
458 class matchctx(object):
458 class matchctx(object):
459 def __init__(self, ctx, subset=None, status=None):
459 def __init__(self, ctx, subset=None, status=None):
460 self.ctx = ctx
460 self.ctx = ctx
461 self.subset = subset
461 self.subset = subset
462 self._status = status
462 self._status = status
463 def status(self):
463 def status(self):
464 return self._status
464 return self._status
465 def matcher(self, patterns):
465 def matcher(self, patterns):
466 return self.ctx.match(patterns)
466 return self.ctx.match(patterns)
467 def filter(self, files):
467 def filter(self, files):
468 return [f for f in files if f in self.subset]
468 return [f for f in files if f in self.subset]
469 def existing(self):
469 def existing(self):
470 if self._status is not None:
470 if self._status is not None:
471 removed = set(self._status[3])
471 removed = set(self._status[3])
472 unknown = set(self._status[4] + self._status[5])
472 unknown = set(self._status[4] + self._status[5])
473 else:
473 else:
474 removed = set()
474 removed = set()
475 unknown = set()
475 unknown = set()
476 return (f for f in self.subset
476 return (f for f in self.subset
477 if (f in self.ctx and f not in removed) or f in unknown)
477 if (f in self.ctx and f not in removed) or f in unknown)
478 def narrow(self, files):
478 def narrow(self, files):
479 return matchctx(self.ctx, self.filter(files), self._status)
479 return matchctx(self.ctx, self.filter(files), self._status)
480
480
481 def _intree(funcs, tree):
481 def _intree(funcs, tree):
482 if isinstance(tree, tuple):
482 if isinstance(tree, tuple):
483 if tree[0] == 'func' and tree[1][0] == 'symbol':
483 if tree[0] == 'func' and tree[1][0] == 'symbol':
484 if tree[1][1] in funcs:
484 if tree[1][1] in funcs:
485 return True
485 return True
486 for s in tree[1:]:
486 for s in tree[1:]:
487 if _intree(funcs, s):
487 if _intree(funcs, s):
488 return True
488 return True
489 return False
489 return False
490
490
491 # filesets using matchctx.existing()
491 # filesets using matchctx.existing()
492 _existingcallers = [
492 _existingcallers = [
493 'binary',
493 'binary',
494 'exec',
494 'exec',
495 'grep',
495 'grep',
496 'size',
496 'size',
497 'symlink',
497 'symlink',
498 ]
498 ]
499
499
500 def getfileset(ctx, expr):
500 def getfileset(ctx, expr):
501 tree = parse(expr)
501 tree = parse(expr)
502
502
503 # do we need status info?
503 # do we need status info?
504 if (_intree(['modified', 'added', 'removed', 'deleted',
504 if (_intree(['modified', 'added', 'removed', 'deleted',
505 'unknown', 'ignored', 'clean'], tree) or
505 'unknown', 'ignored', 'clean'], tree) or
506 # Using matchctx.existing() on a workingctx requires us to check
506 # Using matchctx.existing() on a workingctx requires us to check
507 # for deleted files.
507 # for deleted files.
508 (ctx.rev() is None and _intree(_existingcallers, tree))):
508 (ctx.rev() is None and _intree(_existingcallers, tree))):
509 unknown = _intree(['unknown'], tree)
509 unknown = _intree(['unknown'], tree)
510 ignored = _intree(['ignored'], tree)
510 ignored = _intree(['ignored'], tree)
511
511
512 r = ctx.repo()
512 r = ctx.repo()
513 status = r.status(ctx.p1(), ctx,
513 status = r.status(ctx.p1(), ctx,
514 unknown=unknown, ignored=ignored, clean=True)
514 unknown=unknown, ignored=ignored, clean=True)
515 subset = []
515 subset = []
516 for c in status:
516 for c in status:
517 subset.extend(c)
517 subset.extend(c)
518 else:
518 else:
519 status = None
519 status = None
520 subset = list(ctx.walk(ctx.match([])))
520 subset = list(ctx.walk(ctx.match([])))
521
521
522 return getset(matchctx(ctx, subset, status), tree)
522 return getset(matchctx(ctx, subset, status), tree)
523
523
524 def prettyformat(tree):
524 def prettyformat(tree):
525 return parser.prettyformat(tree, ('string', 'symbol'))
525 return parser.prettyformat(tree, ('string', 'symbol'))
526
526
527 # tell hggettext to extract docstrings from these functions:
527 # tell hggettext to extract docstrings from these functions:
528 i18nfunctions = symbols.values()
528 i18nfunctions = symbols.values()
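For context, a usage sketch of the getfileset() entry point defined above; the repository path, revision and expression are illustrative and not part of the changeset.

# Hypothetical caller of fileset.getfileset(); assumes a local repository.
from mercurial import fileset, hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')  # open the repository in the cwd
ctx = repo['.']                        # working directory parent changeset
# Evaluate a fileset expression against that context; getfileset() parses the
# expression, gathers status/manifest data as needed, and returns the
# matching file names.
files = fileset.getfileset(ctx, 'modified() and size(">1k")')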
@@ -1,187 +1,183 @@
1 # parser.py - simple top-down operator precedence parser for mercurial
1 # parser.py - simple top-down operator precedence parser for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
8 # see http://effbot.org/zone/simple-top-down-parsing.htm and
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
9 # http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
10 # for background
10 # for background
11
11
12 # takes a tokenizer and elements
12 # takes a tokenizer and elements
13 # tokenizer is an iterator that returns type, value pairs
13 # tokenizer is an iterator that returns type, value pairs
14 # elements is a mapping of types to binding strength, prefix and infix actions
14 # elements is a mapping of types to binding strength, prefix and infix actions
15 # an action is a tree node name, a tree label, and an optional match
15 # an action is a tree node name, a tree label, and an optional match
16 # __call__(program) parses program into a labeled tree
16 # __call__(program) parses program into a labeled tree
17
17
18 import error
18 import error
19 from i18n import _
19 from i18n import _
20
20
21 class parser(object):
21 class parser(object):
22 def __init__(self, tokenizer, elements, methods=None):
22 def __init__(self, elements, methods=None):
23 self._tokenizer = tokenizer
24 self._elements = elements
23 self._elements = elements
25 self._methods = methods
24 self._methods = methods
26 self.current = None
25 self.current = None
27 def _advance(self):
26 def _advance(self):
28 'advance the tokenizer'
27 'advance the tokenizer'
29 t = self.current
28 t = self.current
30 self.current = next(self._iter, None)
29 self.current = next(self._iter, None)
31 return t
30 return t
32 def _match(self, m, pos):
31 def _match(self, m, pos):
33 'make sure the tokenizer matches an end condition'
32 'make sure the tokenizer matches an end condition'
34 if self.current[0] != m:
33 if self.current[0] != m:
35 raise error.ParseError(_("unexpected token: %s") % self.current[0],
34 raise error.ParseError(_("unexpected token: %s") % self.current[0],
36 self.current[2])
35 self.current[2])
37 self._advance()
36 self._advance()
38 def _parse(self, bind=0):
37 def _parse(self, bind=0):
39 token, value, pos = self._advance()
38 token, value, pos = self._advance()
40 # handle prefix rules on current token
39 # handle prefix rules on current token
41 prefix = self._elements[token][1]
40 prefix = self._elements[token][1]
42 if not prefix:
41 if not prefix:
43 raise error.ParseError(_("not a prefix: %s") % token, pos)
42 raise error.ParseError(_("not a prefix: %s") % token, pos)
44 if len(prefix) == 1:
43 if len(prefix) == 1:
45 expr = (prefix[0], value)
44 expr = (prefix[0], value)
46 else:
45 else:
47 if len(prefix) > 2 and prefix[2] == self.current[0]:
46 if len(prefix) > 2 and prefix[2] == self.current[0]:
48 self._match(prefix[2], pos)
47 self._match(prefix[2], pos)
49 expr = (prefix[0], None)
48 expr = (prefix[0], None)
50 else:
49 else:
51 expr = (prefix[0], self._parse(prefix[1]))
50 expr = (prefix[0], self._parse(prefix[1]))
52 if len(prefix) > 2:
51 if len(prefix) > 2:
53 self._match(prefix[2], pos)
52 self._match(prefix[2], pos)
54 # gather tokens until we meet a lower binding strength
53 # gather tokens until we meet a lower binding strength
55 while bind < self._elements[self.current[0]][0]:
54 while bind < self._elements[self.current[0]][0]:
56 token, value, pos = self._advance()
55 token, value, pos = self._advance()
57 e = self._elements[token]
56 e = self._elements[token]
58 # check for suffix - next token isn't a valid prefix
57 # check for suffix - next token isn't a valid prefix
59 if len(e) == 4 and not self._elements[self.current[0]][1]:
58 if len(e) == 4 and not self._elements[self.current[0]][1]:
60 suffix = e[3]
59 suffix = e[3]
61 expr = (suffix[0], expr)
60 expr = (suffix[0], expr)
62 else:
61 else:
63 # handle infix rules
62 # handle infix rules
64 if len(e) < 3 or not e[2]:
63 if len(e) < 3 or not e[2]:
65 raise error.ParseError(_("not an infix: %s") % token, pos)
64 raise error.ParseError(_("not an infix: %s") % token, pos)
66 infix = e[2]
65 infix = e[2]
67 if len(infix) == 3 and infix[2] == self.current[0]:
66 if len(infix) == 3 and infix[2] == self.current[0]:
68 self._match(infix[2], pos)
67 self._match(infix[2], pos)
69 expr = (infix[0], expr, (None))
68 expr = (infix[0], expr, (None))
70 else:
69 else:
71 expr = (infix[0], expr, self._parse(infix[1]))
70 expr = (infix[0], expr, self._parse(infix[1]))
72 if len(infix) == 3:
71 if len(infix) == 3:
73 self._match(infix[2], pos)
72 self._match(infix[2], pos)
74 return expr
73 return expr
75 def parse(self, message, lookup=None):
74 def parse(self, tokeniter):
76 'generate a parse tree from a message'
75 'generate a parse tree from tokens'
77 if lookup:
76 self._iter = tokeniter
78 self._iter = self._tokenizer(message, lookup)
79 else:
80 self._iter = self._tokenizer(message)
81 self._advance()
77 self._advance()
82 res = self._parse()
78 res = self._parse()
83 token, value, pos = self.current
79 token, value, pos = self.current
84 return res, pos
80 return res, pos
85 def eval(self, tree):
81 def eval(self, tree):
86 'recursively evaluate a parse tree using node methods'
82 'recursively evaluate a parse tree using node methods'
87 if not isinstance(tree, tuple):
83 if not isinstance(tree, tuple):
88 return tree
84 return tree
89 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
85 return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
90 def __call__(self, message):
86 def __call__(self, tokeniter):
91 'parse a message into a parse tree and evaluate if methods given'
87 'parse tokens into a parse tree and evaluate if methods given'
92 t = self.parse(message)
88 t = self.parse(tokeniter)
93 if self._methods:
89 if self._methods:
94 return self.eval(t)
90 return self.eval(t)
95 return t
91 return t
96
92
97 def _prettyformat(tree, leafnodes, level, lines):
93 def _prettyformat(tree, leafnodes, level, lines):
98 if not isinstance(tree, tuple) or tree[0] in leafnodes:
94 if not isinstance(tree, tuple) or tree[0] in leafnodes:
99 lines.append((level, str(tree)))
95 lines.append((level, str(tree)))
100 else:
96 else:
101 lines.append((level, '(%s' % tree[0]))
97 lines.append((level, '(%s' % tree[0]))
102 for s in tree[1:]:
98 for s in tree[1:]:
103 _prettyformat(s, leafnodes, level + 1, lines)
99 _prettyformat(s, leafnodes, level + 1, lines)
104 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
100 lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]
105
101
106 def prettyformat(tree, leafnodes):
102 def prettyformat(tree, leafnodes):
107 lines = []
103 lines = []
108 _prettyformat(tree, leafnodes, 0, lines)
104 _prettyformat(tree, leafnodes, 0, lines)
109 output = '\n'.join((' ' * l + s) for l, s in lines)
105 output = '\n'.join((' ' * l + s) for l, s in lines)
110 return output
106 return output
111
107
112 def simplifyinfixops(tree, targetnodes):
108 def simplifyinfixops(tree, targetnodes):
113 """Flatten chained infix operations to reduce usage of Python stack
109 """Flatten chained infix operations to reduce usage of Python stack
114
110
115 >>> def f(tree):
111 >>> def f(tree):
116 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
112 ... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
117 >>> f(('or',
113 >>> f(('or',
118 ... ('or',
114 ... ('or',
119 ... ('symbol', '1'),
115 ... ('symbol', '1'),
120 ... ('symbol', '2')),
116 ... ('symbol', '2')),
121 ... ('symbol', '3')))
117 ... ('symbol', '3')))
122 (or
118 (or
123 ('symbol', '1')
119 ('symbol', '1')
124 ('symbol', '2')
120 ('symbol', '2')
125 ('symbol', '3'))
121 ('symbol', '3'))
126 >>> f(('func',
122 >>> f(('func',
127 ... ('symbol', 'p1'),
123 ... ('symbol', 'p1'),
128 ... ('or',
124 ... ('or',
129 ... ('or',
125 ... ('or',
130 ... ('func',
126 ... ('func',
131 ... ('symbol', 'sort'),
127 ... ('symbol', 'sort'),
132 ... ('list',
128 ... ('list',
133 ... ('or',
129 ... ('or',
134 ... ('or',
130 ... ('or',
135 ... ('symbol', '1'),
131 ... ('symbol', '1'),
136 ... ('symbol', '2')),
132 ... ('symbol', '2')),
137 ... ('symbol', '3')),
133 ... ('symbol', '3')),
138 ... ('negate',
134 ... ('negate',
139 ... ('symbol', 'rev')))),
135 ... ('symbol', 'rev')))),
140 ... ('and',
136 ... ('and',
141 ... ('symbol', '4'),
137 ... ('symbol', '4'),
142 ... ('group',
138 ... ('group',
143 ... ('or',
139 ... ('or',
144 ... ('or',
140 ... ('or',
145 ... ('symbol', '5'),
141 ... ('symbol', '5'),
146 ... ('symbol', '6')),
142 ... ('symbol', '6')),
147 ... ('symbol', '7'))))),
143 ... ('symbol', '7'))))),
148 ... ('symbol', '8'))))
144 ... ('symbol', '8'))))
149 (func
145 (func
150 ('symbol', 'p1')
146 ('symbol', 'p1')
151 (or
147 (or
152 (func
148 (func
153 ('symbol', 'sort')
149 ('symbol', 'sort')
154 (list
150 (list
155 (or
151 (or
156 ('symbol', '1')
152 ('symbol', '1')
157 ('symbol', '2')
153 ('symbol', '2')
158 ('symbol', '3'))
154 ('symbol', '3'))
159 (negate
155 (negate
160 ('symbol', 'rev'))))
156 ('symbol', 'rev'))))
161 (and
157 (and
162 ('symbol', '4')
158 ('symbol', '4')
163 (group
159 (group
164 (or
160 (or
165 ('symbol', '5')
161 ('symbol', '5')
166 ('symbol', '6')
162 ('symbol', '6')
167 ('symbol', '7'))))
163 ('symbol', '7'))))
168 ('symbol', '8')))
164 ('symbol', '8')))
169 """
165 """
170 if not isinstance(tree, tuple):
166 if not isinstance(tree, tuple):
171 return tree
167 return tree
172 op = tree[0]
168 op = tree[0]
173 if op not in targetnodes:
169 if op not in targetnodes:
174 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
170 return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
175
171
176 # walk down left nodes taking each right node. no recursion to left nodes
172 # walk down left nodes taking each right node. no recursion to left nodes
177 # because infix operators are left-associative, i.e. left tree is deep.
173 # because infix operators are left-associative, i.e. left tree is deep.
178 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
174 # e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
179 simplified = []
175 simplified = []
180 x = tree
176 x = tree
181 while x[0] == op:
177 while x[0] == op:
182 l, r = x[1:]
178 l, r = x[1:]
183 simplified.append(simplifyinfixops(r, targetnodes))
179 simplified.append(simplifyinfixops(r, targetnodes))
184 x = l
180 x = l
185 simplified.append(simplifyinfixops(x, targetnodes))
181 simplified.append(simplifyinfixops(x, targetnodes))
186 simplified.append(op)
182 simplified.append(op)
187 return tuple(reversed(simplified))
183 return tuple(reversed(simplified))
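To make the parser contract concrete (an elements table of binding strengths plus prefix/infix actions, driven by a (type, value, pos) token iterator as described in the module comments above), here is a minimal sketch with a hypothetical toy grammar; none of these names come from the changeset.

# Toy grammar exercising the new parse(tokeniter) entry point.
from mercurial import parser

elements = {
    # token type: (binding strength, prefix action, infix action)
    "+": (4, None, ("add", 4)),
    "symbol": (0, ("symbol",), None),
    "end": (0, None, None),
}

def tokens(program):
    # Trivial tokenizer yielding (type, value, pos) tuples, ending with 'end'.
    for pos, ch in enumerate(program):
        if ch == "+":
            yield ("+", None, pos)
        elif not ch.isspace():
            yield ("symbol", ch, pos)
    yield ("end", None, len(program))

p = parser.parser(elements)
tree, pos = p.parse(tokens("a + b"))
# tree is ('add', ('symbol', 'a'), ('symbol', 'b')); pos is 5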
@@ -1,3635 +1,3635 @@
1 # revset.py - revision set queries for mercurial
1 # revset.py - revision set queries for mercurial
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9 import parser, util, error, hbisect, phases
9 import parser, util, error, hbisect, phases
10 import node
10 import node
11 import heapq
11 import heapq
12 import match as matchmod
12 import match as matchmod
13 from i18n import _
13 from i18n import _
14 import encoding
14 import encoding
15 import obsolete as obsmod
15 import obsolete as obsmod
16 import pathutil
16 import pathutil
17 import repoview
17 import repoview
18
18
19 def _revancestors(repo, revs, followfirst):
19 def _revancestors(repo, revs, followfirst):
20 """Like revlog.ancestors(), but supports followfirst."""
20 """Like revlog.ancestors(), but supports followfirst."""
21 if followfirst:
21 if followfirst:
22 cut = 1
22 cut = 1
23 else:
23 else:
24 cut = None
24 cut = None
25 cl = repo.changelog
25 cl = repo.changelog
26
26
27 def iterate():
27 def iterate():
28 revs.sort(reverse=True)
28 revs.sort(reverse=True)
29 irevs = iter(revs)
29 irevs = iter(revs)
30 h = []
30 h = []
31
31
32 inputrev = next(irevs, None)
32 inputrev = next(irevs, None)
33 if inputrev is not None:
33 if inputrev is not None:
34 heapq.heappush(h, -inputrev)
34 heapq.heappush(h, -inputrev)
35
35
36 seen = set()
36 seen = set()
37 while h:
37 while h:
38 current = -heapq.heappop(h)
38 current = -heapq.heappop(h)
39 if current == inputrev:
39 if current == inputrev:
40 inputrev = next(irevs, None)
40 inputrev = next(irevs, None)
41 if inputrev is not None:
41 if inputrev is not None:
42 heapq.heappush(h, -inputrev)
42 heapq.heappush(h, -inputrev)
43 if current not in seen:
43 if current not in seen:
44 seen.add(current)
44 seen.add(current)
45 yield current
45 yield current
46 for parent in cl.parentrevs(current)[:cut]:
46 for parent in cl.parentrevs(current)[:cut]:
47 if parent != node.nullrev:
47 if parent != node.nullrev:
48 heapq.heappush(h, -parent)
48 heapq.heappush(h, -parent)
49
49
50 return generatorset(iterate(), iterasc=False)
50 return generatorset(iterate(), iterasc=False)
51
51
52 def _revdescendants(repo, revs, followfirst):
52 def _revdescendants(repo, revs, followfirst):
53 """Like revlog.descendants() but supports followfirst."""
53 """Like revlog.descendants() but supports followfirst."""
54 if followfirst:
54 if followfirst:
55 cut = 1
55 cut = 1
56 else:
56 else:
57 cut = None
57 cut = None
58
58
59 def iterate():
59 def iterate():
60 cl = repo.changelog
60 cl = repo.changelog
61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
61 # XXX this should be 'parentset.min()' assuming 'parentset' is a
62 # smartset (and if it is not, it should.)
62 # smartset (and if it is not, it should.)
63 first = min(revs)
63 first = min(revs)
64 nullrev = node.nullrev
64 nullrev = node.nullrev
65 if first == nullrev:
65 if first == nullrev:
66 # Are there nodes with a null first parent and a non-null
66 # Are there nodes with a null first parent and a non-null
67 # second one? Maybe. Do we care? Probably not.
67 # second one? Maybe. Do we care? Probably not.
68 for i in cl:
68 for i in cl:
69 yield i
69 yield i
70 else:
70 else:
71 seen = set(revs)
71 seen = set(revs)
72 for i in cl.revs(first + 1):
72 for i in cl.revs(first + 1):
73 for x in cl.parentrevs(i)[:cut]:
73 for x in cl.parentrevs(i)[:cut]:
74 if x != nullrev and x in seen:
74 if x != nullrev and x in seen:
75 seen.add(i)
75 seen.add(i)
76 yield i
76 yield i
77 break
77 break
78
78
79 return generatorset(iterate(), iterasc=True)
79 return generatorset(iterate(), iterasc=True)
80
80
81 def _revsbetween(repo, roots, heads):
81 def _revsbetween(repo, roots, heads):
82 """Return all paths between roots and heads, inclusive of both endpoint
82 """Return all paths between roots and heads, inclusive of both endpoint
83 sets."""
83 sets."""
84 if not roots:
84 if not roots:
85 return baseset()
85 return baseset()
86 parentrevs = repo.changelog.parentrevs
86 parentrevs = repo.changelog.parentrevs
87 visit = list(heads)
87 visit = list(heads)
88 reachable = set()
88 reachable = set()
89 seen = {}
89 seen = {}
90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
90 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
91 # (and if it is not, it should.)
91 # (and if it is not, it should.)
92 minroot = min(roots)
92 minroot = min(roots)
93 roots = set(roots)
93 roots = set(roots)
94 # prefetch all the things! (because python is slow)
94 # prefetch all the things! (because python is slow)
95 reached = reachable.add
95 reached = reachable.add
96 dovisit = visit.append
96 dovisit = visit.append
97 nextvisit = visit.pop
97 nextvisit = visit.pop
98 # open-code the post-order traversal due to the tiny size of
98 # open-code the post-order traversal due to the tiny size of
99 # sys.getrecursionlimit()
99 # sys.getrecursionlimit()
100 while visit:
100 while visit:
101 rev = nextvisit()
101 rev = nextvisit()
102 if rev in roots:
102 if rev in roots:
103 reached(rev)
103 reached(rev)
104 parents = parentrevs(rev)
104 parents = parentrevs(rev)
105 seen[rev] = parents
105 seen[rev] = parents
106 for parent in parents:
106 for parent in parents:
107 if parent >= minroot and parent not in seen:
107 if parent >= minroot and parent not in seen:
108 dovisit(parent)
108 dovisit(parent)
109 if not reachable:
109 if not reachable:
110 return baseset()
110 return baseset()
111 for rev in sorted(seen):
111 for rev in sorted(seen):
112 for parent in seen[rev]:
112 for parent in seen[rev]:
113 if parent in reachable:
113 if parent in reachable:
114 reached(rev)
114 reached(rev)
115 return baseset(sorted(reachable))
115 return baseset(sorted(reachable))
116
116
117 elements = {
117 elements = {
118 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
118 "(": (21, ("group", 1, ")"), ("func", 1, ")")),
119 "##": (20, None, ("_concat", 20)),
119 "##": (20, None, ("_concat", 20)),
120 "~": (18, None, ("ancestor", 18)),
120 "~": (18, None, ("ancestor", 18)),
121 "^": (18, None, ("parent", 18), ("parentpost", 18)),
121 "^": (18, None, ("parent", 18), ("parentpost", 18)),
122 "-": (5, ("negate", 19), ("minus", 5)),
122 "-": (5, ("negate", 19), ("minus", 5)),
123 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
123 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
124 ("dagrangepost", 17)),
124 ("dagrangepost", 17)),
125 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
125 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
126 ("dagrangepost", 17)),
126 ("dagrangepost", 17)),
127 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
127 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
128 "not": (10, ("not", 10)),
128 "not": (10, ("not", 10)),
129 "!": (10, ("not", 10)),
129 "!": (10, ("not", 10)),
130 "and": (5, None, ("and", 5)),
130 "and": (5, None, ("and", 5)),
131 "&": (5, None, ("and", 5)),
131 "&": (5, None, ("and", 5)),
132 "%": (5, None, ("only", 5), ("onlypost", 5)),
132 "%": (5, None, ("only", 5), ("onlypost", 5)),
133 "or": (4, None, ("or", 4)),
133 "or": (4, None, ("or", 4)),
134 "|": (4, None, ("or", 4)),
134 "|": (4, None, ("or", 4)),
135 "+": (4, None, ("or", 4)),
135 "+": (4, None, ("or", 4)),
136 ",": (2, None, ("list", 2)),
136 ",": (2, None, ("list", 2)),
137 ")": (0, None, None),
137 ")": (0, None, None),
138 "symbol": (0, ("symbol",), None),
138 "symbol": (0, ("symbol",), None),
139 "string": (0, ("string",), None),
139 "string": (0, ("string",), None),
140 "end": (0, None, None),
140 "end": (0, None, None),
141 }
141 }
142
142
143 keywords = set(['and', 'or', 'not'])
143 keywords = set(['and', 'or', 'not'])
144
144
145 # default set of valid characters for the initial letter of symbols
145 # default set of valid characters for the initial letter of symbols
146 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
146 _syminitletters = set(c for c in [chr(i) for i in xrange(256)]
147 if c.isalnum() or c in '._@' or ord(c) > 127)
147 if c.isalnum() or c in '._@' or ord(c) > 127)
148
148
149 # default set of valid characters for non-initial letters of symbols
149 # default set of valid characters for non-initial letters of symbols
150 _symletters = set(c for c in [chr(i) for i in xrange(256)]
150 _symletters = set(c for c in [chr(i) for i in xrange(256)]
151 if c.isalnum() or c in '-._/@' or ord(c) > 127)
151 if c.isalnum() or c in '-._/@' or ord(c) > 127)
152
152
153 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
153 def tokenize(program, lookup=None, syminitletters=None, symletters=None):
154 '''
154 '''
155 Parse a revset statement into a stream of tokens
155 Parse a revset statement into a stream of tokens
156
156
157 ``syminitletters`` is the set of valid characters for the initial
157 ``syminitletters`` is the set of valid characters for the initial
158 letter of symbols.
158 letter of symbols.
159
159
160 By default, character ``c`` is recognized as valid for initial
160 By default, character ``c`` is recognized as valid for initial
161 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
161 letter of symbols, if ``c.isalnum() or c in '._@' or ord(c) > 127``.
162
162
163 ``symletters`` is the set of valid characters for non-initial
163 ``symletters`` is the set of valid characters for non-initial
164 letters of symbols.
164 letters of symbols.
165
165
166 By default, character ``c`` is recognized as valid for non-initial
166 By default, character ``c`` is recognized as valid for non-initial
167 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
167 letters of symbols, if ``c.isalnum() or c in '-._/@' or ord(c) > 127``.
168
168
169 Check that @ is a valid unquoted token character (issue3686):
169 Check that @ is a valid unquoted token character (issue3686):
170 >>> list(tokenize("@::"))
170 >>> list(tokenize("@::"))
171 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
171 [('symbol', '@', 0), ('::', None, 1), ('end', None, 3)]
172
172
173 '''
173 '''
174 if syminitletters is None:
174 if syminitletters is None:
175 syminitletters = _syminitletters
175 syminitletters = _syminitletters
176 if symletters is None:
176 if symletters is None:
177 symletters = _symletters
177 symletters = _symletters
178
178
179 pos, l = 0, len(program)
179 pos, l = 0, len(program)
180 while pos < l:
180 while pos < l:
181 c = program[pos]
181 c = program[pos]
182 if c.isspace(): # skip inter-token whitespace
182 if c.isspace(): # skip inter-token whitespace
183 pass
183 pass
184 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
184 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
185 yield ('::', None, pos)
185 yield ('::', None, pos)
186 pos += 1 # skip ahead
186 pos += 1 # skip ahead
187 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
187 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
188 yield ('..', None, pos)
188 yield ('..', None, pos)
189 pos += 1 # skip ahead
189 pos += 1 # skip ahead
190 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
190 elif c == '#' and program[pos:pos + 2] == '##': # look ahead carefully
191 yield ('##', None, pos)
191 yield ('##', None, pos)
192 pos += 1 # skip ahead
192 pos += 1 # skip ahead
193 elif c in "():,-|&+!~^%": # handle simple operators
193 elif c in "():,-|&+!~^%": # handle simple operators
194 yield (c, None, pos)
194 yield (c, None, pos)
195 elif (c in '"\'' or c == 'r' and
195 elif (c in '"\'' or c == 'r' and
196 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
196 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
197 if c == 'r':
197 if c == 'r':
198 pos += 1
198 pos += 1
199 c = program[pos]
199 c = program[pos]
200 decode = lambda x: x
200 decode = lambda x: x
201 else:
201 else:
202 decode = lambda x: x.decode('string-escape')
202 decode = lambda x: x.decode('string-escape')
203 pos += 1
203 pos += 1
204 s = pos
204 s = pos
205 while pos < l: # find closing quote
205 while pos < l: # find closing quote
206 d = program[pos]
206 d = program[pos]
207 if d == '\\': # skip over escaped characters
207 if d == '\\': # skip over escaped characters
208 pos += 2
208 pos += 2
209 continue
209 continue
210 if d == c:
210 if d == c:
211 yield ('string', decode(program[s:pos]), s)
211 yield ('string', decode(program[s:pos]), s)
212 break
212 break
213 pos += 1
213 pos += 1
214 else:
214 else:
215 raise error.ParseError(_("unterminated string"), s)
215 raise error.ParseError(_("unterminated string"), s)
216 # gather up a symbol/keyword
216 # gather up a symbol/keyword
217 elif c in syminitletters:
217 elif c in syminitletters:
218 s = pos
218 s = pos
219 pos += 1
219 pos += 1
220 while pos < l: # find end of symbol
220 while pos < l: # find end of symbol
221 d = program[pos]
221 d = program[pos]
222 if d not in symletters:
222 if d not in symletters:
223 break
223 break
224 if d == '.' and program[pos - 1] == '.': # special case for ..
224 if d == '.' and program[pos - 1] == '.': # special case for ..
225 pos -= 1
225 pos -= 1
226 break
226 break
227 pos += 1
227 pos += 1
228 sym = program[s:pos]
228 sym = program[s:pos]
229 if sym in keywords: # operator keywords
229 if sym in keywords: # operator keywords
230 yield (sym, None, s)
230 yield (sym, None, s)
231 elif '-' in sym:
231 elif '-' in sym:
232 # some jerk gave us foo-bar-baz, try to check if it's a symbol
232 # some jerk gave us foo-bar-baz, try to check if it's a symbol
233 if lookup and lookup(sym):
233 if lookup and lookup(sym):
234 # looks like a real symbol
234 # looks like a real symbol
235 yield ('symbol', sym, s)
235 yield ('symbol', sym, s)
236 else:
236 else:
237 # looks like an expression
237 # looks like an expression
238 parts = sym.split('-')
238 parts = sym.split('-')
239 for p in parts[:-1]:
239 for p in parts[:-1]:
240 if p: # possible consecutive -
240 if p: # possible consecutive -
241 yield ('symbol', p, s)
241 yield ('symbol', p, s)
242 s += len(p)
242 s += len(p)
243 yield ('-', None, pos)
243 yield ('-', None, pos)
244 s += 1
244 s += 1
245 if parts[-1]: # possible trailing -
245 if parts[-1]: # possible trailing -
246 yield ('symbol', parts[-1], s)
246 yield ('symbol', parts[-1], s)
247 else:
247 else:
248 yield ('symbol', sym, s)
248 yield ('symbol', sym, s)
249 pos -= 1
249 pos -= 1
250 else:
250 else:
251 raise error.ParseError(_("syntax error in revset '%s'") %
251 raise error.ParseError(_("syntax error in revset '%s'") %
252 program, pos)
252 program, pos)
253 pos += 1
253 pos += 1
254 yield ('end', None, pos)
254 yield ('end', None, pos)
255
255
256 def parseerrordetail(inst):
256 def parseerrordetail(inst):
257 """Compose error message from specified ParseError object
257 """Compose error message from specified ParseError object
258 """
258 """
259 if len(inst.args) > 1:
259 if len(inst.args) > 1:
260 return _('at %s: %s') % (inst.args[1], inst.args[0])
260 return _('at %s: %s') % (inst.args[1], inst.args[0])
261 else:
261 else:
262 return inst.args[0]
262 return inst.args[0]
263
263
264 # helpers
264 # helpers
265
265
266 def getstring(x, err):
266 def getstring(x, err):
267 if x and (x[0] == 'string' or x[0] == 'symbol'):
267 if x and (x[0] == 'string' or x[0] == 'symbol'):
268 return x[1]
268 return x[1]
269 raise error.ParseError(err)
269 raise error.ParseError(err)
270
270
271 def getlist(x):
271 def getlist(x):
272 if not x:
272 if not x:
273 return []
273 return []
274 if x[0] == 'list':
274 if x[0] == 'list':
275 return getlist(x[1]) + [x[2]]
275 return getlist(x[1]) + [x[2]]
276 return [x]
276 return [x]
277
277
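# A minimal standalone illustration of the flattening done by getlist() above.
# The parser builds left-nested 'list' nodes for "a, b, c"; the tuple shapes
# below are assumed from that output and the helper is illustrative only.
def _flatten_list_tree(x):
    if not x:
        return []
    if x[0] == 'list':
        return _flatten_list_tree(x[1]) + [x[2]]
    return [x]

# _flatten_list_tree(('list',
#                     ('list', ('symbol', 'a'), ('symbol', 'b')),
#                     ('symbol', 'c')))
# -> [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')]
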
278 def getargs(x, min, max, err):
278 def getargs(x, min, max, err):
279 l = getlist(x)
279 l = getlist(x)
280 if len(l) < min or (max >= 0 and len(l) > max):
280 if len(l) < min or (max >= 0 and len(l) > max):
281 raise error.ParseError(err)
281 raise error.ParseError(err)
282 return l
282 return l
283
283
284 def isvalidsymbol(tree):
284 def isvalidsymbol(tree):
285 """Examine whether specified ``tree`` is valid ``symbol`` or not
285 """Examine whether specified ``tree`` is valid ``symbol`` or not
286 """
286 """
287 return tree[0] == 'symbol' and len(tree) > 1
287 return tree[0] == 'symbol' and len(tree) > 1
288
288
289 def getsymbol(tree):
289 def getsymbol(tree):
290 """Get symbol name from valid ``symbol`` in ``tree``
290 """Get symbol name from valid ``symbol`` in ``tree``
291
291
292 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
292 This assumes that ``tree`` is already examined by ``isvalidsymbol``.
293 """
293 """
294 return tree[1]
294 return tree[1]
295
295
296 def isvalidfunc(tree):
296 def isvalidfunc(tree):
297 """Examine whether specified ``tree`` is valid ``func`` or not
297 """Examine whether specified ``tree`` is valid ``func`` or not
298 """
298 """
299 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
299 return tree[0] == 'func' and len(tree) > 1 and isvalidsymbol(tree[1])
300
300
301 def getfuncname(tree):
301 def getfuncname(tree):
302 """Get function name from valid ``func`` in ``tree``
302 """Get function name from valid ``func`` in ``tree``
303
303
304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
304 This assumes that ``tree`` is already examined by ``isvalidfunc``.
305 """
305 """
306 return getsymbol(tree[1])
306 return getsymbol(tree[1])
307
307
308 def getfuncargs(tree):
308 def getfuncargs(tree):
309 """Get list of function arguments from valid ``func`` in ``tree``
309 """Get list of function arguments from valid ``func`` in ``tree``
310
310
311 This assumes that ``tree`` is already examined by ``isvalidfunc``.
311 This assumes that ``tree`` is already examined by ``isvalidfunc``.
312 """
312 """
313 if len(tree) > 2:
313 if len(tree) > 2:
314 return getlist(tree[2])
314 return getlist(tree[2])
315 else:
315 else:
316 return []
316 return []
317
317
318 def getset(repo, subset, x):
318 def getset(repo, subset, x):
319 if not x:
319 if not x:
320 raise error.ParseError(_("missing argument"))
320 raise error.ParseError(_("missing argument"))
321 s = methods[x[0]](repo, subset, *x[1:])
321 s = methods[x[0]](repo, subset, *x[1:])
322 if util.safehasattr(s, 'isascending'):
322 if util.safehasattr(s, 'isascending'):
323 return s
323 return s
324 if (repo.ui.configbool('devel', 'all-warnings')
324 if (repo.ui.configbool('devel', 'all-warnings')
325 or repo.ui.configbool('devel', 'old-revset')):
325 or repo.ui.configbool('devel', 'old-revset')):
326 # else case should not happen, because all non-func are internal,
326 # else case should not happen, because all non-func are internal,
327 # ignoring for now.
327 # ignoring for now.
328 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
328 if x[0] == 'func' and x[1][0] == 'symbol' and x[1][1] in symbols:
329 repo.ui.develwarn('revset "%s" uses a list instead of a smartset '
329 repo.ui.develwarn('revset "%s" uses a list instead of a smartset '
330 '(upgrade your code)' % x[1][1])
330 '(upgrade your code)' % x[1][1])
331 return baseset(s)
331 return baseset(s)
332
332
333 def _getrevsource(repo, r):
333 def _getrevsource(repo, r):
334 extra = repo[r].extra()
334 extra = repo[r].extra()
335 for label in ('source', 'transplant_source', 'rebase_source'):
335 for label in ('source', 'transplant_source', 'rebase_source'):
336 if label in extra:
336 if label in extra:
337 try:
337 try:
338 return repo[extra[label]].rev()
338 return repo[extra[label]].rev()
339 except error.RepoLookupError:
339 except error.RepoLookupError:
340 pass
340 pass
341 return None
341 return None
342
342
343 # operator methods
343 # operator methods
344
344
345 def stringset(repo, subset, x):
345 def stringset(repo, subset, x):
346 x = repo[x].rev()
346 x = repo[x].rev()
347 if (x in subset
347 if (x in subset
348 or x == node.nullrev and isinstance(subset, fullreposet)):
348 or x == node.nullrev and isinstance(subset, fullreposet)):
349 return baseset([x])
349 return baseset([x])
350 return baseset()
350 return baseset()
351
351
352 def rangeset(repo, subset, x, y):
352 def rangeset(repo, subset, x, y):
353 m = getset(repo, fullreposet(repo), x)
353 m = getset(repo, fullreposet(repo), x)
354 n = getset(repo, fullreposet(repo), y)
354 n = getset(repo, fullreposet(repo), y)
355
355
356 if not m or not n:
356 if not m or not n:
357 return baseset()
357 return baseset()
358 m, n = m.first(), n.last()
358 m, n = m.first(), n.last()
359
359
360 if m < n:
360 if m < n:
361 r = spanset(repo, m, n + 1)
361 r = spanset(repo, m, n + 1)
362 else:
362 else:
363 r = spanset(repo, m, n - 1)
363 r = spanset(repo, m, n - 1)
364 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
364 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
365 # necessary to ensure we preserve the order in subset.
365 # necessary to ensure we preserve the order in subset.
366 #
366 #
367 # This has performance implication, carrying the sorting over when possible
367 # This has performance implication, carrying the sorting over when possible
368 # would be more efficient.
368 # would be more efficient.
369 return r & subset
369 return r & subset
370
370
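# A standalone sketch of the 'x:y' range semantics above: take the first
# revision of the left operand and the last of the right operand, build the
# inclusive range between them (descending when the left end is larger), and
# keep only members of subset. Plain integers stand in for smartset classes;
# illustrative only.
def _range_revs(m_revs, n_revs, subset):
    if not m_revs or not n_revs:
        return []
    m, n = m_revs[0], n_revs[-1]
    if m < n:
        span = range(m, n + 1)        # ascending, inclusive of both ends
    else:
        span = range(m, n - 1, -1)    # descending (also handles m == n)
    members = set(subset)
    return [rev for rev in span if rev in members]

# _range_revs([2], [5], subset=range(10)) -> [2, 3, 4, 5]
# _range_revs([5], [2], subset=range(10)) -> [5, 4, 3, 2]
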
371 def dagrange(repo, subset, x, y):
371 def dagrange(repo, subset, x, y):
372 r = fullreposet(repo)
372 r = fullreposet(repo)
373 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
373 xs = _revsbetween(repo, getset(repo, r, x), getset(repo, r, y))
374 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
374 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
375 # necessary to ensure we preserve the order in subset.
375 # necessary to ensure we preserve the order in subset.
376 return xs & subset
376 return xs & subset
377
377
378 def andset(repo, subset, x, y):
378 def andset(repo, subset, x, y):
379 return getset(repo, getset(repo, subset, x), y)
379 return getset(repo, getset(repo, subset, x), y)
380
380
381 def orset(repo, subset, *xs):
381 def orset(repo, subset, *xs):
382 rs = [getset(repo, subset, x) for x in xs]
382 rs = [getset(repo, subset, x) for x in xs]
383 return _combinesets(rs)
383 return _combinesets(rs)
384
384
385 def notset(repo, subset, x):
385 def notset(repo, subset, x):
386 return subset - getset(repo, subset, x)
386 return subset - getset(repo, subset, x)
387
387
388 def listset(repo, subset, a, b):
388 def listset(repo, subset, a, b):
389 raise error.ParseError(_("can't use a list in this context"))
389 raise error.ParseError(_("can't use a list in this context"))
390
390
391 def func(repo, subset, a, b):
391 def func(repo, subset, a, b):
392 if a[0] == 'symbol' and a[1] in symbols:
392 if a[0] == 'symbol' and a[1] in symbols:
393 return symbols[a[1]](repo, subset, b)
393 return symbols[a[1]](repo, subset, b)
394
394
395 keep = lambda fn: getattr(fn, '__doc__', None) is not None
395 keep = lambda fn: getattr(fn, '__doc__', None) is not None
396
396
397 syms = [s for (s, fn) in symbols.items() if keep(fn)]
397 syms = [s for (s, fn) in symbols.items() if keep(fn)]
398 raise error.UnknownIdentifier(a[1], syms)
398 raise error.UnknownIdentifier(a[1], syms)
399
399
400 # functions
400 # functions
401
401
402 def adds(repo, subset, x):
402 def adds(repo, subset, x):
403 """``adds(pattern)``
403 """``adds(pattern)``
404 Changesets that add a file matching pattern.
404 Changesets that add a file matching pattern.
405
405
406 The pattern without explicit kind like ``glob:`` is expected to be
406 The pattern without explicit kind like ``glob:`` is expected to be
407 relative to the current directory and match against a file or a
407 relative to the current directory and match against a file or a
408 directory.
408 directory.
409 """
409 """
410 # i18n: "adds" is a keyword
410 # i18n: "adds" is a keyword
411 pat = getstring(x, _("adds requires a pattern"))
411 pat = getstring(x, _("adds requires a pattern"))
412 return checkstatus(repo, subset, pat, 1)
412 return checkstatus(repo, subset, pat, 1)
413
413
414 def ancestor(repo, subset, x):
414 def ancestor(repo, subset, x):
415 """``ancestor(*changeset)``
415 """``ancestor(*changeset)``
416 A greatest common ancestor of the changesets.
416 A greatest common ancestor of the changesets.
417
417
418 Accepts 0 or more changesets.
418 Accepts 0 or more changesets.
419 Will return an empty set when passed no arguments.
419 Will return an empty set when passed no arguments.
420 The greatest common ancestor of a single changeset is that changeset.
420 The greatest common ancestor of a single changeset is that changeset.
421 """
421 """
422 # i18n: "ancestor" is a keyword
422 # i18n: "ancestor" is a keyword
423 l = getlist(x)
423 l = getlist(x)
424 rl = fullreposet(repo)
424 rl = fullreposet(repo)
425 anc = None
425 anc = None
426
426
427 # (getset(repo, rl, i) for i in l) generates a list of lists
427 # (getset(repo, rl, i) for i in l) generates a list of lists
428 for revs in (getset(repo, rl, i) for i in l):
428 for revs in (getset(repo, rl, i) for i in l):
429 for r in revs:
429 for r in revs:
430 if anc is None:
430 if anc is None:
431 anc = repo[r]
431 anc = repo[r]
432 else:
432 else:
433 anc = anc.ancestor(repo[r])
433 anc = anc.ancestor(repo[r])
434
434
435 if anc is not None and anc.rev() in subset:
435 if anc is not None and anc.rev() in subset:
436 return baseset([anc.rev()])
436 return baseset([anc.rev()])
437 return baseset()
437 return baseset()
438
438
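# A standalone sketch of the pairwise folding done by ancestor() above: the
# greatest common ancestor of the whole argument set is obtained by folding
# each revision into a running result. The ``gca`` callable and the examples
# below are illustrative only.
def _fold_gca(revs, gca):
    anc = None
    for r in revs:
        anc = r if anc is None else gca(anc, r)
    return anc

# For a toy DAG 0 <- 1 <- {2, 3} with a pairwise gca() on that DAG:
#   _fold_gca([2, 3], gca) -> 1
#   _fold_gca([2], gca)    -> 2     (the GCA of a single changeset is itself)
#   _fold_gca([], gca)     -> None  (no arguments yields an empty result)
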
439 def _ancestors(repo, subset, x, followfirst=False):
439 def _ancestors(repo, subset, x, followfirst=False):
440 heads = getset(repo, fullreposet(repo), x)
440 heads = getset(repo, fullreposet(repo), x)
441 if not heads:
441 if not heads:
442 return baseset()
442 return baseset()
443 s = _revancestors(repo, heads, followfirst)
443 s = _revancestors(repo, heads, followfirst)
444 return subset & s
444 return subset & s
445
445
446 def ancestors(repo, subset, x):
446 def ancestors(repo, subset, x):
447 """``ancestors(set)``
447 """``ancestors(set)``
448 Changesets that are ancestors of a changeset in set.
448 Changesets that are ancestors of a changeset in set.
449 """
449 """
450 return _ancestors(repo, subset, x)
450 return _ancestors(repo, subset, x)
451
451
452 def _firstancestors(repo, subset, x):
452 def _firstancestors(repo, subset, x):
453 # ``_firstancestors(set)``
453 # ``_firstancestors(set)``
454 # Like ``ancestors(set)`` but follows only the first parents.
454 # Like ``ancestors(set)`` but follows only the first parents.
455 return _ancestors(repo, subset, x, followfirst=True)
455 return _ancestors(repo, subset, x, followfirst=True)
456
456
457 def ancestorspec(repo, subset, x, n):
457 def ancestorspec(repo, subset, x, n):
458 """``set~n``
458 """``set~n``
459 Changesets that are the Nth ancestor (first parents only) of a changeset
459 Changesets that are the Nth ancestor (first parents only) of a changeset
460 in set.
460 in set.
461 """
461 """
462 try:
462 try:
463 n = int(n[1])
463 n = int(n[1])
464 except (TypeError, ValueError):
464 except (TypeError, ValueError):
465 raise error.ParseError(_("~ expects a number"))
465 raise error.ParseError(_("~ expects a number"))
466 ps = set()
466 ps = set()
467 cl = repo.changelog
467 cl = repo.changelog
468 for r in getset(repo, fullreposet(repo), x):
468 for r in getset(repo, fullreposet(repo), x):
469 for i in range(n):
469 for i in range(n):
470 r = cl.parentrevs(r)[0]
470 r = cl.parentrevs(r)[0]
471 ps.add(r)
471 ps.add(r)
472 return subset & ps
472 return subset & ps
473
473
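# A standalone sketch of the '~n' logic above: step to the first parent n
# times for each revision in the operand set. The ``parents`` mapping is a
# plain dict standing in for the changelog; illustrative only.
def _nth_first_ancestors(revs, n, parents):
    result = set()
    for r in revs:
        for _ in range(n):
            r = parents[r][0]         # follow first parents only
        result.add(r)
    return result

# With parents = {3: [2], 2: [1], 1: [0]}:
#   _nth_first_ancestors([3], 2, parents) -> {1}
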
474 def author(repo, subset, x):
474 def author(repo, subset, x):
475 """``author(string)``
475 """``author(string)``
476 Alias for ``user(string)``.
476 Alias for ``user(string)``.
477 """
477 """
478 # i18n: "author" is a keyword
478 # i18n: "author" is a keyword
479 n = encoding.lower(getstring(x, _("author requires a string")))
479 n = encoding.lower(getstring(x, _("author requires a string")))
480 kind, pattern, matcher = _substringmatcher(n)
480 kind, pattern, matcher = _substringmatcher(n)
481 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
481 return subset.filter(lambda x: matcher(encoding.lower(repo[x].user())))
482
482
483 def bisect(repo, subset, x):
483 def bisect(repo, subset, x):
484 """``bisect(string)``
484 """``bisect(string)``
485 Changesets marked in the specified bisect status:
485 Changesets marked in the specified bisect status:
486
486
487 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
487 - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
488 - ``goods``, ``bads`` : csets topologically good/bad
488 - ``goods``, ``bads`` : csets topologically good/bad
489 - ``range`` : csets taking part in the bisection
489 - ``range`` : csets taking part in the bisection
490 - ``pruned`` : csets that are goods, bads or skipped
490 - ``pruned`` : csets that are goods, bads or skipped
491 - ``untested`` : csets whose fate is yet unknown
491 - ``untested`` : csets whose fate is yet unknown
492 - ``ignored`` : csets ignored due to DAG topology
492 - ``ignored`` : csets ignored due to DAG topology
493 - ``current`` : the cset currently being bisected
493 - ``current`` : the cset currently being bisected
494 """
494 """
495 # i18n: "bisect" is a keyword
495 # i18n: "bisect" is a keyword
496 status = getstring(x, _("bisect requires a string")).lower()
496 status = getstring(x, _("bisect requires a string")).lower()
497 state = set(hbisect.get(repo, status))
497 state = set(hbisect.get(repo, status))
498 return subset & state
498 return subset & state
499
499
500 # Backward-compatibility
500 # Backward-compatibility
501 # - no help entry so that we do not advertise it any more
501 # - no help entry so that we do not advertise it any more
502 def bisected(repo, subset, x):
502 def bisected(repo, subset, x):
503 return bisect(repo, subset, x)
503 return bisect(repo, subset, x)
504
504
505 def bookmark(repo, subset, x):
505 def bookmark(repo, subset, x):
506 """``bookmark([name])``
506 """``bookmark([name])``
507 The named bookmark or all bookmarks.
507 The named bookmark or all bookmarks.
508
508
509 If `name` starts with `re:`, the remainder of the name is treated as
509 If `name` starts with `re:`, the remainder of the name is treated as
510 a regular expression. To match a bookmark that actually starts with `re:`,
510 a regular expression. To match a bookmark that actually starts with `re:`,
511 use the prefix `literal:`.
511 use the prefix `literal:`.
512 """
512 """
513 # i18n: "bookmark" is a keyword
513 # i18n: "bookmark" is a keyword
514 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
514 args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
515 if args:
515 if args:
516 bm = getstring(args[0],
516 bm = getstring(args[0],
517 # i18n: "bookmark" is a keyword
517 # i18n: "bookmark" is a keyword
518 _('the argument to bookmark must be a string'))
518 _('the argument to bookmark must be a string'))
519 kind, pattern, matcher = _stringmatcher(bm)
519 kind, pattern, matcher = _stringmatcher(bm)
520 bms = set()
520 bms = set()
521 if kind == 'literal':
521 if kind == 'literal':
522 bmrev = repo._bookmarks.get(pattern, None)
522 bmrev = repo._bookmarks.get(pattern, None)
523 if not bmrev:
523 if not bmrev:
524 raise error.RepoLookupError(_("bookmark '%s' does not exist")
524 raise error.RepoLookupError(_("bookmark '%s' does not exist")
525 % bm)
525 % bm)
526 bms.add(repo[bmrev].rev())
526 bms.add(repo[bmrev].rev())
527 else:
527 else:
528 matchrevs = set()
528 matchrevs = set()
529 for name, bmrev in repo._bookmarks.iteritems():
529 for name, bmrev in repo._bookmarks.iteritems():
530 if matcher(name):
530 if matcher(name):
531 matchrevs.add(bmrev)
531 matchrevs.add(bmrev)
532 if not matchrevs:
532 if not matchrevs:
533 raise error.RepoLookupError(_("no bookmarks exist"
533 raise error.RepoLookupError(_("no bookmarks exist"
534 " that match '%s'") % pattern)
534 " that match '%s'") % pattern)
535 for bmrev in matchrevs:
535 for bmrev in matchrevs:
536 bms.add(repo[bmrev].rev())
536 bms.add(repo[bmrev].rev())
537 else:
537 else:
538 bms = set([repo[r].rev()
538 bms = set([repo[r].rev()
539 for r in repo._bookmarks.values()])
539 for r in repo._bookmarks.values()])
540 bms -= set([node.nullrev])
540 bms -= set([node.nullrev])
541 return subset & bms
541 return subset & bms
542
542
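# A standalone sketch of the name-matching convention documented above (also
# used by branch() and extra()): a 're:' prefix selects regular-expression
# matching, a 'literal:' prefix forces exact matching, and anything else
# matches literally. The real helper is _stringmatcher(), which is not shown
# here, so treat this as illustrative only.
import re as _re_sketch

def _make_matcher(pattern):
    if pattern.startswith('re:'):
        rx = _re_sketch.compile(pattern[3:])
        return 're', pattern[3:], lambda s: rx.search(s) is not None
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, lambda s: s == pattern

# _make_matcher('re:^release-')[2]('release-1.0')        -> True
# _make_matcher('literal:re:odd-name')[2]('re:odd-name') -> True
# _make_matcher('stable')[2]('stable')                   -> True
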
543 def branch(repo, subset, x):
543 def branch(repo, subset, x):
544 """``branch(string or set)``
544 """``branch(string or set)``
545 All changesets belonging to the given branch or the branches of the given
545 All changesets belonging to the given branch or the branches of the given
546 changesets.
546 changesets.
547
547
548 If `string` starts with `re:`, the remainder of the name is treated as
548 If `string` starts with `re:`, the remainder of the name is treated as
549 a regular expression. To match a branch that actually starts with `re:`,
549 a regular expression. To match a branch that actually starts with `re:`,
550 use the prefix `literal:`.
550 use the prefix `literal:`.
551 """
551 """
552 getbi = repo.revbranchcache().branchinfo
552 getbi = repo.revbranchcache().branchinfo
553
553
554 try:
554 try:
555 b = getstring(x, '')
555 b = getstring(x, '')
556 except error.ParseError:
556 except error.ParseError:
557 # not a string, but another revspec, e.g. tip()
557 # not a string, but another revspec, e.g. tip()
558 pass
558 pass
559 else:
559 else:
560 kind, pattern, matcher = _stringmatcher(b)
560 kind, pattern, matcher = _stringmatcher(b)
561 if kind == 'literal':
561 if kind == 'literal':
562 # note: falls through to the revspec case if no branch with
562 # note: falls through to the revspec case if no branch with
563 # this name exists
563 # this name exists
564 if pattern in repo.branchmap():
564 if pattern in repo.branchmap():
565 return subset.filter(lambda r: matcher(getbi(r)[0]))
565 return subset.filter(lambda r: matcher(getbi(r)[0]))
566 else:
566 else:
567 return subset.filter(lambda r: matcher(getbi(r)[0]))
567 return subset.filter(lambda r: matcher(getbi(r)[0]))
568
568
569 s = getset(repo, fullreposet(repo), x)
569 s = getset(repo, fullreposet(repo), x)
570 b = set()
570 b = set()
571 for r in s:
571 for r in s:
572 b.add(getbi(r)[0])
572 b.add(getbi(r)[0])
573 c = s.__contains__
573 c = s.__contains__
574 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
574 return subset.filter(lambda r: c(r) or getbi(r)[0] in b)
575
575
576 def bumped(repo, subset, x):
576 def bumped(repo, subset, x):
577 """``bumped()``
577 """``bumped()``
578 Mutable changesets marked as successors of public changesets.
578 Mutable changesets marked as successors of public changesets.
579
579
580 Only non-public and non-obsolete changesets can be `bumped`.
580 Only non-public and non-obsolete changesets can be `bumped`.
581 """
581 """
582 # i18n: "bumped" is a keyword
582 # i18n: "bumped" is a keyword
583 getargs(x, 0, 0, _("bumped takes no arguments"))
583 getargs(x, 0, 0, _("bumped takes no arguments"))
584 bumped = obsmod.getrevs(repo, 'bumped')
584 bumped = obsmod.getrevs(repo, 'bumped')
585 return subset & bumped
585 return subset & bumped
586
586
587 def bundle(repo, subset, x):
587 def bundle(repo, subset, x):
588 """``bundle()``
588 """``bundle()``
589 Changesets in the bundle.
589 Changesets in the bundle.
590
590
591 Bundle must be specified by the -R option."""
591 Bundle must be specified by the -R option."""
592
592
593 try:
593 try:
594 bundlerevs = repo.changelog.bundlerevs
594 bundlerevs = repo.changelog.bundlerevs
595 except AttributeError:
595 except AttributeError:
596 raise util.Abort(_("no bundle provided - specify with -R"))
596 raise util.Abort(_("no bundle provided - specify with -R"))
597 return subset & bundlerevs
597 return subset & bundlerevs
598
598
599 def checkstatus(repo, subset, pat, field):
599 def checkstatus(repo, subset, pat, field):
600 hasset = matchmod.patkind(pat) == 'set'
600 hasset = matchmod.patkind(pat) == 'set'
601
601
602 mcache = [None]
602 mcache = [None]
603 def matches(x):
603 def matches(x):
604 c = repo[x]
604 c = repo[x]
605 if not mcache[0] or hasset:
605 if not mcache[0] or hasset:
606 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
606 mcache[0] = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
607 m = mcache[0]
607 m = mcache[0]
608 fname = None
608 fname = None
609 if not m.anypats() and len(m.files()) == 1:
609 if not m.anypats() and len(m.files()) == 1:
610 fname = m.files()[0]
610 fname = m.files()[0]
611 if fname is not None:
611 if fname is not None:
612 if fname not in c.files():
612 if fname not in c.files():
613 return False
613 return False
614 else:
614 else:
615 for f in c.files():
615 for f in c.files():
616 if m(f):
616 if m(f):
617 break
617 break
618 else:
618 else:
619 return False
619 return False
620 files = repo.status(c.p1().node(), c.node())[field]
620 files = repo.status(c.p1().node(), c.node())[field]
621 if fname is not None:
621 if fname is not None:
622 if fname in files:
622 if fname in files:
623 return True
623 return True
624 else:
624 else:
625 for f in files:
625 for f in files:
626 if m(f):
626 if m(f):
627 return True
627 return True
628
628
629 return subset.filter(matches)
629 return subset.filter(matches)
630
630
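# Clarifying note (assumed from the conventional ordering of repo.status()
# results, which is not shown here): ``field`` indexes the status tuple,
# where 0 is 'modified', 1 is 'added' and 2 is 'removed'. That is why adds()
# above calls checkstatus(repo, subset, pat, 1): it keeps changesets whose
# status against their first parent lists a matching file as added.
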
631 def _children(repo, narrow, parentset):
631 def _children(repo, narrow, parentset):
632 if not parentset:
632 if not parentset:
633 return baseset()
633 return baseset()
634 cs = set()
634 cs = set()
635 pr = repo.changelog.parentrevs
635 pr = repo.changelog.parentrevs
636 minrev = parentset.min()
636 minrev = parentset.min()
637 for r in narrow:
637 for r in narrow:
638 if r <= minrev:
638 if r <= minrev:
639 continue
639 continue
640 for p in pr(r):
640 for p in pr(r):
641 if p in parentset:
641 if p in parentset:
642 cs.add(r)
642 cs.add(r)
643 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
643 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
644 # This does not break because of other fullreposet misbehavior.
644 # This does not break because of other fullreposet misbehavior.
645 return baseset(cs)
645 return baseset(cs)
646
646
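# A standalone sketch of the child scan in _children() above: keep every
# candidate revision that has at least one parent in parentset. Plain dicts
# and sets stand in for the changelog; the minrev short-cut is omitted.
# Illustrative only.
def _children_of(candidates, parentset, parents):
    parentset = set(parentset)
    return set(r for r in candidates
               if any(p in parentset for p in parents.get(r, ())))

# With parents = {1: [0], 2: [1], 3: [1]}:
#   _children_of([1, 2, 3], {1}, parents) -> {2, 3}
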
647 def children(repo, subset, x):
647 def children(repo, subset, x):
648 """``children(set)``
648 """``children(set)``
649 Child changesets of changesets in set.
649 Child changesets of changesets in set.
650 """
650 """
651 s = getset(repo, fullreposet(repo), x)
651 s = getset(repo, fullreposet(repo), x)
652 cs = _children(repo, subset, s)
652 cs = _children(repo, subset, s)
653 return subset & cs
653 return subset & cs
654
654
655 def closed(repo, subset, x):
655 def closed(repo, subset, x):
656 """``closed()``
656 """``closed()``
657 Changeset is closed.
657 Changeset is closed.
658 """
658 """
659 # i18n: "closed" is a keyword
659 # i18n: "closed" is a keyword
660 getargs(x, 0, 0, _("closed takes no arguments"))
660 getargs(x, 0, 0, _("closed takes no arguments"))
661 return subset.filter(lambda r: repo[r].closesbranch())
661 return subset.filter(lambda r: repo[r].closesbranch())
662
662
663 def contains(repo, subset, x):
663 def contains(repo, subset, x):
664 """``contains(pattern)``
664 """``contains(pattern)``
665 The revision's manifest contains a file matching pattern (but might not
665 The revision's manifest contains a file matching pattern (but might not
666 modify it). See :hg:`help patterns` for information about file patterns.
666 modify it). See :hg:`help patterns` for information about file patterns.
667
667
668 The pattern without explicit kind like ``glob:`` is expected to be
668 The pattern without explicit kind like ``glob:`` is expected to be
669 relative to the current directory and match against a file exactly
669 relative to the current directory and match against a file exactly
670 for efficiency.
670 for efficiency.
671 """
671 """
672 # i18n: "contains" is a keyword
672 # i18n: "contains" is a keyword
673 pat = getstring(x, _("contains requires a pattern"))
673 pat = getstring(x, _("contains requires a pattern"))
674
674
675 def matches(x):
675 def matches(x):
676 if not matchmod.patkind(pat):
676 if not matchmod.patkind(pat):
677 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
677 pats = pathutil.canonpath(repo.root, repo.getcwd(), pat)
678 if pats in repo[x]:
678 if pats in repo[x]:
679 return True
679 return True
680 else:
680 else:
681 c = repo[x]
681 c = repo[x]
682 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
682 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=c)
683 for f in c.manifest():
683 for f in c.manifest():
684 if m(f):
684 if m(f):
685 return True
685 return True
686 return False
686 return False
687
687
688 return subset.filter(matches)
688 return subset.filter(matches)
689
689
690 def converted(repo, subset, x):
690 def converted(repo, subset, x):
691 """``converted([id])``
691 """``converted([id])``
692 Changesets converted from the given identifier in the old repository if
692 Changesets converted from the given identifier in the old repository if
693 present, or all converted changesets if no identifier is specified.
693 present, or all converted changesets if no identifier is specified.
694 """
694 """
695
695
696 # The old revision identifier cannot be resolved in this repository, so do
696 # The old revision identifier cannot be resolved in this repository, so do
697 # a simple string comparison and hope for the best
697 # a simple string comparison and hope for the best
698
698
699 rev = None
699 rev = None
700 # i18n: "converted" is a keyword
700 # i18n: "converted" is a keyword
701 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
701 l = getargs(x, 0, 1, _('converted takes one or no arguments'))
702 if l:
702 if l:
703 # i18n: "converted" is a keyword
703 # i18n: "converted" is a keyword
704 rev = getstring(l[0], _('converted requires a revision'))
704 rev = getstring(l[0], _('converted requires a revision'))
705
705
706 def _matchvalue(r):
706 def _matchvalue(r):
707 source = repo[r].extra().get('convert_revision', None)
707 source = repo[r].extra().get('convert_revision', None)
708 return source is not None and (rev is None or source.startswith(rev))
708 return source is not None and (rev is None or source.startswith(rev))
709
709
710 return subset.filter(lambda r: _matchvalue(r))
710 return subset.filter(lambda r: _matchvalue(r))
711
711
712 def date(repo, subset, x):
712 def date(repo, subset, x):
713 """``date(interval)``
713 """``date(interval)``
714 Changesets within the interval, see :hg:`help dates`.
714 Changesets within the interval, see :hg:`help dates`.
715 """
715 """
716 # i18n: "date" is a keyword
716 # i18n: "date" is a keyword
717 ds = getstring(x, _("date requires a string"))
717 ds = getstring(x, _("date requires a string"))
718 dm = util.matchdate(ds)
718 dm = util.matchdate(ds)
719 return subset.filter(lambda x: dm(repo[x].date()[0]))
719 return subset.filter(lambda x: dm(repo[x].date()[0]))
720
720
721 def desc(repo, subset, x):
721 def desc(repo, subset, x):
722 """``desc(string)``
722 """``desc(string)``
723 Search commit message for string. The match is case-insensitive.
723 Search commit message for string. The match is case-insensitive.
724 """
724 """
725 # i18n: "desc" is a keyword
725 # i18n: "desc" is a keyword
726 ds = encoding.lower(getstring(x, _("desc requires a string")))
726 ds = encoding.lower(getstring(x, _("desc requires a string")))
727
727
728 def matches(x):
728 def matches(x):
729 c = repo[x]
729 c = repo[x]
730 return ds in encoding.lower(c.description())
730 return ds in encoding.lower(c.description())
731
731
732 return subset.filter(matches)
732 return subset.filter(matches)
733
733
734 def _descendants(repo, subset, x, followfirst=False):
734 def _descendants(repo, subset, x, followfirst=False):
735 roots = getset(repo, fullreposet(repo), x)
735 roots = getset(repo, fullreposet(repo), x)
736 if not roots:
736 if not roots:
737 return baseset()
737 return baseset()
738 s = _revdescendants(repo, roots, followfirst)
738 s = _revdescendants(repo, roots, followfirst)
739
739
740 # Both sets need to be ascending in order to lazily return the union
740 # Both sets need to be ascending in order to lazily return the union
741 # in the correct order.
741 # in the correct order.
742 base = subset & roots
742 base = subset & roots
743 desc = subset & s
743 desc = subset & s
744 result = base + desc
744 result = base + desc
745 if subset.isascending():
745 if subset.isascending():
746 result.sort()
746 result.sort()
747 elif subset.isdescending():
747 elif subset.isdescending():
748 result.sort(reverse=True)
748 result.sort(reverse=True)
749 else:
749 else:
750 result = subset & result
750 result = subset & result
751 return result
751 return result
752
752
753 def descendants(repo, subset, x):
753 def descendants(repo, subset, x):
754 """``descendants(set)``
754 """``descendants(set)``
755 Changesets which are descendants of changesets in set.
755 Changesets which are descendants of changesets in set.
756 """
756 """
757 return _descendants(repo, subset, x)
757 return _descendants(repo, subset, x)
758
758
759 def _firstdescendants(repo, subset, x):
759 def _firstdescendants(repo, subset, x):
760 # ``_firstdescendants(set)``
760 # ``_firstdescendants(set)``
761 # Like ``descendants(set)`` but follows only the first parents.
761 # Like ``descendants(set)`` but follows only the first parents.
762 return _descendants(repo, subset, x, followfirst=True)
762 return _descendants(repo, subset, x, followfirst=True)
763
763
764 def destination(repo, subset, x):
764 def destination(repo, subset, x):
765 """``destination([set])``
765 """``destination([set])``
766 Changesets that were created by a graft, transplant or rebase operation,
766 Changesets that were created by a graft, transplant or rebase operation,
767 with the given revisions specified as the source. Omitting the optional set
767 with the given revisions specified as the source. Omitting the optional set
768 is the same as passing all().
768 is the same as passing all().
769 """
769 """
770 if x is not None:
770 if x is not None:
771 sources = getset(repo, fullreposet(repo), x)
771 sources = getset(repo, fullreposet(repo), x)
772 else:
772 else:
773 sources = fullreposet(repo)
773 sources = fullreposet(repo)
774
774
775 dests = set()
775 dests = set()
776
776
777 # subset contains all of the possible destinations that can be returned, so
777 # subset contains all of the possible destinations that can be returned, so
778 # iterate over them and see if their source(s) were provided in the arg set.
778 # iterate over them and see if their source(s) were provided in the arg set.
779 # Even if the immediate src of r is not in the arg set, src's source (or
779 # Even if the immediate src of r is not in the arg set, src's source (or
780 # further back) may be. Scanning back further than the immediate src allows
780 # further back) may be. Scanning back further than the immediate src allows
781 # transitive transplants and rebases to yield the same results as transitive
781 # transitive transplants and rebases to yield the same results as transitive
782 # grafts.
782 # grafts.
783 for r in subset:
783 for r in subset:
784 src = _getrevsource(repo, r)
784 src = _getrevsource(repo, r)
785 lineage = None
785 lineage = None
786
786
787 while src is not None:
787 while src is not None:
788 if lineage is None:
788 if lineage is None:
789 lineage = list()
789 lineage = list()
790
790
791 lineage.append(r)
791 lineage.append(r)
792
792
793 # The visited lineage is a match if the current source is in the arg
793 # The visited lineage is a match if the current source is in the arg
794 # set. Since every candidate dest is visited by way of iterating
794 # set. Since every candidate dest is visited by way of iterating
795 # subset, any dests further back in the lineage will be tested by a
795 # subset, any dests further back in the lineage will be tested by a
796 # different iteration over subset. Likewise, if the src was already
796 # different iteration over subset. Likewise, if the src was already
797 # selected, the current lineage can be selected without going back
797 # selected, the current lineage can be selected without going back
798 # further.
798 # further.
799 if src in sources or src in dests:
799 if src in sources or src in dests:
800 dests.update(lineage)
800 dests.update(lineage)
801 break
801 break
802
802
803 r = src
803 r = src
804 src = _getrevsource(repo, r)
804 src = _getrevsource(repo, r)
805
805
806 return subset.filter(dests.__contains__)
806 return subset.filter(dests.__contains__)
807
807
808 def divergent(repo, subset, x):
808 def divergent(repo, subset, x):
809 """``divergent()``
809 """``divergent()``
810 Final successors of changesets with an alternative set of final successors.
810 Final successors of changesets with an alternative set of final successors.
811 """
811 """
812 # i18n: "divergent" is a keyword
812 # i18n: "divergent" is a keyword
813 getargs(x, 0, 0, _("divergent takes no arguments"))
813 getargs(x, 0, 0, _("divergent takes no arguments"))
814 divergent = obsmod.getrevs(repo, 'divergent')
814 divergent = obsmod.getrevs(repo, 'divergent')
815 return subset & divergent
815 return subset & divergent
816
816
817 def extinct(repo, subset, x):
817 def extinct(repo, subset, x):
818 """``extinct()``
818 """``extinct()``
819 Obsolete changesets with obsolete descendants only.
819 Obsolete changesets with obsolete descendants only.
820 """
820 """
821 # i18n: "extinct" is a keyword
821 # i18n: "extinct" is a keyword
822 getargs(x, 0, 0, _("extinct takes no arguments"))
822 getargs(x, 0, 0, _("extinct takes no arguments"))
823 extincts = obsmod.getrevs(repo, 'extinct')
823 extincts = obsmod.getrevs(repo, 'extinct')
824 return subset & extincts
824 return subset & extincts
825
825
826 def extra(repo, subset, x):
826 def extra(repo, subset, x):
827 """``extra(label, [value])``
827 """``extra(label, [value])``
828 Changesets with the given label in the extra metadata, with the given
828 Changesets with the given label in the extra metadata, with the given
829 optional value.
829 optional value.
830
830
831 If `value` starts with `re:`, the remainder of the value is treated as
831 If `value` starts with `re:`, the remainder of the value is treated as
832 a regular expression. To match a value that actually starts with `re:`,
832 a regular expression. To match a value that actually starts with `re:`,
833 use the prefix `literal:`.
833 use the prefix `literal:`.
834 """
834 """
835
835
836 # i18n: "extra" is a keyword
836 # i18n: "extra" is a keyword
837 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
837 l = getargs(x, 1, 2, _('extra takes at least 1 and at most 2 arguments'))
838 # i18n: "extra" is a keyword
838 # i18n: "extra" is a keyword
839 label = getstring(l[0], _('first argument to extra must be a string'))
839 label = getstring(l[0], _('first argument to extra must be a string'))
840 value = None
840 value = None
841
841
842 if len(l) > 1:
842 if len(l) > 1:
843 # i18n: "extra" is a keyword
843 # i18n: "extra" is a keyword
844 value = getstring(l[1], _('second argument to extra must be a string'))
844 value = getstring(l[1], _('second argument to extra must be a string'))
845 kind, value, matcher = _stringmatcher(value)
845 kind, value, matcher = _stringmatcher(value)
846
846
847 def _matchvalue(r):
847 def _matchvalue(r):
848 extra = repo[r].extra()
848 extra = repo[r].extra()
849 return label in extra and (value is None or matcher(extra[label]))
849 return label in extra and (value is None or matcher(extra[label]))
850
850
851 return subset.filter(lambda r: _matchvalue(r))
851 return subset.filter(lambda r: _matchvalue(r))
852
852
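# Illustrative usage of the predicate above: extra('source') keeps any
# changeset whose extra dict carries a 'source' key, while
# extra('branch', 're:^stable') additionally requires the stored value to
# match the regular expression (see the re:/literal: convention documented
# in bookmark() and branch()).
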
853 def filelog(repo, subset, x):
853 def filelog(repo, subset, x):
854 """``filelog(pattern)``
854 """``filelog(pattern)``
855 Changesets connected to the specified filelog.
855 Changesets connected to the specified filelog.
856
856
857 For performance reasons, visits only revisions mentioned in the file-level
857 For performance reasons, visits only revisions mentioned in the file-level
858 filelog, rather than filtering through all changesets (much faster, but
858 filelog, rather than filtering through all changesets (much faster, but
859 doesn't include deletes or duplicate changes). For a slower, more accurate
859 doesn't include deletes or duplicate changes). For a slower, more accurate
860 result, use ``file()``.
860 result, use ``file()``.
861
861
862 The pattern without explicit kind like ``glob:`` is expected to be
862 The pattern without explicit kind like ``glob:`` is expected to be
863 relative to the current directory and match against a file exactly
863 relative to the current directory and match against a file exactly
864 for efficiency.
864 for efficiency.
865
865
866 If some linkrev points to revisions filtered by the current repoview, we'll
866 If some linkrev points to revisions filtered by the current repoview, we'll
867 work around it to return a non-filtered value.
867 work around it to return a non-filtered value.
868 """
868 """
869
869
870 # i18n: "filelog" is a keyword
870 # i18n: "filelog" is a keyword
871 pat = getstring(x, _("filelog requires a pattern"))
871 pat = getstring(x, _("filelog requires a pattern"))
872 s = set()
872 s = set()
873 cl = repo.changelog
873 cl = repo.changelog
874
874
875 if not matchmod.patkind(pat):
875 if not matchmod.patkind(pat):
876 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
876 f = pathutil.canonpath(repo.root, repo.getcwd(), pat)
877 files = [f]
877 files = [f]
878 else:
878 else:
879 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
879 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=repo[None])
880 files = (f for f in repo[None] if m(f))
880 files = (f for f in repo[None] if m(f))
881
881
882 for f in files:
882 for f in files:
883 backrevref = {} # final value for: filerev -> changerev
883 backrevref = {} # final value for: filerev -> changerev
884 lowestchild = {} # lowest known filerev child of a filerev
884 lowestchild = {} # lowest known filerev child of a filerev
885 delayed = [] # filerev with filtered linkrev, for post-processing
885 delayed = [] # filerev with filtered linkrev, for post-processing
886 lowesthead = None # cache for manifest content of all head revisions
886 lowesthead = None # cache for manifest content of all head revisions
887 fl = repo.file(f)
887 fl = repo.file(f)
888 for fr in list(fl):
888 for fr in list(fl):
889 rev = fl.linkrev(fr)
889 rev = fl.linkrev(fr)
890 if rev not in cl:
890 if rev not in cl:
891 # changerev pointed in linkrev is filtered
891 # changerev pointed in linkrev is filtered
892 # record it for post processing.
892 # record it for post processing.
893 delayed.append((fr, rev))
893 delayed.append((fr, rev))
894 continue
894 continue
895 for p in fl.parentrevs(fr):
895 for p in fl.parentrevs(fr):
896 if 0 <= p and p not in lowestchild:
896 if 0 <= p and p not in lowestchild:
897 lowestchild[p] = fr
897 lowestchild[p] = fr
898 backrevref[fr] = rev
898 backrevref[fr] = rev
899 s.add(rev)
899 s.add(rev)
900
900
901 # Post-processing of all filerevs we skipped because they were
901 # Post-processing of all filerevs we skipped because they were
902 # filtered. If such filerevs have known and unfiltered children, this
902 # filtered. If such filerevs have known and unfiltered children, this
903 # means they have an unfiltered appearance out there. We'll use linkrev
903 # means they have an unfiltered appearance out there. We'll use linkrev
904 # adjustment to find one of these appearances. The lowest known child
904 # adjustment to find one of these appearances. The lowest known child
905 # will be used as a starting point because it is the best upper-bound we
905 # will be used as a starting point because it is the best upper-bound we
906 # have.
906 # have.
907 #
907 #
908 # This approach will fail when an unfiltered but linkrev-shadowed
908 # This approach will fail when an unfiltered but linkrev-shadowed
909 # appearance exists in a head changeset without unfiltered filerev
909 # appearance exists in a head changeset without unfiltered filerev
910 # children anywhere.
910 # children anywhere.
911 while delayed:
911 while delayed:
912 # must be a descending iteration, so that lowest-child information is
912 # must be a descending iteration, so that lowest-child information is
913 # gradually filled in for use by later items.
913 # gradually filled in for use by later items.
914 fr, rev = delayed.pop()
914 fr, rev = delayed.pop()
915 lkr = rev
915 lkr = rev
916
916
917 child = lowestchild.get(fr)
917 child = lowestchild.get(fr)
918
918
919 if child is None:
919 if child is None:
920 # search for existence of this file revision in a head revision.
920 # search for existence of this file revision in a head revision.
921 # There are three possibilities:
921 # There are three possibilities:
922 # - the revision exists in a head and we can find an
922 # - the revision exists in a head and we can find an
923 # introduction from there,
923 # introduction from there,
924 # - the revision does not exist in a head because it has been
924 # - the revision does not exist in a head because it has been
925 # changed since its introduction: we would have found a child
925 # changed since its introduction: we would have found a child
926 # and be in the other 'else' clause,
926 # and be in the other 'else' clause,
927 # - all versions of the revision are hidden.
927 # - all versions of the revision are hidden.
928 if lowesthead is None:
928 if lowesthead is None:
929 lowesthead = {}
929 lowesthead = {}
930 for h in repo.heads():
930 for h in repo.heads():
931 fnode = repo[h].manifest().get(f)
931 fnode = repo[h].manifest().get(f)
932 if fnode is not None:
932 if fnode is not None:
933 lowesthead[fl.rev(fnode)] = h
933 lowesthead[fl.rev(fnode)] = h
934 headrev = lowesthead.get(fr)
934 headrev = lowesthead.get(fr)
935 if headrev is None:
935 if headrev is None:
936 # content is nowhere unfiltered
936 # content is nowhere unfiltered
937 continue
937 continue
938 rev = repo[headrev][f].introrev()
938 rev = repo[headrev][f].introrev()
939 else:
939 else:
940 # the lowest known child is a good upper bound
940 # the lowest known child is a good upper bound
941 childcrev = backrevref[child]
941 childcrev = backrevref[child]
942 # XXX this does not guarantee returning the lowest
942 # XXX this does not guarantee returning the lowest
943 # introduction of this revision, but this gives a
943 # introduction of this revision, but this gives a
944 # result which is a good start and will fit in most
944 # result which is a good start and will fit in most
945 # cases. We probably need to fix the multiple
945 # cases. We probably need to fix the multiple
946 # introductions case properly (report each
946 # introductions case properly (report each
947 # introduction, even for identical file revisions)
947 # introduction, even for identical file revisions)
948 # once and for all at some point anyway.
948 # once and for all at some point anyway.
949 for p in repo[childcrev][f].parents():
949 for p in repo[childcrev][f].parents():
950 if p.filerev() == fr:
950 if p.filerev() == fr:
951 rev = p.rev()
951 rev = p.rev()
952 break
952 break
953 if rev == lkr: # no shadowed entry found
953 if rev == lkr: # no shadowed entry found
954 # XXX This should never happen unless some manifest points
954 # XXX This should never happen unless some manifest points
955 # to biggish file revisions (like a revision that uses a
955 # to biggish file revisions (like a revision that uses a
956 # parent that never appears in the manifest ancestors)
956 # parent that never appears in the manifest ancestors)
957 continue
957 continue
958
958
959 # Fill the data for the next iteration.
959 # Fill the data for the next iteration.
960 for p in fl.parentrevs(fr):
960 for p in fl.parentrevs(fr):
961 if 0 <= p and p not in lowestchild:
961 if 0 <= p and p not in lowestchild:
962 lowestchild[p] = fr
962 lowestchild[p] = fr
963 backrevref[fr] = rev
963 backrevref[fr] = rev
964 s.add(rev)
964 s.add(rev)
965
965
966 return subset & s
966 return subset & s
967
967
968 def first(repo, subset, x):
968 def first(repo, subset, x):
969 """``first(set, [n])``
969 """``first(set, [n])``
970 An alias for limit().
970 An alias for limit().
971 """
971 """
972 return limit(repo, subset, x)
972 return limit(repo, subset, x)
973
973
974 def _follow(repo, subset, x, name, followfirst=False):
974 def _follow(repo, subset, x, name, followfirst=False):
975 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
975 l = getargs(x, 0, 1, _("%s takes no arguments or a filename") % name)
976 c = repo['.']
976 c = repo['.']
977 if l:
977 if l:
978 x = getstring(l[0], _("%s expected a filename") % name)
978 x = getstring(l[0], _("%s expected a filename") % name)
979 if x in c:
979 if x in c:
980 cx = c[x]
980 cx = c[x]
981 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
981 s = set(ctx.rev() for ctx in cx.ancestors(followfirst=followfirst))
982 # include the revision responsible for the most recent version
982 # include the revision responsible for the most recent version
983 s.add(cx.introrev())
983 s.add(cx.introrev())
984 else:
984 else:
985 return baseset()
985 return baseset()
986 else:
986 else:
987 s = _revancestors(repo, baseset([c.rev()]), followfirst)
987 s = _revancestors(repo, baseset([c.rev()]), followfirst)
988
988
989 return subset & s
989 return subset & s
990
990
991 def follow(repo, subset, x):
991 def follow(repo, subset, x):
992 """``follow([file])``
992 """``follow([file])``
993 An alias for ``::.`` (ancestors of the working directory's first parent).
993 An alias for ``::.`` (ancestors of the working directory's first parent).
994 If a filename is specified, the history of the given file is followed,
994 If a filename is specified, the history of the given file is followed,
995 including copies.
995 including copies.
996 """
996 """
997 return _follow(repo, subset, x, 'follow')
997 return _follow(repo, subset, x, 'follow')
998
998
999 def _followfirst(repo, subset, x):
999 def _followfirst(repo, subset, x):
1000 # ``followfirst([file])``
1000 # ``followfirst([file])``
1001 # Like ``follow([file])`` but follows only the first parent of
1001 # Like ``follow([file])`` but follows only the first parent of
1002 # every revision or file revision.
1002 # every revision or file revision.
1003 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1003 return _follow(repo, subset, x, '_followfirst', followfirst=True)
1004
1004
1005 def getall(repo, subset, x):
1005 def getall(repo, subset, x):
1006 """``all()``
1006 """``all()``
1007 All changesets, the same as ``0:tip``.
1007 All changesets, the same as ``0:tip``.
1008 """
1008 """
1009 # i18n: "all" is a keyword
1009 # i18n: "all" is a keyword
1010 getargs(x, 0, 0, _("all takes no arguments"))
1010 getargs(x, 0, 0, _("all takes no arguments"))
1011 return subset & spanset(repo) # drop "null" if any
1011 return subset & spanset(repo) # drop "null" if any
1012
1012
1013 def grep(repo, subset, x):
1013 def grep(repo, subset, x):
1014 """``grep(regex)``
1014 """``grep(regex)``
1015 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1015 Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
1016 to ensure special escape characters are handled correctly. Unlike
1016 to ensure special escape characters are handled correctly. Unlike
1017 ``keyword(string)``, the match is case-sensitive.
1017 ``keyword(string)``, the match is case-sensitive.
1018 """
1018 """
1019 try:
1019 try:
1020 # i18n: "grep" is a keyword
1020 # i18n: "grep" is a keyword
1021 gr = re.compile(getstring(x, _("grep requires a string")))
1021 gr = re.compile(getstring(x, _("grep requires a string")))
1022 except re.error, e:
1022 except re.error, e:
1023 raise error.ParseError(_('invalid match pattern: %s') % e)
1023 raise error.ParseError(_('invalid match pattern: %s') % e)
1024
1024
1025 def matches(x):
1025 def matches(x):
1026 c = repo[x]
1026 c = repo[x]
1027 for e in c.files() + [c.user(), c.description()]:
1027 for e in c.files() + [c.user(), c.description()]:
1028 if gr.search(e):
1028 if gr.search(e):
1029 return True
1029 return True
1030 return False
1030 return False
1031
1031
1032 return subset.filter(matches)
1032 return subset.filter(matches)
1033
1033
1034 def _matchfiles(repo, subset, x):
1034 def _matchfiles(repo, subset, x):
1035 # _matchfiles takes a revset list of prefixed arguments:
1035 # _matchfiles takes a revset list of prefixed arguments:
1036 #
1036 #
1037 # [p:foo, i:bar, x:baz]
1037 # [p:foo, i:bar, x:baz]
1038 #
1038 #
1039 # builds a match object from them and filters subset. Allowed
1039 # builds a match object from them and filters subset. Allowed
1040 # prefixes are 'p:' for regular patterns, 'i:' for include
1040 # prefixes are 'p:' for regular patterns, 'i:' for include
1041 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1041 # patterns and 'x:' for exclude patterns. Use 'r:' prefix to pass
1042 # a revision identifier, or the empty string to reference the
1042 # a revision identifier, or the empty string to reference the
1043 # working directory, from which the match object is
1043 # working directory, from which the match object is
1044 # initialized. Use 'd:' to set the default matching mode, default
1044 # initialized. Use 'd:' to set the default matching mode, default
1045 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1045 # to 'glob'. At most one 'r:' and 'd:' argument can be passed.
1046
1046
1047 # i18n: "_matchfiles" is a keyword
1047 # i18n: "_matchfiles" is a keyword
1048 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1048 l = getargs(x, 1, -1, _("_matchfiles requires at least one argument"))
1049 pats, inc, exc = [], [], []
1049 pats, inc, exc = [], [], []
1050 rev, default = None, None
1050 rev, default = None, None
1051 for arg in l:
1051 for arg in l:
1052 # i18n: "_matchfiles" is a keyword
1052 # i18n: "_matchfiles" is a keyword
1053 s = getstring(arg, _("_matchfiles requires string arguments"))
1053 s = getstring(arg, _("_matchfiles requires string arguments"))
1054 prefix, value = s[:2], s[2:]
1054 prefix, value = s[:2], s[2:]
1055 if prefix == 'p:':
1055 if prefix == 'p:':
1056 pats.append(value)
1056 pats.append(value)
1057 elif prefix == 'i:':
1057 elif prefix == 'i:':
1058 inc.append(value)
1058 inc.append(value)
1059 elif prefix == 'x:':
1059 elif prefix == 'x:':
1060 exc.append(value)
1060 exc.append(value)
1061 elif prefix == 'r:':
1061 elif prefix == 'r:':
1062 if rev is not None:
1062 if rev is not None:
1063 # i18n: "_matchfiles" is a keyword
1063 # i18n: "_matchfiles" is a keyword
1064 raise error.ParseError(_('_matchfiles expected at most one '
1064 raise error.ParseError(_('_matchfiles expected at most one '
1065 'revision'))
1065 'revision'))
1066 if value != '': # empty means working directory; leave rev as None
1066 if value != '': # empty means working directory; leave rev as None
1067 rev = value
1067 rev = value
1068 elif prefix == 'd:':
1068 elif prefix == 'd:':
1069 if default is not None:
1069 if default is not None:
1070 # i18n: "_matchfiles" is a keyword
1070 # i18n: "_matchfiles" is a keyword
1071 raise error.ParseError(_('_matchfiles expected at most one '
1071 raise error.ParseError(_('_matchfiles expected at most one '
1072 'default mode'))
1072 'default mode'))
1073 default = value
1073 default = value
1074 else:
1074 else:
1075 # i18n: "_matchfiles" is a keyword
1075 # i18n: "_matchfiles" is a keyword
1076 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1076 raise error.ParseError(_('invalid _matchfiles prefix: %s') % prefix)
1077 if not default:
1077 if not default:
1078 default = 'glob'
1078 default = 'glob'
1079
1079
1080 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1080 m = matchmod.match(repo.root, repo.getcwd(), pats, include=inc,
1081 exclude=exc, ctx=repo[rev], default=default)
1081 exclude=exc, ctx=repo[rev], default=default)
1082
1082
1083 def matches(x):
1083 def matches(x):
1084 for f in repo[x].files():
1084 for f in repo[x].files():
1085 if m(f):
1085 if m(f):
1086 return True
1086 return True
1087 return False
1087 return False
1088
1088
1089 return subset.filter(matches)
1089 return subset.filter(matches)
1090
1090
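# An illustrative example of the prefixed-argument convention parsed above.
# Each argument is a plain string whose two-character prefix selects its
# role; hasfile() below builds a single 'p:' argument, and a hypothetical
# caller could combine several:
#
#   ('list',
#    ('list', ('string', 'p:glob:*.py'), ('string', 'i:lib/')),
#    ('string', 'x:lib/vendor/'))
#
# which yields pats=['glob:*.py'], inc=['lib/'], exc=['lib/vendor/'] and the
# default 'glob' matching mode (no 'd:' or 'r:' argument given).
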
1091 def hasfile(repo, subset, x):
1091 def hasfile(repo, subset, x):
1092 """``file(pattern)``
1092 """``file(pattern)``
1093 Changesets affecting files matched by pattern.
1093 Changesets affecting files matched by pattern.
1094
1094
1095 For a faster but less accurate result, consider using ``filelog()``
1095 For a faster but less accurate result, consider using ``filelog()``
1096 instead.
1096 instead.
1097
1097
1098 This predicate uses ``glob:`` as the default kind of pattern.
1098 This predicate uses ``glob:`` as the default kind of pattern.
1099 """
1099 """
1100 # i18n: "file" is a keyword
1100 # i18n: "file" is a keyword
1101 pat = getstring(x, _("file requires a pattern"))
1101 pat = getstring(x, _("file requires a pattern"))
1102 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1102 return _matchfiles(repo, subset, ('string', 'p:' + pat))
1103
1103
1104 def head(repo, subset, x):
1104 def head(repo, subset, x):
1105 """``head()``
1105 """``head()``
1106 Changeset is a named branch head.
1106 Changeset is a named branch head.
1107 """
1107 """
1108 # i18n: "head" is a keyword
1108 # i18n: "head" is a keyword
1109 getargs(x, 0, 0, _("head takes no arguments"))
1109 getargs(x, 0, 0, _("head takes no arguments"))
1110 hs = set()
1110 hs = set()
1111 cl = repo.changelog
1111 cl = repo.changelog
1112 for b, ls in repo.branchmap().iteritems():
1112 for b, ls in repo.branchmap().iteritems():
1113 hs.update(cl.rev(h) for h in ls)
1113 hs.update(cl.rev(h) for h in ls)
1114 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1114 # XXX using a set to feed the baseset is wrong. Sets are not ordered.
1115 # This does not break because of other fullreposet misbehavior.
1115 # This does not break because of other fullreposet misbehavior.
1116 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1116 # XXX We should combine with subset first: 'subset & baseset(...)'. This is
1117 # necessary to ensure we preserve the order in subset.
1117 # necessary to ensure we preserve the order in subset.
1118 return baseset(hs) & subset
1118 return baseset(hs) & subset
1119
1119
1120 def heads(repo, subset, x):
1120 def heads(repo, subset, x):
1121 """``heads(set)``
1121 """``heads(set)``
1122 Members of set with no children in set.
1122 Members of set with no children in set.
1123 """
1123 """
1124 s = getset(repo, subset, x)
1124 s = getset(repo, subset, x)
1125 ps = parents(repo, subset, x)
1125 ps = parents(repo, subset, x)
1126 return s - ps
1126 return s - ps
1127
1127
1128 def hidden(repo, subset, x):
1128 def hidden(repo, subset, x):
1129 """``hidden()``
1129 """``hidden()``
1130 Hidden changesets.
1130 Hidden changesets.
1131 """
1131 """
1132 # i18n: "hidden" is a keyword
1132 # i18n: "hidden" is a keyword
1133 getargs(x, 0, 0, _("hidden takes no arguments"))
1133 getargs(x, 0, 0, _("hidden takes no arguments"))
1134 hiddenrevs = repoview.filterrevs(repo, 'visible')
1134 hiddenrevs = repoview.filterrevs(repo, 'visible')
1135 return subset & hiddenrevs
1135 return subset & hiddenrevs
1136
1136
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    kw = encoding.lower(getstring(x, _("keyword requires a string")))

    def matches(r):
        c = repo[r]
        return any(kw in encoding.lower(t)
                   for t in c.files() + [c.user(), c.description()])

    return subset.filter(matches)

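# Illustrative usage (not part of the original module; the search term is
# hypothetical):
#
#   hg log -r "keyword(bug)"
#
# matches any changeset whose description, user name or touched file names
# contain the string "bug", case-insensitively, per the docstring above.
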
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    result = []
    it = iter(os)
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

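# Illustrative usage (not part of the original module): ``limit()`` and the
# symmetric ``last()`` below are normally driven from the command line, e.g.
#
#   hg log -r "limit(branch(default), 3)"
#
# which yields the first three members of the given set (here, the default
# branch) in the set's own order.
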
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = subset
    os = getset(repo, fullreposet(repo), l[0])
    os.reverse()
    result = []
    it = iter(os)
    for i in xrange(lim):
        y = next(it, None)
        if y is None:
            break
        elif y in ss:
            result.append(y)
    return baseset(result)

1205 def maxrev(repo, subset, x):
1205 def maxrev(repo, subset, x):
1206 """``max(set)``
1206 """``max(set)``
1207 Changeset with highest revision number in set.
1207 Changeset with highest revision number in set.
1208 """
1208 """
1209 os = getset(repo, fullreposet(repo), x)
1209 os = getset(repo, fullreposet(repo), x)
1210 if os:
1210 if os:
1211 m = os.max()
1211 m = os.max()
1212 if m in subset:
1212 if m in subset:
1213 return baseset([m])
1213 return baseset([m])
1214 return baseset()
1214 return baseset()
1215
1215
1216 def merge(repo, subset, x):
1216 def merge(repo, subset, x):
1217 """``merge()``
1217 """``merge()``
1218 Changeset is a merge changeset.
1218 Changeset is a merge changeset.
1219 """
1219 """
1220 # i18n: "merge" is a keyword
1220 # i18n: "merge" is a keyword
1221 getargs(x, 0, 0, _("merge takes no arguments"))
1221 getargs(x, 0, 0, _("merge takes no arguments"))
1222 cl = repo.changelog
1222 cl = repo.changelog
1223 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1223 return subset.filter(lambda r: cl.parentrevs(r)[1] != -1)
1224
1224
1225 def branchpoint(repo, subset, x):
1225 def branchpoint(repo, subset, x):
1226 """``branchpoint()``
1226 """``branchpoint()``
1227 Changesets with more than one child.
1227 Changesets with more than one child.
1228 """
1228 """
1229 # i18n: "branchpoint" is a keyword
1229 # i18n: "branchpoint" is a keyword
1230 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1230 getargs(x, 0, 0, _("branchpoint takes no arguments"))
1231 cl = repo.changelog
1231 cl = repo.changelog
1232 if not subset:
1232 if not subset:
1233 return baseset()
1233 return baseset()
1234 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1234 # XXX this should be 'parentset.min()' assuming 'parentset' is a smartset
1235 # (and if it is not, it should.)
1235 # (and if it is not, it should.)
1236 baserev = min(subset)
1236 baserev = min(subset)
1237 parentscount = [0]*(len(repo) - baserev)
1237 parentscount = [0]*(len(repo) - baserev)
1238 for r in cl.revs(start=baserev + 1):
1238 for r in cl.revs(start=baserev + 1):
1239 for p in cl.parentrevs(r):
1239 for p in cl.parentrevs(r):
1240 if p >= baserev:
1240 if p >= baserev:
1241 parentscount[p - baserev] += 1
1241 parentscount[p - baserev] += 1
1242 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1242 return subset.filter(lambda r: parentscount[r - baserev] > 1)
1243
1243
1244 def minrev(repo, subset, x):
1244 def minrev(repo, subset, x):
1245 """``min(set)``
1245 """``min(set)``
1246 Changeset with lowest revision number in set.
1246 Changeset with lowest revision number in set.
1247 """
1247 """
1248 os = getset(repo, fullreposet(repo), x)
1248 os = getset(repo, fullreposet(repo), x)
1249 if os:
1249 if os:
1250 m = os.min()
1250 m = os.min()
1251 if m in subset:
1251 if m in subset:
1252 return baseset([m])
1252 return baseset([m])
1253 return baseset()
1253 return baseset()
1254
1254
1255 def modifies(repo, subset, x):
1255 def modifies(repo, subset, x):
1256 """``modifies(pattern)``
1256 """``modifies(pattern)``
1257 Changesets modifying files matched by pattern.
1257 Changesets modifying files matched by pattern.
1258
1258
1259 The pattern without explicit kind like ``glob:`` is expected to be
1259 The pattern without explicit kind like ``glob:`` is expected to be
1260 relative to the current directory and match against a file or a
1260 relative to the current directory and match against a file or a
1261 directory.
1261 directory.
1262 """
1262 """
1263 # i18n: "modifies" is a keyword
1263 # i18n: "modifies" is a keyword
1264 pat = getstring(x, _("modifies requires a pattern"))
1264 pat = getstring(x, _("modifies requires a pattern"))
1265 return checkstatus(repo, subset, pat, 0)
1265 return checkstatus(repo, subset, pat, 0)
1266
1266
def named(repo, subset, x):
    """``named(namespace)``
    The changesets in a given namespace.

    If `namespace` starts with `re:`, the remainder of the string is treated as
    a regular expression. To match a namespace that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "named" is a keyword
    args = getargs(x, 1, 1, _('named requires a namespace argument'))

    ns = getstring(args[0],
                   # i18n: "named" is a keyword
                   _('the argument to named must be a string'))
    kind, pattern, matcher = _stringmatcher(ns)
    namespaces = set()
    if kind == 'literal':
        if pattern not in repo.names:
            raise error.RepoLookupError(_("namespace '%s' does not exist")
                                        % ns)
        namespaces.add(repo.names[pattern])
    else:
        for name, ns in repo.names.iteritems():
            if matcher(name):
                namespaces.add(ns)
        if not namespaces:
            raise error.RepoLookupError(_("no namespace exists"
                                          " that matches '%s'") % pattern)

    names = set()
    for ns in namespaces:
        for name in ns.listnames(repo):
            if name not in ns.deprecated:
                names.update(repo[n].rev() for n in ns.nodes(repo, name))

    names -= set([node.nullrev])
    return subset & names

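# Illustrative usage (not part of the original module): namespaces registered
# by default include "bookmarks", "tags" and "branches", so for example
#
#   hg log -r "named(bookmarks)"
#
# selects every changeset carrying a bookmark, while named("re:remote.*")
# would match any registered namespace whose name starts with "remote"
# (assuming such a namespace exists).
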
1305 def node_(repo, subset, x):
1305 def node_(repo, subset, x):
1306 """``id(string)``
1306 """``id(string)``
1307 Revision non-ambiguously specified by the given hex string prefix.
1307 Revision non-ambiguously specified by the given hex string prefix.
1308 """
1308 """
1309 # i18n: "id" is a keyword
1309 # i18n: "id" is a keyword
1310 l = getargs(x, 1, 1, _("id requires one argument"))
1310 l = getargs(x, 1, 1, _("id requires one argument"))
1311 # i18n: "id" is a keyword
1311 # i18n: "id" is a keyword
1312 n = getstring(l[0], _("id requires a string"))
1312 n = getstring(l[0], _("id requires a string"))
1313 if len(n) == 40:
1313 if len(n) == 40:
1314 try:
1314 try:
1315 rn = repo.changelog.rev(node.bin(n))
1315 rn = repo.changelog.rev(node.bin(n))
1316 except (LookupError, TypeError):
1316 except (LookupError, TypeError):
1317 rn = None
1317 rn = None
1318 else:
1318 else:
1319 rn = None
1319 rn = None
1320 pm = repo.changelog._partialmatch(n)
1320 pm = repo.changelog._partialmatch(n)
1321 if pm is not None:
1321 if pm is not None:
1322 rn = repo.changelog.rev(pm)
1322 rn = repo.changelog.rev(pm)
1323
1323
1324 if rn is None:
1324 if rn is None:
1325 return baseset()
1325 return baseset()
1326 result = baseset([rn])
1326 result = baseset([rn])
1327 return result & subset
1327 return result & subset
1328
1328
1329 def obsolete(repo, subset, x):
1329 def obsolete(repo, subset, x):
1330 """``obsolete()``
1330 """``obsolete()``
1331 Mutable changeset with a newer version."""
1331 Mutable changeset with a newer version."""
1332 # i18n: "obsolete" is a keyword
1332 # i18n: "obsolete" is a keyword
1333 getargs(x, 0, 0, _("obsolete takes no arguments"))
1333 getargs(x, 0, 0, _("obsolete takes no arguments"))
1334 obsoletes = obsmod.getrevs(repo, 'obsolete')
1334 obsoletes = obsmod.getrevs(repo, 'obsolete')
1335 return subset & obsoletes
1335 return subset & obsoletes
1336
1336
def only(repo, subset, x):
    """``only(set, [set])``
    Changesets that are ancestors of the first set that are not ancestors
    of any other head in the repo. If a second set is specified, the result
    is ancestors of the first set that are not ancestors of the second set
    (i.e. ::<set1> - ::<set2>).
    """
    cl = repo.changelog
    # i18n: "only" is a keyword
    args = getargs(x, 1, 2, _('only takes one or two arguments'))
    include = getset(repo, fullreposet(repo), args[0])
    if len(args) == 1:
        if not include:
            return baseset()

        descendants = set(_revdescendants(repo, include, False))
        exclude = [rev for rev in cl.headrevs()
                   if rev not in descendants and rev not in include]
    else:
        exclude = getset(repo, fullreposet(repo), args[1])

    results = set(cl.findmissingrevs(common=exclude, heads=include))
    # XXX we should turn this into a baseset instead of a set, smartset may do
    # some optimisations from the fact this is a baseset.
    return subset & results

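# Illustrative usage (not part of the original module; branch names are
# hypothetical):
#
#   hg log -r "only(release, default)"
#
# is equivalent to ::release - ::default, i.e. the ancestors of "release"
# that are not also ancestors of "default".
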
1363 def origin(repo, subset, x):
1363 def origin(repo, subset, x):
1364 """``origin([set])``
1364 """``origin([set])``
1365 Changesets that were specified as a source for the grafts, transplants or
1365 Changesets that were specified as a source for the grafts, transplants or
1366 rebases that created the given revisions. Omitting the optional set is the
1366 rebases that created the given revisions. Omitting the optional set is the
1367 same as passing all(). If a changeset created by these operations is itself
1367 same as passing all(). If a changeset created by these operations is itself
1368 specified as a source for one of these operations, only the source changeset
1368 specified as a source for one of these operations, only the source changeset
1369 for the first operation is selected.
1369 for the first operation is selected.
1370 """
1370 """
1371 if x is not None:
1371 if x is not None:
1372 dests = getset(repo, fullreposet(repo), x)
1372 dests = getset(repo, fullreposet(repo), x)
1373 else:
1373 else:
1374 dests = fullreposet(repo)
1374 dests = fullreposet(repo)
1375
1375
1376 def _firstsrc(rev):
1376 def _firstsrc(rev):
1377 src = _getrevsource(repo, rev)
1377 src = _getrevsource(repo, rev)
1378 if src is None:
1378 if src is None:
1379 return None
1379 return None
1380
1380
1381 while True:
1381 while True:
1382 prev = _getrevsource(repo, src)
1382 prev = _getrevsource(repo, src)
1383
1383
1384 if prev is None:
1384 if prev is None:
1385 return src
1385 return src
1386 src = prev
1386 src = prev
1387
1387
1388 o = set([_firstsrc(r) for r in dests])
1388 o = set([_firstsrc(r) for r in dests])
1389 o -= set([None])
1389 o -= set([None])
1390 # XXX we should turn this into a baseset instead of a set, smartset may do
1390 # XXX we should turn this into a baseset instead of a set, smartset may do
1391 # some optimisations from the fact this is a baseset.
1391 # some optimisations from the fact this is a baseset.
1392 return subset & o
1392 return subset & o
1393
1393
1394 def outgoing(repo, subset, x):
1394 def outgoing(repo, subset, x):
1395 """``outgoing([path])``
1395 """``outgoing([path])``
1396 Changesets not found in the specified destination repository, or the
1396 Changesets not found in the specified destination repository, or the
1397 default push location.
1397 default push location.
1398 """
1398 """
1399 # Avoid cycles.
1399 # Avoid cycles.
1400 import discovery
1400 import discovery
1401 import hg
1401 import hg
1402 # i18n: "outgoing" is a keyword
1402 # i18n: "outgoing" is a keyword
1403 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1403 l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
1404 # i18n: "outgoing" is a keyword
1404 # i18n: "outgoing" is a keyword
1405 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1405 dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
1406 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1406 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
1407 dest, branches = hg.parseurl(dest)
1407 dest, branches = hg.parseurl(dest)
1408 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1408 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1409 if revs:
1409 if revs:
1410 revs = [repo.lookup(rev) for rev in revs]
1410 revs = [repo.lookup(rev) for rev in revs]
1411 other = hg.peer(repo, {}, dest)
1411 other = hg.peer(repo, {}, dest)
1412 repo.ui.pushbuffer()
1412 repo.ui.pushbuffer()
1413 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1413 outgoing = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
1414 repo.ui.popbuffer()
1414 repo.ui.popbuffer()
1415 cl = repo.changelog
1415 cl = repo.changelog
1416 o = set([cl.rev(r) for r in outgoing.missing])
1416 o = set([cl.rev(r) for r in outgoing.missing])
1417 return subset & o
1417 return subset & o
1418
1418
1419 def p1(repo, subset, x):
1419 def p1(repo, subset, x):
1420 """``p1([set])``
1420 """``p1([set])``
1421 First parent of changesets in set, or the working directory.
1421 First parent of changesets in set, or the working directory.
1422 """
1422 """
1423 if x is None:
1423 if x is None:
1424 p = repo[x].p1().rev()
1424 p = repo[x].p1().rev()
1425 if p >= 0:
1425 if p >= 0:
1426 return subset & baseset([p])
1426 return subset & baseset([p])
1427 return baseset()
1427 return baseset()
1428
1428
1429 ps = set()
1429 ps = set()
1430 cl = repo.changelog
1430 cl = repo.changelog
1431 for r in getset(repo, fullreposet(repo), x):
1431 for r in getset(repo, fullreposet(repo), x):
1432 ps.add(cl.parentrevs(r)[0])
1432 ps.add(cl.parentrevs(r)[0])
1433 ps -= set([node.nullrev])
1433 ps -= set([node.nullrev])
1434 # XXX we should turn this into a baseset instead of a set, smartset may do
1434 # XXX we should turn this into a baseset instead of a set, smartset may do
1435 # some optimisations from the fact this is a baseset.
1435 # some optimisations from the fact this is a baseset.
1436 return subset & ps
1436 return subset & ps
1437
1437
1438 def p2(repo, subset, x):
1438 def p2(repo, subset, x):
1439 """``p2([set])``
1439 """``p2([set])``
1440 Second parent of changesets in set, or the working directory.
1440 Second parent of changesets in set, or the working directory.
1441 """
1441 """
1442 if x is None:
1442 if x is None:
1443 ps = repo[x].parents()
1443 ps = repo[x].parents()
1444 try:
1444 try:
1445 p = ps[1].rev()
1445 p = ps[1].rev()
1446 if p >= 0:
1446 if p >= 0:
1447 return subset & baseset([p])
1447 return subset & baseset([p])
1448 return baseset()
1448 return baseset()
1449 except IndexError:
1449 except IndexError:
1450 return baseset()
1450 return baseset()
1451
1451
1452 ps = set()
1452 ps = set()
1453 cl = repo.changelog
1453 cl = repo.changelog
1454 for r in getset(repo, fullreposet(repo), x):
1454 for r in getset(repo, fullreposet(repo), x):
1455 ps.add(cl.parentrevs(r)[1])
1455 ps.add(cl.parentrevs(r)[1])
1456 ps -= set([node.nullrev])
1456 ps -= set([node.nullrev])
1457 # XXX we should turn this into a baseset instead of a set, smartset may do
1457 # XXX we should turn this into a baseset instead of a set, smartset may do
1458 # some optimisations from the fact this is a baseset.
1458 # some optimisations from the fact this is a baseset.
1459 return subset & ps
1459 return subset & ps
1460
1460
1461 def parents(repo, subset, x):
1461 def parents(repo, subset, x):
1462 """``parents([set])``
1462 """``parents([set])``
1463 The set of all parents for all changesets in set, or the working directory.
1463 The set of all parents for all changesets in set, or the working directory.
1464 """
1464 """
1465 if x is None:
1465 if x is None:
1466 ps = set(p.rev() for p in repo[x].parents())
1466 ps = set(p.rev() for p in repo[x].parents())
1467 else:
1467 else:
1468 ps = set()
1468 ps = set()
1469 cl = repo.changelog
1469 cl = repo.changelog
1470 for r in getset(repo, fullreposet(repo), x):
1470 for r in getset(repo, fullreposet(repo), x):
1471 ps.update(cl.parentrevs(r))
1471 ps.update(cl.parentrevs(r))
1472 ps -= set([node.nullrev])
1472 ps -= set([node.nullrev])
1473 return subset & ps
1473 return subset & ps
1474
1474
1475 def _phase(repo, subset, target):
1475 def _phase(repo, subset, target):
1476 """helper to select all rev in phase <target>"""
1476 """helper to select all rev in phase <target>"""
1477 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1477 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1478 if repo._phasecache._phasesets:
1478 if repo._phasecache._phasesets:
1479 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1479 s = repo._phasecache._phasesets[target] - repo.changelog.filteredrevs
1480 s = baseset(s)
1480 s = baseset(s)
1481 s.sort() # set are non ordered, so we enforce ascending
1481 s.sort() # set are non ordered, so we enforce ascending
1482 return subset & s
1482 return subset & s
1483 else:
1483 else:
1484 phase = repo._phasecache.phase
1484 phase = repo._phasecache.phase
1485 condition = lambda r: phase(repo, r) == target
1485 condition = lambda r: phase(repo, r) == target
1486 return subset.filter(condition, cache=False)
1486 return subset.filter(condition, cache=False)
1487
1487
1488 def draft(repo, subset, x):
1488 def draft(repo, subset, x):
1489 """``draft()``
1489 """``draft()``
1490 Changeset in draft phase."""
1490 Changeset in draft phase."""
1491 # i18n: "draft" is a keyword
1491 # i18n: "draft" is a keyword
1492 getargs(x, 0, 0, _("draft takes no arguments"))
1492 getargs(x, 0, 0, _("draft takes no arguments"))
1493 target = phases.draft
1493 target = phases.draft
1494 return _phase(repo, subset, target)
1494 return _phase(repo, subset, target)
1495
1495
1496 def secret(repo, subset, x):
1496 def secret(repo, subset, x):
1497 """``secret()``
1497 """``secret()``
1498 Changeset in secret phase."""
1498 Changeset in secret phase."""
1499 # i18n: "secret" is a keyword
1499 # i18n: "secret" is a keyword
1500 getargs(x, 0, 0, _("secret takes no arguments"))
1500 getargs(x, 0, 0, _("secret takes no arguments"))
1501 target = phases.secret
1501 target = phases.secret
1502 return _phase(repo, subset, target)
1502 return _phase(repo, subset, target)
1503
1503
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, fullreposet(repo), x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            if len(parents) > 1:
                ps.add(parents[1])
    return subset & ps

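# Illustrative usage (not part of the original module): the ^ operator parsed
# into parentspec() lets one write, for example,
#
#   hg log -r "tip^"      # first parent of tip
#   hg log -r "tip^2"     # second parent of tip, empty unless tip is a merge
#
# mirroring the ``set^1``/``set^2`` forms documented in the docstring above.
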
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.

    If any of the specified revisions is not present in the local repository,
    the query is normally aborted. But this predicate allows the query
    to continue even in such cases.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        return baseset()

1543 # for internal use
1543 # for internal use
1544 def _notpublic(repo, subset, x):
1544 def _notpublic(repo, subset, x):
1545 getargs(x, 0, 0, "_notpublic takes no arguments")
1545 getargs(x, 0, 0, "_notpublic takes no arguments")
1546 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1546 repo._phasecache.loadphaserevs(repo) # ensure phase's sets are loaded
1547 if repo._phasecache._phasesets:
1547 if repo._phasecache._phasesets:
1548 s = set()
1548 s = set()
1549 for u in repo._phasecache._phasesets[1:]:
1549 for u in repo._phasecache._phasesets[1:]:
1550 s.update(u)
1550 s.update(u)
1551 s = baseset(s - repo.changelog.filteredrevs)
1551 s = baseset(s - repo.changelog.filteredrevs)
1552 s.sort()
1552 s.sort()
1553 return subset & s
1553 return subset & s
1554 else:
1554 else:
1555 phase = repo._phasecache.phase
1555 phase = repo._phasecache.phase
1556 target = phases.public
1556 target = phases.public
1557 condition = lambda r: phase(repo, r) != target
1557 condition = lambda r: phase(repo, r) != target
1558 return subset.filter(condition, cache=False)
1558 return subset.filter(condition, cache=False)
1559
1559
1560 def public(repo, subset, x):
1560 def public(repo, subset, x):
1561 """``public()``
1561 """``public()``
1562 Changeset in public phase."""
1562 Changeset in public phase."""
1563 # i18n: "public" is a keyword
1563 # i18n: "public" is a keyword
1564 getargs(x, 0, 0, _("public takes no arguments"))
1564 getargs(x, 0, 0, _("public takes no arguments"))
1565 phase = repo._phasecache.phase
1565 phase = repo._phasecache.phase
1566 target = phases.public
1566 target = phases.public
1567 condition = lambda r: phase(repo, r) == target
1567 condition = lambda r: phase(repo, r) == target
1568 return subset.filter(condition, cache=False)
1568 return subset.filter(condition, cache=False)
1569
1569
1570 def remote(repo, subset, x):
1570 def remote(repo, subset, x):
1571 """``remote([id [,path]])``
1571 """``remote([id [,path]])``
1572 Local revision that corresponds to the given identifier in a
1572 Local revision that corresponds to the given identifier in a
1573 remote repository, if present. Here, the '.' identifier is a
1573 remote repository, if present. Here, the '.' identifier is a
1574 synonym for the current local branch.
1574 synonym for the current local branch.
1575 """
1575 """
1576
1576
1577 import hg # avoid start-up nasties
1577 import hg # avoid start-up nasties
1578 # i18n: "remote" is a keyword
1578 # i18n: "remote" is a keyword
1579 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1579 l = getargs(x, 0, 2, _("remote takes one, two or no arguments"))
1580
1580
1581 q = '.'
1581 q = '.'
1582 if len(l) > 0:
1582 if len(l) > 0:
1583 # i18n: "remote" is a keyword
1583 # i18n: "remote" is a keyword
1584 q = getstring(l[0], _("remote requires a string id"))
1584 q = getstring(l[0], _("remote requires a string id"))
1585 if q == '.':
1585 if q == '.':
1586 q = repo['.'].branch()
1586 q = repo['.'].branch()
1587
1587
1588 dest = ''
1588 dest = ''
1589 if len(l) > 1:
1589 if len(l) > 1:
1590 # i18n: "remote" is a keyword
1590 # i18n: "remote" is a keyword
1591 dest = getstring(l[1], _("remote requires a repository path"))
1591 dest = getstring(l[1], _("remote requires a repository path"))
1592 dest = repo.ui.expandpath(dest or 'default')
1592 dest = repo.ui.expandpath(dest or 'default')
1593 dest, branches = hg.parseurl(dest)
1593 dest, branches = hg.parseurl(dest)
1594 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1594 revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
1595 if revs:
1595 if revs:
1596 revs = [repo.lookup(rev) for rev in revs]
1596 revs = [repo.lookup(rev) for rev in revs]
1597 other = hg.peer(repo, {}, dest)
1597 other = hg.peer(repo, {}, dest)
1598 n = other.lookup(q)
1598 n = other.lookup(q)
1599 if n in repo:
1599 if n in repo:
1600 r = repo[n].rev()
1600 r = repo[n].rev()
1601 if r in subset:
1601 if r in subset:
1602 return baseset([r])
1602 return baseset([r])
1603 return baseset()
1603 return baseset()
1604
1604
1605 def removes(repo, subset, x):
1605 def removes(repo, subset, x):
1606 """``removes(pattern)``
1606 """``removes(pattern)``
1607 Changesets which remove files matching pattern.
1607 Changesets which remove files matching pattern.
1608
1608
1609 The pattern without explicit kind like ``glob:`` is expected to be
1609 The pattern without explicit kind like ``glob:`` is expected to be
1610 relative to the current directory and match against a file or a
1610 relative to the current directory and match against a file or a
1611 directory.
1611 directory.
1612 """
1612 """
1613 # i18n: "removes" is a keyword
1613 # i18n: "removes" is a keyword
1614 pat = getstring(x, _("removes requires a pattern"))
1614 pat = getstring(x, _("removes requires a pattern"))
1615 return checkstatus(repo, subset, pat, 2)
1615 return checkstatus(repo, subset, pat, 2)
1616
1616
1617 def rev(repo, subset, x):
1617 def rev(repo, subset, x):
1618 """``rev(number)``
1618 """``rev(number)``
1619 Revision with the given numeric identifier.
1619 Revision with the given numeric identifier.
1620 """
1620 """
1621 # i18n: "rev" is a keyword
1621 # i18n: "rev" is a keyword
1622 l = getargs(x, 1, 1, _("rev requires one argument"))
1622 l = getargs(x, 1, 1, _("rev requires one argument"))
1623 try:
1623 try:
1624 # i18n: "rev" is a keyword
1624 # i18n: "rev" is a keyword
1625 l = int(getstring(l[0], _("rev requires a number")))
1625 l = int(getstring(l[0], _("rev requires a number")))
1626 except (TypeError, ValueError):
1626 except (TypeError, ValueError):
1627 # i18n: "rev" is a keyword
1627 # i18n: "rev" is a keyword
1628 raise error.ParseError(_("rev expects a number"))
1628 raise error.ParseError(_("rev expects a number"))
1629 if l not in repo.changelog and l != node.nullrev:
1629 if l not in repo.changelog and l != node.nullrev:
1630 return baseset()
1630 return baseset()
1631 return subset & baseset([l])
1631 return subset & baseset([l])
1632
1632
1633 def matching(repo, subset, x):
1633 def matching(repo, subset, x):
1634 """``matching(revision [, field])``
1634 """``matching(revision [, field])``
1635 Changesets in which a given set of fields match the set of fields in the
1635 Changesets in which a given set of fields match the set of fields in the
1636 selected revision or set.
1636 selected revision or set.
1637
1637
1638 To match more than one field pass the list of fields to match separated
1638 To match more than one field pass the list of fields to match separated
1639 by spaces (e.g. ``author description``).
1639 by spaces (e.g. ``author description``).
1640
1640
1641 Valid fields are most regular revision fields and some special fields.
1641 Valid fields are most regular revision fields and some special fields.
1642
1642
1643 Regular revision fields are ``description``, ``author``, ``branch``,
1643 Regular revision fields are ``description``, ``author``, ``branch``,
1644 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1644 ``date``, ``files``, ``phase``, ``parents``, ``substate``, ``user``
1645 and ``diff``.
1645 and ``diff``.
1646 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1646 Note that ``author`` and ``user`` are synonyms. ``diff`` refers to the
1647 contents of the revision. Two revisions matching their ``diff`` will
1647 contents of the revision. Two revisions matching their ``diff`` will
1648 also match their ``files``.
1648 also match their ``files``.
1649
1649
1650 Special fields are ``summary`` and ``metadata``:
1650 Special fields are ``summary`` and ``metadata``:
1651 ``summary`` matches the first line of the description.
1651 ``summary`` matches the first line of the description.
1652 ``metadata`` is equivalent to matching ``description user date``
1652 ``metadata`` is equivalent to matching ``description user date``
1653 (i.e. it matches the main metadata fields).
1653 (i.e. it matches the main metadata fields).
1654
1654
1655 ``metadata`` is the default field which is used when no fields are
1655 ``metadata`` is the default field which is used when no fields are
1656 specified. You can match more than one field at a time.
1656 specified. You can match more than one field at a time.
1657 """
1657 """
1658 # i18n: "matching" is a keyword
1658 # i18n: "matching" is a keyword
1659 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1659 l = getargs(x, 1, 2, _("matching takes 1 or 2 arguments"))
1660
1660
1661 revs = getset(repo, fullreposet(repo), l[0])
1661 revs = getset(repo, fullreposet(repo), l[0])
1662
1662
1663 fieldlist = ['metadata']
1663 fieldlist = ['metadata']
1664 if len(l) > 1:
1664 if len(l) > 1:
1665 fieldlist = getstring(l[1],
1665 fieldlist = getstring(l[1],
1666 # i18n: "matching" is a keyword
1666 # i18n: "matching" is a keyword
1667 _("matching requires a string "
1667 _("matching requires a string "
1668 "as its second argument")).split()
1668 "as its second argument")).split()
1669
1669
1670 # Make sure that there are no repeated fields,
1670 # Make sure that there are no repeated fields,
1671 # expand the 'special' 'metadata' field type
1671 # expand the 'special' 'metadata' field type
1672 # and check the 'files' whenever we check the 'diff'
1672 # and check the 'files' whenever we check the 'diff'
1673 fields = []
1673 fields = []
1674 for field in fieldlist:
1674 for field in fieldlist:
1675 if field == 'metadata':
1675 if field == 'metadata':
1676 fields += ['user', 'description', 'date']
1676 fields += ['user', 'description', 'date']
1677 elif field == 'diff':
1677 elif field == 'diff':
1678 # a revision matching the diff must also match the files
1678 # a revision matching the diff must also match the files
1679 # since matching the diff is very costly, make sure to
1679 # since matching the diff is very costly, make sure to
1680 # also match the files first
1680 # also match the files first
1681 fields += ['files', 'diff']
1681 fields += ['files', 'diff']
1682 else:
1682 else:
1683 if field == 'author':
1683 if field == 'author':
1684 field = 'user'
1684 field = 'user'
1685 fields.append(field)
1685 fields.append(field)
1686 fields = set(fields)
1686 fields = set(fields)
1687 if 'summary' in fields and 'description' in fields:
1687 if 'summary' in fields and 'description' in fields:
1688 # If a revision matches its description it also matches its summary
1688 # If a revision matches its description it also matches its summary
1689 fields.discard('summary')
1689 fields.discard('summary')
1690
1690
1691 # We may want to match more than one field
1691 # We may want to match more than one field
1692 # Not all fields take the same amount of time to be matched
1692 # Not all fields take the same amount of time to be matched
1693 # Sort the selected fields in order of increasing matching cost
1693 # Sort the selected fields in order of increasing matching cost
1694 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1694 fieldorder = ['phase', 'parents', 'user', 'date', 'branch', 'summary',
1695 'files', 'description', 'substate', 'diff']
1695 'files', 'description', 'substate', 'diff']
1696 def fieldkeyfunc(f):
1696 def fieldkeyfunc(f):
1697 try:
1697 try:
1698 return fieldorder.index(f)
1698 return fieldorder.index(f)
1699 except ValueError:
1699 except ValueError:
1700 # assume an unknown field is very costly
1700 # assume an unknown field is very costly
1701 return len(fieldorder)
1701 return len(fieldorder)
1702 fields = list(fields)
1702 fields = list(fields)
1703 fields.sort(key=fieldkeyfunc)
1703 fields.sort(key=fieldkeyfunc)
1704
1704
1705 # Each field will be matched with its own "getfield" function
1705 # Each field will be matched with its own "getfield" function
1706 # which will be added to the getfieldfuncs array of functions
1706 # which will be added to the getfieldfuncs array of functions
1707 getfieldfuncs = []
1707 getfieldfuncs = []
1708 _funcs = {
1708 _funcs = {
1709 'user': lambda r: repo[r].user(),
1709 'user': lambda r: repo[r].user(),
1710 'branch': lambda r: repo[r].branch(),
1710 'branch': lambda r: repo[r].branch(),
1711 'date': lambda r: repo[r].date(),
1711 'date': lambda r: repo[r].date(),
1712 'description': lambda r: repo[r].description(),
1712 'description': lambda r: repo[r].description(),
1713 'files': lambda r: repo[r].files(),
1713 'files': lambda r: repo[r].files(),
1714 'parents': lambda r: repo[r].parents(),
1714 'parents': lambda r: repo[r].parents(),
1715 'phase': lambda r: repo[r].phase(),
1715 'phase': lambda r: repo[r].phase(),
1716 'substate': lambda r: repo[r].substate,
1716 'substate': lambda r: repo[r].substate,
1717 'summary': lambda r: repo[r].description().splitlines()[0],
1717 'summary': lambda r: repo[r].description().splitlines()[0],
1718 'diff': lambda r: list(repo[r].diff(git=True),)
1718 'diff': lambda r: list(repo[r].diff(git=True),)
1719 }
1719 }
1720 for info in fields:
1720 for info in fields:
1721 getfield = _funcs.get(info, None)
1721 getfield = _funcs.get(info, None)
1722 if getfield is None:
1722 if getfield is None:
1723 raise error.ParseError(
1723 raise error.ParseError(
1724 # i18n: "matching" is a keyword
1724 # i18n: "matching" is a keyword
1725 _("unexpected field name passed to matching: %s") % info)
1725 _("unexpected field name passed to matching: %s") % info)
1726 getfieldfuncs.append(getfield)
1726 getfieldfuncs.append(getfield)
1727 # convert the getfield array of functions into a "getinfo" function
1727 # convert the getfield array of functions into a "getinfo" function
1728 # which returns an array of field values (or a single value if there
1728 # which returns an array of field values (or a single value if there
1729 # is only one field to match)
1729 # is only one field to match)
1730 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1730 getinfo = lambda r: [f(r) for f in getfieldfuncs]
1731
1731
1732 def matches(x):
1732 def matches(x):
1733 for rev in revs:
1733 for rev in revs:
1734 target = getinfo(rev)
1734 target = getinfo(rev)
1735 match = True
1735 match = True
1736 for n, f in enumerate(getfieldfuncs):
1736 for n, f in enumerate(getfieldfuncs):
1737 if target[n] != f(x):
1737 if target[n] != f(x):
1738 match = False
1738 match = False
1739 if match:
1739 if match:
1740 return True
1740 return True
1741 return False
1741 return False
1742
1742
1743 return subset.filter(matches)
1743 return subset.filter(matches)
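
# Illustrative usage (not part of the original module; the revision number is
# hypothetical):
#
#   hg log -r "matching(42, 'author date')"
#
# selects changesets whose author and date both equal those of revision 42,
# while a bare matching(42) compares the default 'metadata' fields
# (description, user and date) as described in the docstring above.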
1744
1744
1745 def reverse(repo, subset, x):
1745 def reverse(repo, subset, x):
1746 """``reverse(set)``
1746 """``reverse(set)``
1747 Reverse order of set.
1747 Reverse order of set.
1748 """
1748 """
1749 l = getset(repo, subset, x)
1749 l = getset(repo, subset, x)
1750 l.reverse()
1750 l.reverse()
1751 return l
1751 return l
1752
1752
1753 def roots(repo, subset, x):
1753 def roots(repo, subset, x):
1754 """``roots(set)``
1754 """``roots(set)``
1755 Changesets in set with no parent changeset in set.
1755 Changesets in set with no parent changeset in set.
1756 """
1756 """
1757 s = getset(repo, fullreposet(repo), x)
1757 s = getset(repo, fullreposet(repo), x)
1758 parents = repo.changelog.parentrevs
1758 parents = repo.changelog.parentrevs
1759 def filter(r):
1759 def filter(r):
1760 for p in parents(r):
1760 for p in parents(r):
1761 if 0 <= p and p in s:
1761 if 0 <= p and p in s:
1762 return False
1762 return False
1763 return True
1763 return True
1764 return subset & s.filter(filter)
1764 return subset & s.filter(filter)
1765
1765
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        # i18n: "sort" is a keyword
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        return "".join(chr(255 - ord(c)) for c in s)
    revs = getset(repo, subset, s)
    if keys == ["rev"]:
        revs.sort()
        return revs
    elif keys == ["-rev"]:
        revs.sort(reverse=True)
        return revs
    for r in revs:
        c = repo[r]
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in 'user author':
                e.append(c.user())
            elif k in '-user -author':
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        e.append(r)
        l.append(e)
    l.sort()
    return baseset([e[-1] for e in l])

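# Worked example (illustrative, not part of the original module): invert()
# above maps each byte c to chr(255 - ord(c)), so comparing the inverted
# strings reverses ordinary lexicographic order. Under Python 2 byte strings,
# as used throughout this module:
#
#   >>> def invert(s):
#   ...     return "".join(chr(255 - ord(c)) for c in s)
#   >>> sorted(['alpha', 'beta'], key=invert)
#   ['beta', 'alpha']
#
# which is why a '-branch', '-desc' or '-user' key appends invert(...) to the
# per-revision key list: that single component sorts descending while the
# surrounding list sort stays ascending.
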
1829 def subrepo(repo, subset, x):
1829 def subrepo(repo, subset, x):
1830 """``subrepo([pattern])``
1830 """``subrepo([pattern])``
1831 Changesets that add, modify or remove the given subrepo. If no subrepo
1831 Changesets that add, modify or remove the given subrepo. If no subrepo
1832 pattern is named, any subrepo changes are returned.
1832 pattern is named, any subrepo changes are returned.
1833 """
1833 """
1834 # i18n: "subrepo" is a keyword
1834 # i18n: "subrepo" is a keyword
1835 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1835 args = getargs(x, 0, 1, _('subrepo takes at most one argument'))
1836 if len(args) != 0:
1836 if len(args) != 0:
1837 pat = getstring(args[0], _("subrepo requires a pattern"))
1837 pat = getstring(args[0], _("subrepo requires a pattern"))
1838
1838
1839 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1839 m = matchmod.exact(repo.root, repo.root, ['.hgsubstate'])
1840
1840
1841 def submatches(names):
1841 def submatches(names):
1842 k, p, m = _stringmatcher(pat)
1842 k, p, m = _stringmatcher(pat)
1843 for name in names:
1843 for name in names:
1844 if m(name):
1844 if m(name):
1845 yield name
1845 yield name
1846
1846
1847 def matches(x):
1847 def matches(x):
1848 c = repo[x]
1848 c = repo[x]
1849 s = repo.status(c.p1().node(), c.node(), match=m)
1849 s = repo.status(c.p1().node(), c.node(), match=m)
1850
1850
1851 if len(args) == 0:
1851 if len(args) == 0:
1852 return s.added or s.modified or s.removed
1852 return s.added or s.modified or s.removed
1853
1853
1854 if s.added:
1854 if s.added:
1855 return any(submatches(c.substate.keys()))
1855 return any(submatches(c.substate.keys()))
1856
1856
1857 if s.modified:
1857 if s.modified:
1858 subs = set(c.p1().substate.keys())
1858 subs = set(c.p1().substate.keys())
1859 subs.update(c.substate.keys())
1859 subs.update(c.substate.keys())
1860
1860
1861 for path in submatches(subs):
1861 for path in submatches(subs):
1862 if c.p1().substate.get(path) != c.substate.get(path):
1862 if c.p1().substate.get(path) != c.substate.get(path):
1863 return True
1863 return True
1864
1864
1865 if s.removed:
1865 if s.removed:
1866 return any(submatches(c.p1().substate.keys()))
1866 return any(submatches(c.p1().substate.keys()))
1867
1867
1868 return False
1868 return False
1869
1869
1870 return subset.filter(matches)
1870 return subset.filter(matches)
1871
1871
1872 def _stringmatcher(pattern):
1872 def _stringmatcher(pattern):
1873 """
1873 """
1874 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1874 accepts a string, possibly starting with 're:' or 'literal:' prefix.
1875 returns the matcher name, pattern, and matcher function.
1875 returns the matcher name, pattern, and matcher function.
1876 missing or unknown prefixes are treated as literal matches.
1876 missing or unknown prefixes are treated as literal matches.
1877
1877
1878 helper for tests:
1878 helper for tests:
1879 >>> def test(pattern, *tests):
1879 >>> def test(pattern, *tests):
1880 ... kind, pattern, matcher = _stringmatcher(pattern)
1880 ... kind, pattern, matcher = _stringmatcher(pattern)
1881 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1881 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
1882
1882
1883 exact matching (no prefix):
1883 exact matching (no prefix):
1884 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1884 >>> test('abcdefg', 'abc', 'def', 'abcdefg')
1885 ('literal', 'abcdefg', [False, False, True])
1885 ('literal', 'abcdefg', [False, False, True])
1886
1886
1887 regex matching ('re:' prefix)
1887 regex matching ('re:' prefix)
1888 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1888 >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
1889 ('re', 'a.+b', [False, False, True])
1889 ('re', 'a.+b', [False, False, True])
1890
1890
1891 force exact matches ('literal:' prefix)
1891 force exact matches ('literal:' prefix)
1892 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1892 >>> test('literal:re:foobar', 'foobar', 're:foobar')
1893 ('literal', 're:foobar', [False, True])
1893 ('literal', 're:foobar', [False, True])
1894
1894
1895 unknown prefixes are ignored and treated as literals
1895 unknown prefixes are ignored and treated as literals
1896 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1896 >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
1897 ('literal', 'foo:bar', [False, False, True])
1897 ('literal', 'foo:bar', [False, False, True])
1898 """
1898 """
1899 if pattern.startswith('re:'):
1899 if pattern.startswith('re:'):
1900 pattern = pattern[3:]
1900 pattern = pattern[3:]
1901 try:
1901 try:
1902 regex = re.compile(pattern)
1902 regex = re.compile(pattern)
1903 except re.error, e:
1903 except re.error, e:
1904 raise error.ParseError(_('invalid regular expression: %s')
1904 raise error.ParseError(_('invalid regular expression: %s')
1905 % e)
1905 % e)
1906 return 're', pattern, regex.search
1906 return 're', pattern, regex.search
1907 elif pattern.startswith('literal:'):
1907 elif pattern.startswith('literal:'):
1908 pattern = pattern[8:]
1908 pattern = pattern[8:]
1909 return 'literal', pattern, pattern.__eq__
1909 return 'literal', pattern, pattern.__eq__
1910
1910
1911 def _substringmatcher(pattern):
1911 def _substringmatcher(pattern):
1912 kind, pattern, matcher = _stringmatcher(pattern)
1912 kind, pattern, matcher = _stringmatcher(pattern)
1913 if kind == 'literal':
1913 if kind == 'literal':
1914 matcher = lambda s: pattern in s
1914 matcher = lambda s: pattern in s
1915 return kind, pattern, matcher
1915 return kind, pattern, matcher
1916
1916
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.

    If `name` starts with `re:`, the remainder of the name is treated as
    a regular expression. To match a tag that actually starts with `re:`,
    use the prefix `literal:`.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        pattern = getstring(args[0],
                            # i18n: "tag" is a keyword
                            _('the argument to tag must be a string'))
        kind, pattern, matcher = _stringmatcher(pattern)
        if kind == 'literal':
            # avoid resolving all tags
            tn = repo._tagscache.tags.get(pattern, None)
            if tn is None:
                raise error.RepoLookupError(_("tag '%s' does not exist")
                                            % pattern)
            s = set([repo[tn].rev()])
        else:
            s = set([cl.rev(n) for t, n in repo.tagslist() if matcher(t)])
    else:
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return subset & s

def tagged(repo, subset, x):
    return tag(repo, subset, x)

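# Illustrative usage (not part of the original module; tag names are
# hypothetical):
#
#   hg log -r "tag()"            # all tagged revisions (except 'tip')
#   hg log -r "tag('re:^v1\.')"  # revisions tagged v1.0, v1.1, ...
#
# following the literal/regular-expression rules in the docstring above.
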
1949 def unstable(repo, subset, x):
1949 def unstable(repo, subset, x):
1950 """``unstable()``
1950 """``unstable()``
1951 Non-obsolete changesets with obsolete ancestors.
1951 Non-obsolete changesets with obsolete ancestors.
1952 """
1952 """
1953 # i18n: "unstable" is a keyword
1953 # i18n: "unstable" is a keyword
1954 getargs(x, 0, 0, _("unstable takes no arguments"))
1954 getargs(x, 0, 0, _("unstable takes no arguments"))
1955 unstables = obsmod.getrevs(repo, 'unstable')
1955 unstables = obsmod.getrevs(repo, 'unstable')
1956 return subset & unstables
1956 return subset & unstables
1957
1957
1958
1958
1959 def user(repo, subset, x):
1959 def user(repo, subset, x):
1960 """``user(string)``
1960 """``user(string)``
1961 User name contains string. The match is case-insensitive.
1961 User name contains string. The match is case-insensitive.
1962
1962
1963 If `string` starts with `re:`, the remainder of the string is treated as
1963 If `string` starts with `re:`, the remainder of the string is treated as
1964 a regular expression. To match a user that actually contains `re:`, use
1964 a regular expression. To match a user that actually contains `re:`, use
1965 the prefix `literal:`.
1965 the prefix `literal:`.
1966 """
1966 """
1967 return author(repo, subset, x)
1967 return author(repo, subset, x)
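
# Illustrative usage (not part of the original module; the name is
# hypothetical):
#
#   hg log -r "user(alice)"
#
# matches changesets whose user field contains the substring "alice", while
# user("re:@example\.org$") would match by regular expression instead.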
1968
1968
1969 # experimental
1969 # experimental
1970 def wdir(repo, subset, x):
1970 def wdir(repo, subset, x):
1971 # i18n: "wdir" is a keyword
1971 # i18n: "wdir" is a keyword
1972 getargs(x, 0, 0, _("wdir takes no arguments"))
1972 getargs(x, 0, 0, _("wdir takes no arguments"))
1973 if None in subset or isinstance(subset, fullreposet):
1973 if None in subset or isinstance(subset, fullreposet):
1974 return baseset([None])
1974 return baseset([None])
1975 return baseset()
1975 return baseset()
1976
1976
1977 # for internal use
1977 # for internal use
1978 def _list(repo, subset, x):
1978 def _list(repo, subset, x):
1979 s = getstring(x, "internal error")
1979 s = getstring(x, "internal error")
1980 if not s:
1980 if not s:
1981 return baseset()
1981 return baseset()
1982 # remove duplicates here. it's difficult for caller to deduplicate sets
1982 # remove duplicates here. it's difficult for caller to deduplicate sets
1983 # because different symbols can point to the same rev.
1983 # because different symbols can point to the same rev.
1984 cl = repo.changelog
1984 cl = repo.changelog
1985 ls = []
1985 ls = []
1986 seen = set()
1986 seen = set()
1987 for t in s.split('\0'):
1987 for t in s.split('\0'):
1988 try:
1988 try:
1989 # fast path for integer revision
1989 # fast path for integer revision
1990 r = int(t)
1990 r = int(t)
1991 if str(r) != t or r not in cl:
1991 if str(r) != t or r not in cl:
1992 raise ValueError
1992 raise ValueError
1993 except ValueError:
1993 except ValueError:
1994 r = repo[t].rev()
1994 r = repo[t].rev()
1995 if r in seen:
1995 if r in seen:
1996 continue
1996 continue
1997 if (r in subset
1997 if (r in subset
1998 or r == node.nullrev and isinstance(subset, fullreposet)):
1998 or r == node.nullrev and isinstance(subset, fullreposet)):
1999 ls.append(r)
1999 ls.append(r)
2000 seen.add(r)
2000 seen.add(r)
2001 return baseset(ls)
2001 return baseset(ls)
2002
2002
2003 # for internal use
2003 # for internal use
2004 def _intlist(repo, subset, x):
2004 def _intlist(repo, subset, x):
2005 s = getstring(x, "internal error")
2005 s = getstring(x, "internal error")
2006 if not s:
2006 if not s:
2007 return baseset()
2007 return baseset()
2008 ls = [int(r) for r in s.split('\0')]
2008 ls = [int(r) for r in s.split('\0')]
2009 s = subset
2009 s = subset
2010 return baseset([r for r in ls if r in s])
2010 return baseset([r for r in ls if r in s])
2011
2011
2012 # for internal use
2012 # for internal use
2013 def _hexlist(repo, subset, x):
2013 def _hexlist(repo, subset, x):
2014 s = getstring(x, "internal error")
2014 s = getstring(x, "internal error")
2015 if not s:
2015 if not s:
2016 return baseset()
2016 return baseset()
2017 cl = repo.changelog
2017 cl = repo.changelog
2018 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2018 ls = [cl.rev(node.bin(r)) for r in s.split('\0')]
2019 s = subset
2019 s = subset
2020 return baseset([r for r in ls if r in s])
2020 return baseset([r for r in ls if r in s])
2021
2021
2022 symbols = {
2022 symbols = {
2023 "adds": adds,
2023 "adds": adds,
2024 "all": getall,
2024 "all": getall,
2025 "ancestor": ancestor,
2025 "ancestor": ancestor,
2026 "ancestors": ancestors,
2026 "ancestors": ancestors,
2027 "_firstancestors": _firstancestors,
2027 "_firstancestors": _firstancestors,
2028 "author": author,
2028 "author": author,
2029 "bisect": bisect,
2029 "bisect": bisect,
2030 "bisected": bisected,
2030 "bisected": bisected,
2031 "bookmark": bookmark,
2031 "bookmark": bookmark,
2032 "branch": branch,
2032 "branch": branch,
2033 "branchpoint": branchpoint,
2033 "branchpoint": branchpoint,
2034 "bumped": bumped,
2034 "bumped": bumped,
2035 "bundle": bundle,
2035 "bundle": bundle,
2036 "children": children,
2036 "children": children,
2037 "closed": closed,
2037 "closed": closed,
2038 "contains": contains,
2038 "contains": contains,
2039 "converted": converted,
2039 "converted": converted,
2040 "date": date,
2040 "date": date,
2041 "desc": desc,
2041 "desc": desc,
2042 "descendants": descendants,
2042 "descendants": descendants,
2043 "_firstdescendants": _firstdescendants,
2043 "_firstdescendants": _firstdescendants,
2044 "destination": destination,
2044 "destination": destination,
2045 "divergent": divergent,
2045 "divergent": divergent,
2046 "draft": draft,
2046 "draft": draft,
2047 "extinct": extinct,
2047 "extinct": extinct,
2048 "extra": extra,
2048 "extra": extra,
2049 "file": hasfile,
2049 "file": hasfile,
2050 "filelog": filelog,
2050 "filelog": filelog,
2051 "first": first,
2051 "first": first,
2052 "follow": follow,
2052 "follow": follow,
2053 "_followfirst": _followfirst,
2053 "_followfirst": _followfirst,
2054 "grep": grep,
2054 "grep": grep,
2055 "head": head,
2055 "head": head,
2056 "heads": heads,
2056 "heads": heads,
2057 "hidden": hidden,
2057 "hidden": hidden,
2058 "id": node_,
2058 "id": node_,
2059 "keyword": keyword,
2059 "keyword": keyword,
2060 "last": last,
2060 "last": last,
2061 "limit": limit,
2061 "limit": limit,
2062 "_matchfiles": _matchfiles,
2062 "_matchfiles": _matchfiles,
2063 "max": maxrev,
2063 "max": maxrev,
2064 "merge": merge,
2064 "merge": merge,
2065 "min": minrev,
2065 "min": minrev,
2066 "modifies": modifies,
2066 "modifies": modifies,
2067 "named": named,
2067 "named": named,
2068 "obsolete": obsolete,
2068 "obsolete": obsolete,
2069 "only": only,
2069 "only": only,
2070 "origin": origin,
2070 "origin": origin,
2071 "outgoing": outgoing,
2071 "outgoing": outgoing,
2072 "p1": p1,
2072 "p1": p1,
2073 "p2": p2,
2073 "p2": p2,
2074 "parents": parents,
2074 "parents": parents,
2075 "present": present,
2075 "present": present,
2076 "public": public,
2076 "public": public,
2077 "_notpublic": _notpublic,
2077 "_notpublic": _notpublic,
2078 "remote": remote,
2078 "remote": remote,
2079 "removes": removes,
2079 "removes": removes,
2080 "rev": rev,
2080 "rev": rev,
2081 "reverse": reverse,
2081 "reverse": reverse,
2082 "roots": roots,
2082 "roots": roots,
2083 "sort": sort,
2083 "sort": sort,
2084 "secret": secret,
2084 "secret": secret,
2085 "subrepo": subrepo,
2085 "subrepo": subrepo,
2086 "matching": matching,
2086 "matching": matching,
2087 "tag": tag,
2087 "tag": tag,
2088 "tagged": tagged,
2088 "tagged": tagged,
2089 "user": user,
2089 "user": user,
2090 "unstable": unstable,
2090 "unstable": unstable,
2091 "wdir": wdir,
2091 "wdir": wdir,
2092 "_list": _list,
2092 "_list": _list,
2093 "_intlist": _intlist,
2093 "_intlist": _intlist,
2094 "_hexlist": _hexlist,
2094 "_hexlist": _hexlist,
2095 }
2095 }
2096
2096
2097 # symbols which can't be used for a DoS attack for any given input
2097 # symbols which can't be used for a DoS attack for any given input
2098 # (e.g. those which accept regexes as plain strings shouldn't be included)
2098 # (e.g. those which accept regexes as plain strings shouldn't be included)
2099 # functions that just return a lot of changesets (like all) don't count here
2099 # functions that just return a lot of changesets (like all) don't count here
2100 safesymbols = set([
2100 safesymbols = set([
2101 "adds",
2101 "adds",
2102 "all",
2102 "all",
2103 "ancestor",
2103 "ancestor",
2104 "ancestors",
2104 "ancestors",
2105 "_firstancestors",
2105 "_firstancestors",
2106 "author",
2106 "author",
2107 "bisect",
2107 "bisect",
2108 "bisected",
2108 "bisected",
2109 "bookmark",
2109 "bookmark",
2110 "branch",
2110 "branch",
2111 "branchpoint",
2111 "branchpoint",
2112 "bumped",
2112 "bumped",
2113 "bundle",
2113 "bundle",
2114 "children",
2114 "children",
2115 "closed",
2115 "closed",
2116 "converted",
2116 "converted",
2117 "date",
2117 "date",
2118 "desc",
2118 "desc",
2119 "descendants",
2119 "descendants",
2120 "_firstdescendants",
2120 "_firstdescendants",
2121 "destination",
2121 "destination",
2122 "divergent",
2122 "divergent",
2123 "draft",
2123 "draft",
2124 "extinct",
2124 "extinct",
2125 "extra",
2125 "extra",
2126 "file",
2126 "file",
2127 "filelog",
2127 "filelog",
2128 "first",
2128 "first",
2129 "follow",
2129 "follow",
2130 "_followfirst",
2130 "_followfirst",
2131 "head",
2131 "head",
2132 "heads",
2132 "heads",
2133 "hidden",
2133 "hidden",
2134 "id",
2134 "id",
2135 "keyword",
2135 "keyword",
2136 "last",
2136 "last",
2137 "limit",
2137 "limit",
2138 "_matchfiles",
2138 "_matchfiles",
2139 "max",
2139 "max",
2140 "merge",
2140 "merge",
2141 "min",
2141 "min",
2142 "modifies",
2142 "modifies",
2143 "obsolete",
2143 "obsolete",
2144 "only",
2144 "only",
2145 "origin",
2145 "origin",
2146 "outgoing",
2146 "outgoing",
2147 "p1",
2147 "p1",
2148 "p2",
2148 "p2",
2149 "parents",
2149 "parents",
2150 "present",
2150 "present",
2151 "public",
2151 "public",
2152 "_notpublic",
2152 "_notpublic",
2153 "remote",
2153 "remote",
2154 "removes",
2154 "removes",
2155 "rev",
2155 "rev",
2156 "reverse",
2156 "reverse",
2157 "roots",
2157 "roots",
2158 "sort",
2158 "sort",
2159 "secret",
2159 "secret",
2160 "matching",
2160 "matching",
2161 "tag",
2161 "tag",
2162 "tagged",
2162 "tagged",
2163 "user",
2163 "user",
2164 "unstable",
2164 "unstable",
2165 "wdir",
2165 "wdir",
2166 "_list",
2166 "_list",
2167 "_intlist",
2167 "_intlist",
2168 "_hexlist",
2168 "_hexlist",
2169 ])
2169 ])
2170
2170
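# Editorial note: consistent with the comment above, symbols that take
# user-supplied regular expressions or are otherwise very expensive (for
# example "grep" and "contains" from the symbols table) do not appear in
# safesymbols.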
2171 methods = {
2171 methods = {
2172 "range": rangeset,
2172 "range": rangeset,
2173 "dagrange": dagrange,
2173 "dagrange": dagrange,
2174 "string": stringset,
2174 "string": stringset,
2175 "symbol": stringset,
2175 "symbol": stringset,
2176 "and": andset,
2176 "and": andset,
2177 "or": orset,
2177 "or": orset,
2178 "not": notset,
2178 "not": notset,
2179 "list": listset,
2179 "list": listset,
2180 "func": func,
2180 "func": func,
2181 "ancestor": ancestorspec,
2181 "ancestor": ancestorspec,
2182 "parent": parentspec,
2182 "parent": parentspec,
2183 "parentpost": p1,
2183 "parentpost": p1,
2184 }
2184 }
2185
2185
2186 def optimize(x, small):
2186 def optimize(x, small):
2187 if x is None:
2187 if x is None:
2188 return 0, x
2188 return 0, x
2189
2189
2190 smallbonus = 1
2190 smallbonus = 1
2191 if small:
2191 if small:
2192 smallbonus = .5
2192 smallbonus = .5
2193
2193
2194 op = x[0]
2194 op = x[0]
2195 if op == 'minus':
2195 if op == 'minus':
2196 return optimize(('and', x[1], ('not', x[2])), small)
2196 return optimize(('and', x[1], ('not', x[2])), small)
2197 elif op == 'only':
2197 elif op == 'only':
2198 return optimize(('func', ('symbol', 'only'),
2198 return optimize(('func', ('symbol', 'only'),
2199 ('list', x[1], x[2])), small)
2199 ('list', x[1], x[2])), small)
2200 elif op == 'onlypost':
2200 elif op == 'onlypost':
2201 return optimize(('func', ('symbol', 'only'), x[1]), small)
2201 return optimize(('func', ('symbol', 'only'), x[1]), small)
2202 elif op == 'dagrangepre':
2202 elif op == 'dagrangepre':
2203 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2203 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
2204 elif op == 'dagrangepost':
2204 elif op == 'dagrangepost':
2205 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2205 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
2206 elif op == 'rangepre':
2206 elif op == 'rangepre':
2207 return optimize(('range', ('string', '0'), x[1]), small)
2207 return optimize(('range', ('string', '0'), x[1]), small)
2208 elif op == 'rangepost':
2208 elif op == 'rangepost':
2209 return optimize(('range', x[1], ('string', 'tip')), small)
2209 return optimize(('range', x[1], ('string', 'tip')), small)
2210 elif op == 'negate':
2210 elif op == 'negate':
2211 return optimize(('string',
2211 return optimize(('string',
2212 '-' + getstring(x[1], _("can't negate that"))), small)
2212 '-' + getstring(x[1], _("can't negate that"))), small)
2213 elif op in 'string symbol negate':
2213 elif op in 'string symbol negate':
2214 return smallbonus, x # single revisions are small
2214 return smallbonus, x # single revisions are small
2215 elif op == 'and':
2215 elif op == 'and':
2216 wa, ta = optimize(x[1], True)
2216 wa, ta = optimize(x[1], True)
2217 wb, tb = optimize(x[2], True)
2217 wb, tb = optimize(x[2], True)
2218
2218
2219 # (::x and not ::y)/(not ::y and ::x) have a fast path
2219 # (::x and not ::y)/(not ::y and ::x) have a fast path
2220 def isonly(revs, bases):
2220 def isonly(revs, bases):
2221 return (
2221 return (
2222 revs[0] == 'func'
2222 revs[0] == 'func'
2223 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2223 and getstring(revs[1], _('not a symbol')) == 'ancestors'
2224 and bases[0] == 'not'
2224 and bases[0] == 'not'
2225 and bases[1][0] == 'func'
2225 and bases[1][0] == 'func'
2226 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2226 and getstring(bases[1][1], _('not a symbol')) == 'ancestors')
2227
2227
2228 w = min(wa, wb)
2228 w = min(wa, wb)
2229 if isonly(ta, tb):
2229 if isonly(ta, tb):
2230 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2230 return w, ('func', ('symbol', 'only'), ('list', ta[2], tb[1][2]))
2231 if isonly(tb, ta):
2231 if isonly(tb, ta):
2232 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2232 return w, ('func', ('symbol', 'only'), ('list', tb[2], ta[1][2]))
2233
2233
2234 if wa > wb:
2234 if wa > wb:
2235 return w, (op, tb, ta)
2235 return w, (op, tb, ta)
2236 return w, (op, ta, tb)
2236 return w, (op, ta, tb)
2237 elif op == 'or':
2237 elif op == 'or':
2238 # fast path for machine-generated expressions, which are likely to have
2238 # fast path for machine-generated expressions, which are likely to have
2239 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2239 # lots of trivial revisions: 'a + b + c()' to '_list(a b) + c()'
2240 ws, ts, ss = [], [], []
2240 ws, ts, ss = [], [], []
2241 def flushss():
2241 def flushss():
2242 if not ss:
2242 if not ss:
2243 return
2243 return
2244 if len(ss) == 1:
2244 if len(ss) == 1:
2245 w, t = ss[0]
2245 w, t = ss[0]
2246 else:
2246 else:
2247 s = '\0'.join(t[1] for w, t in ss)
2247 s = '\0'.join(t[1] for w, t in ss)
2248 y = ('func', ('symbol', '_list'), ('string', s))
2248 y = ('func', ('symbol', '_list'), ('string', s))
2249 w, t = optimize(y, False)
2249 w, t = optimize(y, False)
2250 ws.append(w)
2250 ws.append(w)
2251 ts.append(t)
2251 ts.append(t)
2252 del ss[:]
2252 del ss[:]
2253 for y in x[1:]:
2253 for y in x[1:]:
2254 w, t = optimize(y, False)
2254 w, t = optimize(y, False)
2255 if t[0] == 'string' or t[0] == 'symbol':
2255 if t[0] == 'string' or t[0] == 'symbol':
2256 ss.append((w, t))
2256 ss.append((w, t))
2257 continue
2257 continue
2258 flushss()
2258 flushss()
2259 ws.append(w)
2259 ws.append(w)
2260 ts.append(t)
2260 ts.append(t)
2261 flushss()
2261 flushss()
2262 if len(ts) == 1:
2262 if len(ts) == 1:
2263 return ws[0], ts[0] # 'or' operation is fully optimized out
2263 return ws[0], ts[0] # 'or' operation is fully optimized out
2264 # we can't reorder trees by weight because it would change the order.
2264 # we can't reorder trees by weight because it would change the order.
2265 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2265 # ("sort(a + b)" == "sort(b + a)", but "a + b" != "b + a")
2266 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2266 # ts = tuple(t for w, t in sorted(zip(ws, ts), key=lambda wt: wt[0]))
2267 return max(ws), (op,) + tuple(ts)
2267 return max(ws), (op,) + tuple(ts)
2268 elif op == 'not':
2268 elif op == 'not':
2269 # Optimize not public() to _notpublic() because we have a fast version
2269 # Optimize not public() to _notpublic() because we have a fast version
2270 if x[1] == ('func', ('symbol', 'public'), None):
2270 if x[1] == ('func', ('symbol', 'public'), None):
2271 newsym = ('func', ('symbol', '_notpublic'), None)
2271 newsym = ('func', ('symbol', '_notpublic'), None)
2272 o = optimize(newsym, not small)
2272 o = optimize(newsym, not small)
2273 return o[0], o[1]
2273 return o[0], o[1]
2274 else:
2274 else:
2275 o = optimize(x[1], not small)
2275 o = optimize(x[1], not small)
2276 return o[0], (op, o[1])
2276 return o[0], (op, o[1])
2277 elif op == 'parentpost':
2277 elif op == 'parentpost':
2278 o = optimize(x[1], small)
2278 o = optimize(x[1], small)
2279 return o[0], (op, o[1])
2279 return o[0], (op, o[1])
2280 elif op == 'group':
2280 elif op == 'group':
2281 return optimize(x[1], small)
2281 return optimize(x[1], small)
2282 elif op in 'dagrange range list parent ancestorspec':
2282 elif op in 'dagrange range list parent ancestorspec':
2283 if op == 'parent':
2283 if op == 'parent':
2284 # x^:y means (x^) : y, not x ^ (:y)
2284 # x^:y means (x^) : y, not x ^ (:y)
2285 post = ('parentpost', x[1])
2285 post = ('parentpost', x[1])
2286 if x[2][0] == 'dagrangepre':
2286 if x[2][0] == 'dagrangepre':
2287 return optimize(('dagrange', post, x[2][1]), small)
2287 return optimize(('dagrange', post, x[2][1]), small)
2288 elif x[2][0] == 'rangepre':
2288 elif x[2][0] == 'rangepre':
2289 return optimize(('range', post, x[2][1]), small)
2289 return optimize(('range', post, x[2][1]), small)
2290
2290
2291 wa, ta = optimize(x[1], small)
2291 wa, ta = optimize(x[1], small)
2292 wb, tb = optimize(x[2], small)
2292 wb, tb = optimize(x[2], small)
2293 return wa + wb, (op, ta, tb)
2293 return wa + wb, (op, ta, tb)
2294 elif op == 'func':
2294 elif op == 'func':
2295 f = getstring(x[1], _("not a symbol"))
2295 f = getstring(x[1], _("not a symbol"))
2296 wa, ta = optimize(x[2], small)
2296 wa, ta = optimize(x[2], small)
2297 if f in ("author branch closed date desc file grep keyword "
2297 if f in ("author branch closed date desc file grep keyword "
2298 "outgoing user"):
2298 "outgoing user"):
2299 w = 10 # slow
2299 w = 10 # slow
2300 elif f in "modifies adds removes":
2300 elif f in "modifies adds removes":
2301 w = 30 # slower
2301 w = 30 # slower
2302 elif f == "contains":
2302 elif f == "contains":
2303 w = 100 # very slow
2303 w = 100 # very slow
2304 elif f == "ancestor":
2304 elif f == "ancestor":
2305 w = 1 * smallbonus
2305 w = 1 * smallbonus
2306 elif f in "reverse limit first _intlist":
2306 elif f in "reverse limit first _intlist":
2307 w = 0
2307 w = 0
2308 elif f in "sort":
2308 elif f in "sort":
2309 w = 10 # assume most sorts look at changelog
2309 w = 10 # assume most sorts look at changelog
2310 else:
2310 else:
2311 w = 1
2311 w = 1
2312 return w + wa, (op, x[1], ta)
2312 return w + wa, (op, x[1], ta)
2313 return 1, x
2313 return 1, x
2314
2314
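# Editorial sketch (illustrative only, not part of the original change):
# optimize() puts the cheaper operand of an 'and' first and collapses runs of
# trivial 'or' operands into a single _list() call. Roughly, assuming the
# parse() and prettyformat() helpers defined later in this module:
#
#   >>> weight, tree = optimize(parse('10 or 11 or 12'), True)
#   >>> prettyformat(tree)        # output shown approximately
#   (func
#     ('symbol', '_list')
#     ('string', '10\x0011\x0012'))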
2315 _aliasarg = ('func', ('symbol', '_aliasarg'))
2315 _aliasarg = ('func', ('symbol', '_aliasarg'))
2316 def _getaliasarg(tree):
2316 def _getaliasarg(tree):
2317 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2317 """If tree matches ('func', ('symbol', '_aliasarg'), ('string', X))
2318 return X; otherwise return None.
2318 return X; otherwise return None.
2319 """
2319 """
2320 if (len(tree) == 3 and tree[:2] == _aliasarg
2320 if (len(tree) == 3 and tree[:2] == _aliasarg
2321 and tree[2][0] == 'string'):
2321 and tree[2][0] == 'string'):
2322 return tree[2][1]
2322 return tree[2][1]
2323 return None
2323 return None
2324
2324
2325 def _checkaliasarg(tree, known=None):
2325 def _checkaliasarg(tree, known=None):
2326 """Check tree contains no _aliasarg construct or only ones which
2326 """Check tree contains no _aliasarg construct or only ones which
2327 value is in known. Used to avoid alias placeholders injection.
2327 value is in known. Used to avoid alias placeholders injection.
2328 """
2328 """
2329 if isinstance(tree, tuple):
2329 if isinstance(tree, tuple):
2330 arg = _getaliasarg(tree)
2330 arg = _getaliasarg(tree)
2331 if arg is not None and (not known or arg not in known):
2331 if arg is not None and (not known or arg not in known):
2332 raise error.UnknownIdentifier('_aliasarg', [])
2332 raise error.UnknownIdentifier('_aliasarg', [])
2333 for t in tree:
2333 for t in tree:
2334 _checkaliasarg(t, known)
2334 _checkaliasarg(t, known)
2335
2335
2336 # the set of valid characters for the initial letter of symbols in
2336 # the set of valid characters for the initial letter of symbols in
2337 # alias declarations and definitions
2337 # alias declarations and definitions
2338 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2338 _aliassyminitletters = set(c for c in [chr(i) for i in xrange(256)]
2339 if c.isalnum() or c in '._@$' or ord(c) > 127)
2339 if c.isalnum() or c in '._@$' or ord(c) > 127)
2340
2340
2341 def _tokenizealias(program, lookup=None):
2341 def _tokenizealias(program, lookup=None):
2342 """Parse alias declaration/definition into a stream of tokens
2342 """Parse alias declaration/definition into a stream of tokens
2343
2343
2344 This also allows symbol names to use ``$`` as an initial letter
2344 This also allows symbol names to use ``$`` as an initial letter
2345 (for backward compatibility), and callers of this function should
2345 (for backward compatibility), and callers of this function should
2346 check whether ``$`` is also used for unexpected symbols.
2346 check whether ``$`` is also used for unexpected symbols.
2347 """
2347 """
2348 return tokenize(program, lookup=lookup,
2348 return tokenize(program, lookup=lookup,
2349 syminitletters=_aliassyminitletters)
2349 syminitletters=_aliassyminitletters)
2350
2350
2351 def _parsealiasdecl(decl):
2351 def _parsealiasdecl(decl):
2352 """Parse alias declaration ``decl``
2352 """Parse alias declaration ``decl``
2353
2353
2354 This returns a ``(name, tree, args, errorstr)`` tuple:
2354 This returns a ``(name, tree, args, errorstr)`` tuple:
2355
2355
2356 - ``name``: name of the declared alias (may be ``decl`` itself on error)
2356 - ``name``: name of the declared alias (may be ``decl`` itself on error)
2357 - ``tree``: parse result (or ``None`` on error)
2357 - ``tree``: parse result (or ``None`` on error)
2358 - ``args``: list of alias argument names (or None for symbol declaration)
2358 - ``args``: list of alias argument names (or None for symbol declaration)
2359 - ``errorstr``: detail about detected error (or None)
2359 - ``errorstr``: detail about detected error (or None)
2360
2360
2361 >>> _parsealiasdecl('foo')
2361 >>> _parsealiasdecl('foo')
2362 ('foo', ('symbol', 'foo'), None, None)
2362 ('foo', ('symbol', 'foo'), None, None)
2363 >>> _parsealiasdecl('$foo')
2363 >>> _parsealiasdecl('$foo')
2364 ('$foo', None, None, "'$' not for alias arguments")
2364 ('$foo', None, None, "'$' not for alias arguments")
2365 >>> _parsealiasdecl('foo::bar')
2365 >>> _parsealiasdecl('foo::bar')
2366 ('foo::bar', None, None, 'invalid format')
2366 ('foo::bar', None, None, 'invalid format')
2367 >>> _parsealiasdecl('foo bar')
2367 >>> _parsealiasdecl('foo bar')
2368 ('foo bar', None, None, 'at 4: invalid token')
2368 ('foo bar', None, None, 'at 4: invalid token')
2369 >>> _parsealiasdecl('foo()')
2369 >>> _parsealiasdecl('foo()')
2370 ('foo', ('func', ('symbol', 'foo')), [], None)
2370 ('foo', ('func', ('symbol', 'foo')), [], None)
2371 >>> _parsealiasdecl('$foo()')
2371 >>> _parsealiasdecl('$foo()')
2372 ('$foo()', None, None, "'$' not for alias arguments")
2372 ('$foo()', None, None, "'$' not for alias arguments")
2373 >>> _parsealiasdecl('foo($1, $2)')
2373 >>> _parsealiasdecl('foo($1, $2)')
2374 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2374 ('foo', ('func', ('symbol', 'foo')), ['$1', '$2'], None)
2375 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2375 >>> _parsealiasdecl('foo(bar_bar, baz.baz)')
2376 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2376 ('foo', ('func', ('symbol', 'foo')), ['bar_bar', 'baz.baz'], None)
2377 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2377 >>> _parsealiasdecl('foo($1, $2, nested($1, $2))')
2378 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2378 ('foo($1, $2, nested($1, $2))', None, None, 'invalid argument list')
2379 >>> _parsealiasdecl('foo(bar($1, $2))')
2379 >>> _parsealiasdecl('foo(bar($1, $2))')
2380 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2380 ('foo(bar($1, $2))', None, None, 'invalid argument list')
2381 >>> _parsealiasdecl('foo("string")')
2381 >>> _parsealiasdecl('foo("string")')
2382 ('foo("string")', None, None, 'invalid argument list')
2382 ('foo("string")', None, None, 'invalid argument list')
2383 >>> _parsealiasdecl('foo($1, $2')
2383 >>> _parsealiasdecl('foo($1, $2')
2384 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2384 ('foo($1, $2', None, None, 'at 10: unexpected token: end')
2385 >>> _parsealiasdecl('foo("string')
2385 >>> _parsealiasdecl('foo("string')
2386 ('foo("string', None, None, 'at 5: unterminated string')
2386 ('foo("string', None, None, 'at 5: unterminated string')
2387 >>> _parsealiasdecl('foo($1, $2, $1)')
2387 >>> _parsealiasdecl('foo($1, $2, $1)')
2388 ('foo', None, None, 'argument names collide with each other')
2388 ('foo', None, None, 'argument names collide with each other')
2389 """
2389 """
2390 p = parser.parser(_tokenizealias, elements)
2390 p = parser.parser(elements)
2391 try:
2391 try:
2392 tree, pos = p.parse(decl)
2392 tree, pos = p.parse(_tokenizealias(decl))
2393 if (pos != len(decl)):
2393 if (pos != len(decl)):
2394 raise error.ParseError(_('invalid token'), pos)
2394 raise error.ParseError(_('invalid token'), pos)
2395
2395
2396 if isvalidsymbol(tree):
2396 if isvalidsymbol(tree):
2397 # "name = ...." style
2397 # "name = ...." style
2398 name = getsymbol(tree)
2398 name = getsymbol(tree)
2399 if name.startswith('$'):
2399 if name.startswith('$'):
2400 return (decl, None, None, _("'$' not for alias arguments"))
2400 return (decl, None, None, _("'$' not for alias arguments"))
2401 return (name, ('symbol', name), None, None)
2401 return (name, ('symbol', name), None, None)
2402
2402
2403 if isvalidfunc(tree):
2403 if isvalidfunc(tree):
2404 # "name(arg, ....) = ...." style
2404 # "name(arg, ....) = ...." style
2405 name = getfuncname(tree)
2405 name = getfuncname(tree)
2406 if name.startswith('$'):
2406 if name.startswith('$'):
2407 return (decl, None, None, _("'$' not for alias arguments"))
2407 return (decl, None, None, _("'$' not for alias arguments"))
2408 args = []
2408 args = []
2409 for arg in getfuncargs(tree):
2409 for arg in getfuncargs(tree):
2410 if not isvalidsymbol(arg):
2410 if not isvalidsymbol(arg):
2411 return (decl, None, None, _("invalid argument list"))
2411 return (decl, None, None, _("invalid argument list"))
2412 args.append(getsymbol(arg))
2412 args.append(getsymbol(arg))
2413 if len(args) != len(set(args)):
2413 if len(args) != len(set(args)):
2414 return (name, None, None,
2414 return (name, None, None,
2415 _("argument names collide with each other"))
2415 _("argument names collide with each other"))
2416 return (name, ('func', ('symbol', name)), args, None)
2416 return (name, ('func', ('symbol', name)), args, None)
2417
2417
2418 return (decl, None, None, _("invalid format"))
2418 return (decl, None, None, _("invalid format"))
2419 except error.ParseError, inst:
2419 except error.ParseError, inst:
2420 return (decl, None, None, parseerrordetail(inst))
2420 return (decl, None, None, parseerrordetail(inst))
2421
2421
2422 def _parsealiasdefn(defn, args):
2422 def _parsealiasdefn(defn, args):
2423 """Parse alias definition ``defn``
2423 """Parse alias definition ``defn``
2424
2424
2425 This function also replaces alias argument references in the
2425 This function also replaces alias argument references in the
2426 specified definition by ``_aliasarg(ARGNAME)``.
2426 specified definition by ``_aliasarg(ARGNAME)``.
2427
2427
2428 ``args`` is a list of alias argument names, or None if the alias
2428 ``args`` is a list of alias argument names, or None if the alias
2429 is declared as a symbol.
2429 is declared as a symbol.
2430
2430
2431 This returns "tree" as parsing result.
2431 This returns "tree" as parsing result.
2432
2432
2433 >>> args = ['$1', '$2', 'foo']
2433 >>> args = ['$1', '$2', 'foo']
2434 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2434 >>> print prettyformat(_parsealiasdefn('$1 or foo', args))
2435 (or
2435 (or
2436 (func
2436 (func
2437 ('symbol', '_aliasarg')
2437 ('symbol', '_aliasarg')
2438 ('string', '$1'))
2438 ('string', '$1'))
2439 (func
2439 (func
2440 ('symbol', '_aliasarg')
2440 ('symbol', '_aliasarg')
2441 ('string', 'foo')))
2441 ('string', 'foo')))
2442 >>> try:
2442 >>> try:
2443 ... _parsealiasdefn('$1 or $bar', args)
2443 ... _parsealiasdefn('$1 or $bar', args)
2444 ... except error.ParseError, inst:
2444 ... except error.ParseError, inst:
2445 ... print parseerrordetail(inst)
2445 ... print parseerrordetail(inst)
2446 at 6: '$' not for alias arguments
2446 at 6: '$' not for alias arguments
2447 >>> args = ['$1', '$10', 'foo']
2447 >>> args = ['$1', '$10', 'foo']
2448 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2448 >>> print prettyformat(_parsealiasdefn('$10 or foobar', args))
2449 (or
2449 (or
2450 (func
2450 (func
2451 ('symbol', '_aliasarg')
2451 ('symbol', '_aliasarg')
2452 ('string', '$10'))
2452 ('string', '$10'))
2453 ('symbol', 'foobar'))
2453 ('symbol', 'foobar'))
2454 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2454 >>> print prettyformat(_parsealiasdefn('"$1" or "foo"', args))
2455 (or
2455 (or
2456 ('string', '$1')
2456 ('string', '$1')
2457 ('string', 'foo'))
2457 ('string', 'foo'))
2458 """
2458 """
2459 def tokenizedefn(program, lookup=None):
2459 def tokenizedefn(program, lookup=None):
2460 if args:
2460 if args:
2461 argset = set(args)
2461 argset = set(args)
2462 else:
2462 else:
2463 argset = set()
2463 argset = set()
2464
2464
2465 for t, value, pos in _tokenizealias(program, lookup=lookup):
2465 for t, value, pos in _tokenizealias(program, lookup=lookup):
2466 if t == 'symbol':
2466 if t == 'symbol':
2467 if value in argset:
2467 if value in argset:
2468 # emulate tokenization of "_aliasarg('ARGNAME')":
2468 # emulate tokenization of "_aliasarg('ARGNAME')":
2469 # "_aliasarg()" is an unknown symbol only used separate
2469 # "_aliasarg()" is an unknown symbol only used separate
2470 # alias argument placeholders from regular strings.
2470 # alias argument placeholders from regular strings.
2471 yield ('symbol', '_aliasarg', pos)
2471 yield ('symbol', '_aliasarg', pos)
2472 yield ('(', None, pos)
2472 yield ('(', None, pos)
2473 yield ('string', value, pos)
2473 yield ('string', value, pos)
2474 yield (')', None, pos)
2474 yield (')', None, pos)
2475 continue
2475 continue
2476 elif value.startswith('$'):
2476 elif value.startswith('$'):
2477 raise error.ParseError(_("'$' not for alias arguments"),
2477 raise error.ParseError(_("'$' not for alias arguments"),
2478 pos)
2478 pos)
2479 yield (t, value, pos)
2479 yield (t, value, pos)
2480
2480
2481 p = parser.parser(tokenizedefn, elements)
2481 p = parser.parser(elements)
2482 tree, pos = p.parse(defn)
2482 tree, pos = p.parse(tokenizedefn(defn))
2483 if pos != len(defn):
2483 if pos != len(defn):
2484 raise error.ParseError(_('invalid token'), pos)
2484 raise error.ParseError(_('invalid token'), pos)
2485 return parser.simplifyinfixops(tree, ('or',))
2485 return parser.simplifyinfixops(tree, ('or',))
2486
2486
2487 class revsetalias(object):
2487 class revsetalias(object):
2488 # whether this alias's own `error` information has already been shown.
2488 # whether this alias's own `error` information has already been shown.
2489 # this avoids showing the same warning multiple times at each `findaliases`.
2489 # this avoids showing the same warning multiple times at each `findaliases`.
2490 warned = False
2490 warned = False
2491
2491
2492 def __init__(self, name, value):
2492 def __init__(self, name, value):
2493 '''Aliases like:
2493 '''Aliases like:
2494
2494
2495 h = heads(default)
2495 h = heads(default)
2496 b($1) = ancestors($1) - ancestors(default)
2496 b($1) = ancestors($1) - ancestors(default)
2497 '''
2497 '''
2498 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2498 self.name, self.tree, self.args, self.error = _parsealiasdecl(name)
2499 if self.error:
2499 if self.error:
2500 self.error = _('failed to parse the declaration of revset alias'
2500 self.error = _('failed to parse the declaration of revset alias'
2501 ' "%s": %s') % (self.name, self.error)
2501 ' "%s": %s') % (self.name, self.error)
2502 return
2502 return
2503
2503
2504 try:
2504 try:
2505 self.replacement = _parsealiasdefn(value, self.args)
2505 self.replacement = _parsealiasdefn(value, self.args)
2506 # Check for placeholder injection
2506 # Check for placeholder injection
2507 _checkaliasarg(self.replacement, self.args)
2507 _checkaliasarg(self.replacement, self.args)
2508 except error.ParseError, inst:
2508 except error.ParseError, inst:
2509 self.error = _('failed to parse the definition of revset alias'
2509 self.error = _('failed to parse the definition of revset alias'
2510 ' "%s": %s') % (self.name, parseerrordetail(inst))
2510 ' "%s": %s') % (self.name, parseerrordetail(inst))
2511
2511
2512 def _getalias(aliases, tree):
2512 def _getalias(aliases, tree):
2513 """If tree looks like an unexpanded alias, return it. Return None
2513 """If tree looks like an unexpanded alias, return it. Return None
2514 otherwise.
2514 otherwise.
2515 """
2515 """
2516 if isinstance(tree, tuple) and tree:
2516 if isinstance(tree, tuple) and tree:
2517 if tree[0] == 'symbol' and len(tree) == 2:
2517 if tree[0] == 'symbol' and len(tree) == 2:
2518 name = tree[1]
2518 name = tree[1]
2519 alias = aliases.get(name)
2519 alias = aliases.get(name)
2520 if alias and alias.args is None and alias.tree == tree:
2520 if alias and alias.args is None and alias.tree == tree:
2521 return alias
2521 return alias
2522 if tree[0] == 'func' and len(tree) > 1:
2522 if tree[0] == 'func' and len(tree) > 1:
2523 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2523 if tree[1][0] == 'symbol' and len(tree[1]) == 2:
2524 name = tree[1][1]
2524 name = tree[1][1]
2525 alias = aliases.get(name)
2525 alias = aliases.get(name)
2526 if alias and alias.args is not None and alias.tree == tree[:2]:
2526 if alias and alias.args is not None and alias.tree == tree[:2]:
2527 return alias
2527 return alias
2528 return None
2528 return None
2529
2529
2530 def _expandargs(tree, args):
2530 def _expandargs(tree, args):
2531 """Replace _aliasarg instances with the substitution value of the
2531 """Replace _aliasarg instances with the substitution value of the
2532 same name in args, recursively.
2532 same name in args, recursively.
2533 """
2533 """
2534 if not tree or not isinstance(tree, tuple):
2534 if not tree or not isinstance(tree, tuple):
2535 return tree
2535 return tree
2536 arg = _getaliasarg(tree)
2536 arg = _getaliasarg(tree)
2537 if arg is not None:
2537 if arg is not None:
2538 return args[arg]
2538 return args[arg]
2539 return tuple(_expandargs(t, args) for t in tree)
2539 return tuple(_expandargs(t, args) for t in tree)
2540
2540
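# Editorial sketch: a concrete substitution, assuming an alias argument
# named '$1' bound to the symbol 'tip':
#
#   _expandargs(('func', ('symbol', '_aliasarg'), ('string', '$1')),
#               {'$1': ('symbol', 'tip')})
#   -> ('symbol', 'tip')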
2541 def _expandaliases(aliases, tree, expanding, cache):
2541 def _expandaliases(aliases, tree, expanding, cache):
2542 """Expand aliases in tree, recursively.
2542 """Expand aliases in tree, recursively.
2543
2543
2544 'aliases' is a dictionary mapping user defined aliases to
2544 'aliases' is a dictionary mapping user defined aliases to
2545 revsetalias objects.
2545 revsetalias objects.
2546 """
2546 """
2547 if not isinstance(tree, tuple):
2547 if not isinstance(tree, tuple):
2548 # Do not expand raw strings
2548 # Do not expand raw strings
2549 return tree
2549 return tree
2550 alias = _getalias(aliases, tree)
2550 alias = _getalias(aliases, tree)
2551 if alias is not None:
2551 if alias is not None:
2552 if alias.error:
2552 if alias.error:
2553 raise util.Abort(alias.error)
2553 raise util.Abort(alias.error)
2554 if alias in expanding:
2554 if alias in expanding:
2555 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2555 raise error.ParseError(_('infinite expansion of revset alias "%s" '
2556 'detected') % alias.name)
2556 'detected') % alias.name)
2557 expanding.append(alias)
2557 expanding.append(alias)
2558 if alias.name not in cache:
2558 if alias.name not in cache:
2559 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2559 cache[alias.name] = _expandaliases(aliases, alias.replacement,
2560 expanding, cache)
2560 expanding, cache)
2561 result = cache[alias.name]
2561 result = cache[alias.name]
2562 expanding.pop()
2562 expanding.pop()
2563 if alias.args is not None:
2563 if alias.args is not None:
2564 l = getlist(tree[2])
2564 l = getlist(tree[2])
2565 if len(l) != len(alias.args):
2565 if len(l) != len(alias.args):
2566 raise error.ParseError(
2566 raise error.ParseError(
2567 _('invalid number of arguments: %s') % len(l))
2567 _('invalid number of arguments: %s') % len(l))
2568 l = [_expandaliases(aliases, a, [], cache) for a in l]
2568 l = [_expandaliases(aliases, a, [], cache) for a in l]
2569 result = _expandargs(result, dict(zip(alias.args, l)))
2569 result = _expandargs(result, dict(zip(alias.args, l)))
2570 else:
2570 else:
2571 result = tuple(_expandaliases(aliases, t, expanding, cache)
2571 result = tuple(_expandaliases(aliases, t, expanding, cache)
2572 for t in tree)
2572 for t in tree)
2573 return result
2573 return result
2574
2574
2575 def findaliases(ui, tree, showwarning=None):
2575 def findaliases(ui, tree, showwarning=None):
2576 _checkaliasarg(tree)
2576 _checkaliasarg(tree)
2577 aliases = {}
2577 aliases = {}
2578 for k, v in ui.configitems('revsetalias'):
2578 for k, v in ui.configitems('revsetalias'):
2579 alias = revsetalias(k, v)
2579 alias = revsetalias(k, v)
2580 aliases[alias.name] = alias
2580 aliases[alias.name] = alias
2581 tree = _expandaliases(aliases, tree, [], {})
2581 tree = _expandaliases(aliases, tree, [], {})
2582 if showwarning:
2582 if showwarning:
2583 # warn about problematic (but not referenced) aliases
2583 # warn about problematic (but not referenced) aliases
2584 for name, alias in sorted(aliases.iteritems()):
2584 for name, alias in sorted(aliases.iteritems()):
2585 if alias.error and not alias.warned:
2585 if alias.error and not alias.warned:
2586 showwarning(_('warning: %s\n') % (alias.error))
2586 showwarning(_('warning: %s\n') % (alias.error))
2587 alias.warned = True
2587 alias.warned = True
2588 return tree
2588 return tree
2589
2589
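# Editorial sketch: findaliases() reads the [revsetalias] config section, so
# with an hgrc containing, for example:
#
#   [revsetalias]
#   h = heads(default)
#   b($1) = ancestors($1) - ancestors(default)
#
# a parsed tree containing the symbol 'h' or a call to 'b(...)' is rewritten
# into the corresponding definition before evaluation (the example aliases
# are taken from the revsetalias docstring above).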
2590 def foldconcat(tree):
2590 def foldconcat(tree):
2591 """Fold elements to be concatenated by `##`
2591 """Fold elements to be concatenated by `##`
2592 """
2592 """
2593 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2593 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2594 return tree
2594 return tree
2595 if tree[0] == '_concat':
2595 if tree[0] == '_concat':
2596 pending = [tree]
2596 pending = [tree]
2597 l = []
2597 l = []
2598 while pending:
2598 while pending:
2599 e = pending.pop()
2599 e = pending.pop()
2600 if e[0] == '_concat':
2600 if e[0] == '_concat':
2601 pending.extend(reversed(e[1:]))
2601 pending.extend(reversed(e[1:]))
2602 elif e[0] in ('string', 'symbol'):
2602 elif e[0] in ('string', 'symbol'):
2603 l.append(e[1])
2603 l.append(e[1])
2604 else:
2604 else:
2605 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2605 msg = _("\"##\" can't concatenate \"%s\" element") % (e[0])
2606 raise error.ParseError(msg)
2606 raise error.ParseError(msg)
2607 return ('string', ''.join(l))
2607 return ('string', ''.join(l))
2608 else:
2608 else:
2609 return tuple(foldconcat(t) for t in tree)
2609 return tuple(foldconcat(t) for t in tree)
2610
2610
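# Editorial sketch: foldconcat() collapses a tree of '_concat' nodes over
# string/symbol leaves into one string node, e.g. (hypothetical tree shape):
#
#   ('_concat', ('_concat', ('string', 'a'), ('string', 'b')), ('string', 'c'))
#   -> ('string', 'abc')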
2611 def parse(spec, lookup=None):
2611 def parse(spec, lookup=None):
2612 p = parser.parser(tokenize, elements)
2612 p = parser.parser(elements)
2613 tree, pos = p.parse(spec, lookup=lookup)
2613 tree, pos = p.parse(tokenize(spec, lookup=lookup))
2614 if pos != len(spec):
2614 if pos != len(spec):
2615 raise error.ParseError(_("invalid token"), pos)
2615 raise error.ParseError(_("invalid token"), pos)
2616 return parser.simplifyinfixops(tree, ('or',))
2616 return parser.simplifyinfixops(tree, ('or',))
2617
2617
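# Editorial note on the change visible above: parser.parser() now takes only
# the elements table, and parse() feeds it an iterator of tokens produced by
# tokenize(spec, lookup=lookup) instead of passing the tokenizer function and
# the program string separately (the same pattern is applied to the alias
# parsing helpers earlier in this file).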
2618 def posttreebuilthook(tree, repo):
2618 def posttreebuilthook(tree, repo):
2619 # hook for extensions to execute code on the optimized tree
2619 # hook for extensions to execute code on the optimized tree
2620 pass
2620 pass
2621
2621
2622 def match(ui, spec, repo=None):
2622 def match(ui, spec, repo=None):
2623 if not spec:
2623 if not spec:
2624 raise error.ParseError(_("empty query"))
2624 raise error.ParseError(_("empty query"))
2625 lookup = None
2625 lookup = None
2626 if repo:
2626 if repo:
2627 lookup = repo.__contains__
2627 lookup = repo.__contains__
2628 tree = parse(spec, lookup)
2628 tree = parse(spec, lookup)
2629 if ui:
2629 if ui:
2630 tree = findaliases(ui, tree, showwarning=ui.warn)
2630 tree = findaliases(ui, tree, showwarning=ui.warn)
2631 tree = foldconcat(tree)
2631 tree = foldconcat(tree)
2632 weight, tree = optimize(tree, True)
2632 weight, tree = optimize(tree, True)
2633 posttreebuilthook(tree, repo)
2633 posttreebuilthook(tree, repo)
2634 def mfunc(repo, subset=None):
2634 def mfunc(repo, subset=None):
2635 if subset is None:
2635 if subset is None:
2636 subset = fullreposet(repo)
2636 subset = fullreposet(repo)
2637 if util.safehasattr(subset, 'isascending'):
2637 if util.safehasattr(subset, 'isascending'):
2638 result = getset(repo, subset, tree)
2638 result = getset(repo, subset, tree)
2639 else:
2639 else:
2640 result = getset(repo, baseset(subset), tree)
2640 result = getset(repo, baseset(subset), tree)
2641 return result
2641 return result
2642 return mfunc
2642 return mfunc
2643
2643
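# Editorial sketch of typical usage (hedged; callers elsewhere in Mercurial
# follow roughly this pattern):
#
#   m = match(ui, 'heads(default)', repo)
#   revs = m(repo)            # subset defaults to fullreposet(repo)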
2644 def formatspec(expr, *args):
2644 def formatspec(expr, *args):
2645 '''
2645 '''
2646 This is a convenience function for using revsets internally, and
2646 This is a convenience function for using revsets internally, and
2647 escapes arguments appropriately. Aliases are intentionally ignored
2647 escapes arguments appropriately. Aliases are intentionally ignored
2648 so that intended expression behavior isn't accidentally subverted.
2648 so that intended expression behavior isn't accidentally subverted.
2649
2649
2650 Supported arguments:
2650 Supported arguments:
2651
2651
2652 %r = revset expression, parenthesized
2652 %r = revset expression, parenthesized
2653 %d = int(arg), no quoting
2653 %d = int(arg), no quoting
2654 %s = string(arg), escaped and single-quoted
2654 %s = string(arg), escaped and single-quoted
2655 %b = arg.branch(), escaped and single-quoted
2655 %b = arg.branch(), escaped and single-quoted
2656 %n = hex(arg), single-quoted
2656 %n = hex(arg), single-quoted
2657 %% = a literal '%'
2657 %% = a literal '%'
2658
2658
2659 Prefixing the type with 'l' specifies a parenthesized list of that type.
2659 Prefixing the type with 'l' specifies a parenthesized list of that type.
2660
2660
2661 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2661 >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
2662 '(10 or 11):: and ((this()) or (that()))'
2662 '(10 or 11):: and ((this()) or (that()))'
2663 >>> formatspec('%d:: and not %d::', 10, 20)
2663 >>> formatspec('%d:: and not %d::', 10, 20)
2664 '10:: and not 20::'
2664 '10:: and not 20::'
2665 >>> formatspec('%ld or %ld', [], [1])
2665 >>> formatspec('%ld or %ld', [], [1])
2666 "_list('') or 1"
2666 "_list('') or 1"
2667 >>> formatspec('keyword(%s)', 'foo\\xe9')
2667 >>> formatspec('keyword(%s)', 'foo\\xe9')
2668 "keyword('foo\\\\xe9')"
2668 "keyword('foo\\\\xe9')"
2669 >>> b = lambda: 'default'
2669 >>> b = lambda: 'default'
2670 >>> b.branch = b
2670 >>> b.branch = b
2671 >>> formatspec('branch(%b)', b)
2671 >>> formatspec('branch(%b)', b)
2672 "branch('default')"
2672 "branch('default')"
2673 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2673 >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
2674 "root(_list('a\\x00b\\x00c\\x00d'))"
2674 "root(_list('a\\x00b\\x00c\\x00d'))"
2675 '''
2675 '''
2676
2676
2677 def quote(s):
2677 def quote(s):
2678 return repr(str(s))
2678 return repr(str(s))
2679
2679
2680 def argtype(c, arg):
2680 def argtype(c, arg):
2681 if c == 'd':
2681 if c == 'd':
2682 return str(int(arg))
2682 return str(int(arg))
2683 elif c == 's':
2683 elif c == 's':
2684 return quote(arg)
2684 return quote(arg)
2685 elif c == 'r':
2685 elif c == 'r':
2686 parse(arg) # make sure syntax errors are confined
2686 parse(arg) # make sure syntax errors are confined
2687 return '(%s)' % arg
2687 return '(%s)' % arg
2688 elif c == 'n':
2688 elif c == 'n':
2689 return quote(node.hex(arg))
2689 return quote(node.hex(arg))
2690 elif c == 'b':
2690 elif c == 'b':
2691 return quote(arg.branch())
2691 return quote(arg.branch())
2692
2692
2693 def listexp(s, t):
2693 def listexp(s, t):
2694 l = len(s)
2694 l = len(s)
2695 if l == 0:
2695 if l == 0:
2696 return "_list('')"
2696 return "_list('')"
2697 elif l == 1:
2697 elif l == 1:
2698 return argtype(t, s[0])
2698 return argtype(t, s[0])
2699 elif t == 'd':
2699 elif t == 'd':
2700 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2700 return "_intlist('%s')" % "\0".join(str(int(a)) for a in s)
2701 elif t == 's':
2701 elif t == 's':
2702 return "_list('%s')" % "\0".join(s)
2702 return "_list('%s')" % "\0".join(s)
2703 elif t == 'n':
2703 elif t == 'n':
2704 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2704 return "_hexlist('%s')" % "\0".join(node.hex(a) for a in s)
2705 elif t == 'b':
2705 elif t == 'b':
2706 return "_list('%s')" % "\0".join(a.branch() for a in s)
2706 return "_list('%s')" % "\0".join(a.branch() for a in s)
2707
2707
2708 m = l // 2
2708 m = l // 2
2709 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2709 return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))
2710
2710
2711 ret = ''
2711 ret = ''
2712 pos = 0
2712 pos = 0
2713 arg = 0
2713 arg = 0
2714 while pos < len(expr):
2714 while pos < len(expr):
2715 c = expr[pos]
2715 c = expr[pos]
2716 if c == '%':
2716 if c == '%':
2717 pos += 1
2717 pos += 1
2718 d = expr[pos]
2718 d = expr[pos]
2719 if d == '%':
2719 if d == '%':
2720 ret += d
2720 ret += d
2721 elif d in 'dsnbr':
2721 elif d in 'dsnbr':
2722 ret += argtype(d, args[arg])
2722 ret += argtype(d, args[arg])
2723 arg += 1
2723 arg += 1
2724 elif d == 'l':
2724 elif d == 'l':
2725 # a list of some type
2725 # a list of some type
2726 pos += 1
2726 pos += 1
2727 d = expr[pos]
2727 d = expr[pos]
2728 ret += listexp(list(args[arg]), d)
2728 ret += listexp(list(args[arg]), d)
2729 arg += 1
2729 arg += 1
2730 else:
2730 else:
2731 raise util.Abort('unexpected revspec format character %s' % d)
2731 raise util.Abort('unexpected revspec format character %s' % d)
2732 else:
2732 else:
2733 ret += c
2733 ret += c
2734 pos += 1
2734 pos += 1
2735
2735
2736 return ret
2736 return ret
2737
2737
2738 def prettyformat(tree):
2738 def prettyformat(tree):
2739 return parser.prettyformat(tree, ('string', 'symbol'))
2739 return parser.prettyformat(tree, ('string', 'symbol'))
2740
2740
2741 def depth(tree):
2741 def depth(tree):
2742 if isinstance(tree, tuple):
2742 if isinstance(tree, tuple):
2743 return max(map(depth, tree)) + 1
2743 return max(map(depth, tree)) + 1
2744 else:
2744 else:
2745 return 0
2745 return 0
2746
2746
2747 def funcsused(tree):
2747 def funcsused(tree):
2748 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2748 if not isinstance(tree, tuple) or tree[0] in ('string', 'symbol'):
2749 return set()
2749 return set()
2750 else:
2750 else:
2751 funcs = set()
2751 funcs = set()
2752 for s in tree[1:]:
2752 for s in tree[1:]:
2753 funcs |= funcsused(s)
2753 funcs |= funcsused(s)
2754 if tree[0] == 'func':
2754 if tree[0] == 'func':
2755 funcs.add(tree[1][1])
2755 funcs.add(tree[1][1])
2756 return funcs
2756 return funcs
2757
2757
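# Editorial sketch: both helpers walk the parsed tuple tree; for the
# (hypothetical) tree ('func', ('symbol', 'grep'), ('string', 'a')):
#
#   depth(tree)     -> 2
#   funcsused(tree) -> set(['grep'])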
2758 class abstractsmartset(object):
2758 class abstractsmartset(object):
2759
2759
2760 def __nonzero__(self):
2760 def __nonzero__(self):
2761 """True if the smartset is not empty"""
2761 """True if the smartset is not empty"""
2762 raise NotImplementedError()
2762 raise NotImplementedError()
2763
2763
2764 def __contains__(self, rev):
2764 def __contains__(self, rev):
2765 """provide fast membership testing"""
2765 """provide fast membership testing"""
2766 raise NotImplementedError()
2766 raise NotImplementedError()
2767
2767
2768 def __iter__(self):
2768 def __iter__(self):
2769 """iterate the set in the order it is supposed to be iterated"""
2769 """iterate the set in the order it is supposed to be iterated"""
2770 raise NotImplementedError()
2770 raise NotImplementedError()
2771
2771
2772 # Attributes containing a function to perform a fast iteration in a given
2772 # Attributes containing a function to perform a fast iteration in a given
2773 # direction. A smartset can have none, one, or both defined.
2773 # direction. A smartset can have none, one, or both defined.
2774 #
2774 #
2775 # Default value is None instead of a function returning None to avoid
2775 # Default value is None instead of a function returning None to avoid
2776 # initializing an iterator just for testing if a fast method exists.
2776 # initializing an iterator just for testing if a fast method exists.
2777 fastasc = None
2777 fastasc = None
2778 fastdesc = None
2778 fastdesc = None
2779
2779
2780 def isascending(self):
2780 def isascending(self):
2781 """True if the set will iterate in ascending order"""
2781 """True if the set will iterate in ascending order"""
2782 raise NotImplementedError()
2782 raise NotImplementedError()
2783
2783
2784 def isdescending(self):
2784 def isdescending(self):
2785 """True if the set will iterate in descending order"""
2785 """True if the set will iterate in descending order"""
2786 raise NotImplementedError()
2786 raise NotImplementedError()
2787
2787
2788 def min(self):
2788 def min(self):
2789 """return the minimum element in the set"""
2789 """return the minimum element in the set"""
2790 if self.fastasc is not None:
2790 if self.fastasc is not None:
2791 for r in self.fastasc():
2791 for r in self.fastasc():
2792 return r
2792 return r
2793 raise ValueError('arg is an empty sequence')
2793 raise ValueError('arg is an empty sequence')
2794 return min(self)
2794 return min(self)
2795
2795
2796 def max(self):
2796 def max(self):
2797 """return the maximum element in the set"""
2797 """return the maximum element in the set"""
2798 if self.fastdesc is not None:
2798 if self.fastdesc is not None:
2799 for r in self.fastdesc():
2799 for r in self.fastdesc():
2800 return r
2800 return r
2801 raise ValueError('arg is an empty sequence')
2801 raise ValueError('arg is an empty sequence')
2802 return max(self)
2802 return max(self)
2803
2803
2804 def first(self):
2804 def first(self):
2805 """return the first element in the set (user iteration perspective)
2805 """return the first element in the set (user iteration perspective)
2806
2806
2807 Return None if the set is empty"""
2807 Return None if the set is empty"""
2808 raise NotImplementedError()
2808 raise NotImplementedError()
2809
2809
2810 def last(self):
2810 def last(self):
2811 """return the last element in the set (user iteration perspective)
2811 """return the last element in the set (user iteration perspective)
2812
2812
2813 Return None if the set is empty"""
2813 Return None if the set is empty"""
2814 raise NotImplementedError()
2814 raise NotImplementedError()
2815
2815
2816 def __len__(self):
2816 def __len__(self):
2817 """return the length of the smartsets
2817 """return the length of the smartsets
2818
2818
2819 This can be expensive on smartset that could be lazy otherwise."""
2819 This can be expensive on smartset that could be lazy otherwise."""
2820 raise NotImplementedError()
2820 raise NotImplementedError()
2821
2821
2822 def reverse(self):
2822 def reverse(self):
2823 """reverse the expected iteration order"""
2823 """reverse the expected iteration order"""
2824 raise NotImplementedError()
2824 raise NotImplementedError()
2825
2825
2826 def sort(self, reverse=True):
2826 def sort(self, reverse=True):
2827 """get the set to iterate in an ascending or descending order"""
2827 """get the set to iterate in an ascending or descending order"""
2828 raise NotImplementedError()
2828 raise NotImplementedError()
2829
2829
2830 def __and__(self, other):
2830 def __and__(self, other):
2831 """Returns a new object with the intersection of the two collections.
2831 """Returns a new object with the intersection of the two collections.
2832
2832
2833 This is part of the mandatory API for smartset."""
2833 This is part of the mandatory API for smartset."""
2834 if isinstance(other, fullreposet):
2834 if isinstance(other, fullreposet):
2835 return self
2835 return self
2836 return self.filter(other.__contains__, cache=False)
2836 return self.filter(other.__contains__, cache=False)
2837
2837
2838 def __add__(self, other):
2838 def __add__(self, other):
2839 """Returns a new object with the union of the two collections.
2839 """Returns a new object with the union of the two collections.
2840
2840
2841 This is part of the mandatory API for smartset."""
2841 This is part of the mandatory API for smartset."""
2842 return addset(self, other)
2842 return addset(self, other)
2843
2843
2844 def __sub__(self, other):
2844 def __sub__(self, other):
2845 """Returns a new object with the substraction of the two collections.
2845 """Returns a new object with the substraction of the two collections.
2846
2846
2847 This is part of the mandatory API for smartset."""
2847 This is part of the mandatory API for smartset."""
2848 c = other.__contains__
2848 c = other.__contains__
2849 return self.filter(lambda r: not c(r), cache=False)
2849 return self.filter(lambda r: not c(r), cache=False)
2850
2850
2851 def filter(self, condition, cache=True):
2851 def filter(self, condition, cache=True):
2852 """Returns this smartset filtered by condition as a new smartset.
2852 """Returns this smartset filtered by condition as a new smartset.
2853
2853
2854 `condition` is a callable which takes a revision number and returns a
2854 `condition` is a callable which takes a revision number and returns a
2855 boolean.
2855 boolean.
2856
2856
2857 This is part of the mandatory API for smartset."""
2857 This is part of the mandatory API for smartset."""
2858 # builtins cannot be cached, but they do not need to be
2858 # builtins cannot be cached, but they do not need to be
2859 if cache and util.safehasattr(condition, 'func_code'):
2859 if cache and util.safehasattr(condition, 'func_code'):
2860 condition = util.cachefunc(condition)
2860 condition = util.cachefunc(condition)
2861 return filteredset(self, condition)
2861 return filteredset(self, condition)
2862
2862
2863 class baseset(abstractsmartset):
2863 class baseset(abstractsmartset):
2864 """Basic data structure that represents a revset and contains the basic
2864 """Basic data structure that represents a revset and contains the basic
2865 operations that it should be able to perform.
2865 operations that it should be able to perform.
2866
2866
2867 Every method in this class should be implemented by any smartset class.
2867 Every method in this class should be implemented by any smartset class.
2868 """
2868 """
2869 def __init__(self, data=()):
2869 def __init__(self, data=()):
2870 if not isinstance(data, list):
2870 if not isinstance(data, list):
2871 data = list(data)
2871 data = list(data)
2872 self._list = data
2872 self._list = data
2873 self._ascending = None
2873 self._ascending = None
2874
2874
2875 @util.propertycache
2875 @util.propertycache
2876 def _set(self):
2876 def _set(self):
2877 return set(self._list)
2877 return set(self._list)
2878
2878
2879 @util.propertycache
2879 @util.propertycache
2880 def _asclist(self):
2880 def _asclist(self):
2881 asclist = self._list[:]
2881 asclist = self._list[:]
2882 asclist.sort()
2882 asclist.sort()
2883 return asclist
2883 return asclist
2884
2884
2885 def __iter__(self):
2885 def __iter__(self):
2886 if self._ascending is None:
2886 if self._ascending is None:
2887 return iter(self._list)
2887 return iter(self._list)
2888 elif self._ascending:
2888 elif self._ascending:
2889 return iter(self._asclist)
2889 return iter(self._asclist)
2890 else:
2890 else:
2891 return reversed(self._asclist)
2891 return reversed(self._asclist)
2892
2892
2893 def fastasc(self):
2893 def fastasc(self):
2894 return iter(self._asclist)
2894 return iter(self._asclist)
2895
2895
2896 def fastdesc(self):
2896 def fastdesc(self):
2897 return reversed(self._asclist)
2897 return reversed(self._asclist)
2898
2898
2899 @util.propertycache
2899 @util.propertycache
2900 def __contains__(self):
2900 def __contains__(self):
2901 return self._set.__contains__
2901 return self._set.__contains__
2902
2902
2903 def __nonzero__(self):
2903 def __nonzero__(self):
2904 return bool(self._list)
2904 return bool(self._list)
2905
2905
2906 def sort(self, reverse=False):
2906 def sort(self, reverse=False):
2907 self._ascending = not bool(reverse)
2907 self._ascending = not bool(reverse)
2908
2908
2909 def reverse(self):
2909 def reverse(self):
2910 if self._ascending is None:
2910 if self._ascending is None:
2911 self._list.reverse()
2911 self._list.reverse()
2912 else:
2912 else:
2913 self._ascending = not self._ascending
2913 self._ascending = not self._ascending
2914
2914
2915 def __len__(self):
2915 def __len__(self):
2916 return len(self._list)
2916 return len(self._list)
2917
2917
2918 def isascending(self):
2918 def isascending(self):
2919 """Returns True if the collection is ascending order, False if not.
2919 """Returns True if the collection is ascending order, False if not.
2920
2920
2921 This is part of the mandatory API for smartset."""
2921 This is part of the mandatory API for smartset."""
2922 if len(self) <= 1:
2922 if len(self) <= 1:
2923 return True
2923 return True
2924 return self._ascending is not None and self._ascending
2924 return self._ascending is not None and self._ascending
2925
2925
2926 def isdescending(self):
2926 def isdescending(self):
2927 """Returns True if the collection is descending order, False if not.
2927 """Returns True if the collection is descending order, False if not.
2928
2928
2929 This is part of the mandatory API for smartset."""
2929 This is part of the mandatory API for smartset."""
2930 if len(self) <= 1:
2930 if len(self) <= 1:
2931 return True
2931 return True
2932 return self._ascending is not None and not self._ascending
2932 return self._ascending is not None and not self._ascending
2933
2933
2934 def first(self):
2934 def first(self):
2935 if self:
2935 if self:
2936 if self._ascending is None:
2936 if self._ascending is None:
2937 return self._list[0]
2937 return self._list[0]
2938 elif self._ascending:
2938 elif self._ascending:
2939 return self._asclist[0]
2939 return self._asclist[0]
2940 else:
2940 else:
2941 return self._asclist[-1]
2941 return self._asclist[-1]
2942 return None
2942 return None
2943
2943
2944 def last(self):
2944 def last(self):
2945 if self:
2945 if self:
2946 if self._ascending is None:
2946 if self._ascending is None:
2947 return self._list[-1]
2947 return self._list[-1]
2948 elif self._ascending:
2948 elif self._ascending:
2949 return self._asclist[-1]
2949 return self._asclist[-1]
2950 else:
2950 else:
2951 return self._asclist[0]
2951 return self._asclist[0]
2952 return None
2952 return None
2953
2953
2954 def __repr__(self):
2954 def __repr__(self):
2955 d = {None: '', False: '-', True: '+'}[self._ascending]
2955 d = {None: '', False: '-', True: '+'}[self._ascending]
2956 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2956 return '<%s%s %r>' % (type(self).__name__, d, self._list)
2957
2957
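# Editorial usage sketch (illustrative only):
#
#   s = baseset([3, 1, 2])
#   list(s)        # [3, 1, 2]  -- insertion order until sort() is called
#   s.sort()
#   list(s)        # [1, 2, 3]
#   s.reverse()
#   list(s)        # [3, 2, 1]
#   1 in s         # True, via the cached set in _set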
2958 class filteredset(abstractsmartset):
2958 class filteredset(abstractsmartset):
2959 """Duck type for baseset class which iterates lazily over the revisions in
2959 """Duck type for baseset class which iterates lazily over the revisions in
2960 the subset and contains a function which tests for membership in the
2960 the subset and contains a function which tests for membership in the
2961 revset.
2961 revset.
2962 """
2962 """
2963 def __init__(self, subset, condition=lambda x: True):
2963 def __init__(self, subset, condition=lambda x: True):
2964 """
2964 """
2965 condition: a function that decides whether a revision in the subset
2965 condition: a function that decides whether a revision in the subset
2966 belongs to the revset or not.
2966 belongs to the revset or not.
2967 """
2967 """
2968 self._subset = subset
2968 self._subset = subset
2969 self._condition = condition
2969 self._condition = condition
2970 self._cache = {}
2970 self._cache = {}
2971
2971
2972 def __contains__(self, x):
2972 def __contains__(self, x):
2973 c = self._cache
2973 c = self._cache
2974 if x not in c:
2974 if x not in c:
2975 v = c[x] = x in self._subset and self._condition(x)
2975 v = c[x] = x in self._subset and self._condition(x)
2976 return v
2976 return v
2977 return c[x]
2977 return c[x]
2978
2978
2979 def __iter__(self):
2979 def __iter__(self):
2980 return self._iterfilter(self._subset)
2980 return self._iterfilter(self._subset)
2981
2981
2982 def _iterfilter(self, it):
2982 def _iterfilter(self, it):
2983 cond = self._condition
2983 cond = self._condition
2984 for x in it:
2984 for x in it:
2985 if cond(x):
2985 if cond(x):
2986 yield x
2986 yield x
2987
2987
2988 @property
2988 @property
2989 def fastasc(self):
2989 def fastasc(self):
2990 it = self._subset.fastasc
2990 it = self._subset.fastasc
2991 if it is None:
2991 if it is None:
2992 return None
2992 return None
2993 return lambda: self._iterfilter(it())
2993 return lambda: self._iterfilter(it())
2994
2994
2995 @property
2995 @property
2996 def fastdesc(self):
2996 def fastdesc(self):
2997 it = self._subset.fastdesc
2997 it = self._subset.fastdesc
2998 if it is None:
2998 if it is None:
2999 return None
2999 return None
3000 return lambda: self._iterfilter(it())
3000 return lambda: self._iterfilter(it())
3001
3001
3002 def __nonzero__(self):
3002 def __nonzero__(self):
3003 for r in self:
3003 for r in self:
3004 return True
3004 return True
3005 return False
3005 return False
3006
3006
3007 def __len__(self):
3007 def __len__(self):
3008 # Basic implementation to be changed in future patches.
3008 # Basic implementation to be changed in future patches.
3009 l = baseset([r for r in self])
3009 l = baseset([r for r in self])
3010 return len(l)
3010 return len(l)
3011
3011
3012 def sort(self, reverse=False):
3012 def sort(self, reverse=False):
3013 self._subset.sort(reverse=reverse)
3013 self._subset.sort(reverse=reverse)
3014
3014
3015 def reverse(self):
3015 def reverse(self):
3016 self._subset.reverse()
3016 self._subset.reverse()
3017
3017
3018 def isascending(self):
3018 def isascending(self):
3019 return self._subset.isascending()
3019 return self._subset.isascending()
3020
3020
3021 def isdescending(self):
3021 def isdescending(self):
3022 return self._subset.isdescending()
3022 return self._subset.isdescending()
3023
3023
3024 def first(self):
3024 def first(self):
3025 for x in self:
3025 for x in self:
3026 return x
3026 return x
3027 return None
3027 return None
3028
3028
3029 def last(self):
3029 def last(self):
3030 it = None
3030 it = None
3031 if self.isascending():
3031 if self.isascending():
3032 it = self.fastdesc
3032 it = self.fastdesc
3033 elif self.isdescending():
3033 elif self.isdescending():
3034 it = self.fastasc
3034 it = self.fastasc
3035 if it is not None:
3035 if it is not None:
3036 for x in it():
3036 for x in it():
3037 return x
3037 return x
3038 return None #empty case
3038 return None #empty case
3039 else:
3039 else:
3040 x = None
3040 x = None
3041 for x in self:
3041 for x in self:
3042 pass
3042 pass
3043 return x
3043 return x
3044
3044
3045 def __repr__(self):
3045 def __repr__(self):
3046 return '<%s %r>' % (type(self).__name__, self._subset)
3046 return '<%s %r>' % (type(self).__name__, self._subset)
3047
3047
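# Editor's illustration (not part of the original file): a minimal sketch of
# how filteredset composes with a baseset. The condition is evaluated lazily,
# on iteration or membership tests, and the results are memoized in _cache.
#
#   >>> s = filteredset(baseset([0, 1, 2, 3, 4]), condition=lambda r: r % 2 == 0)
#   >>> 3 in s, 4 in s
#   (False, True)
#   >>> [r for r in s]
#   [0, 2, 4]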
3048 # this function will be removed, or merged to addset or orset, when
3048 # this function will be removed, or merged to addset or orset, when
3049 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3049 # - scmutil.revrange() can be rewritten to not combine calculated smartsets
3050 # - or addset can handle more than two sets without balanced tree
3050 # - or addset can handle more than two sets without balanced tree
3051 def _combinesets(subsets):
3051 def _combinesets(subsets):
3052 """Create balanced tree of addsets representing union of given sets"""
3052 """Create balanced tree of addsets representing union of given sets"""
3053 if not subsets:
3053 if not subsets:
3054 return baseset()
3054 return baseset()
3055 if len(subsets) == 1:
3055 if len(subsets) == 1:
3056 return subsets[0]
3056 return subsets[0]
3057 p = len(subsets) // 2
3057 p = len(subsets) // 2
3058 xs = _combinesets(subsets[:p])
3058 xs = _combinesets(subsets[:p])
3059 ys = _combinesets(subsets[p:])
3059 ys = _combinesets(subsets[p:])
3060 return addset(xs, ys)
3060 return addset(xs, ys)
3061
3061
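# Editor's illustration (not part of the original file): _combinesets builds a
# balanced binary tree of two-way addsets, so the nesting depth stays at
# O(log n) in the number of input sets instead of growing into a linear chain.
#
#   >>> s = _combinesets([baseset([1]), baseset([2]), baseset([3]), baseset([4])])
#   >>> repr(s).count('addset')     # one outer node plus two inner nodes
#   3
#   >>> sorted(s), 3 in s, 5 in s
#   ([1, 2, 3, 4], True, False)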
3062 def _iterordered(ascending, iter1, iter2):
3062 def _iterordered(ascending, iter1, iter2):
3063 """produce an ordered iteration from two iterators with the same order
3063 """produce an ordered iteration from two iterators with the same order
3064
3064
3065 The ascending parameter indicates the iteration direction.
3065 The ascending parameter indicates the iteration direction.
3066 """
3066 """
3067 choice = max
3067 choice = max
3068 if ascending:
3068 if ascending:
3069 choice = min
3069 choice = min
3070
3070
3071 val1 = None
3071 val1 = None
3072 val2 = None
3072 val2 = None
3073 try:
3073 try:
3074 # Consume both iterators in an ordered way until one is empty
3074 # Consume both iterators in an ordered way until one is empty
3075 while True:
3075 while True:
3076 if val1 is None:
3076 if val1 is None:
3077 val1 = iter1.next()
3077 val1 = iter1.next()
3078 if val2 is None:
3078 if val2 is None:
3079 val2 = iter2.next()
3079 val2 = iter2.next()
3080 next = choice(val1, val2)
3080 next = choice(val1, val2)
3081 yield next
3081 yield next
3082 if val1 == next:
3082 if val1 == next:
3083 val1 = None
3083 val1 = None
3084 if val2 == next:
3084 if val2 == next:
3085 val2 = None
3085 val2 = None
3086 except StopIteration:
3086 except StopIteration:
3087 # Flush any remaining values and consume the other one
3087 # Flush any remaining values and consume the other one
3088 it = iter2
3088 it = iter2
3089 if val1 is not None:
3089 if val1 is not None:
3090 yield val1
3090 yield val1
3091 it = iter1
3091 it = iter1
3092 elif val2 is not None:
3092 elif val2 is not None:
3093 # might have been equality and both are empty
3093 # might have been equality and both are empty
3094 yield val2
3094 yield val2
3095 for val in it:
3095 for val in it:
3096 yield val
3096 yield val
3097
3097
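# Editor's illustration (not part of the original file): _iterordered merges
# two already-sorted iterators into one sorted iteration, yielding a value only
# once when both iterators produce it.
#
#   >>> list(_iterordered(True, iter([1, 3, 5]), iter([2, 3, 6])))
#   [1, 2, 3, 5, 6]
#   >>> list(_iterordered(False, iter([5, 3, 1]), iter([6, 3, 2])))
#   [6, 5, 3, 2, 1]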
3098 class addset(abstractsmartset):
3098 class addset(abstractsmartset):
3099 """Represent the addition of two sets
3099 """Represent the addition of two sets
3100
3100
3101 Wrapper structure for lazily adding two structures without losing much
3101 Wrapper structure for lazily adding two structures without losing much
3102 performance on the __contains__ method
3102 performance on the __contains__ method
3103
3103
3104 If the ascending attribute is set, that means the two structures are
3104 If the ascending attribute is set, that means the two structures are
3105 ordered in either an ascending or descending way. Therefore, we can add
3105 ordered in either an ascending or descending way. Therefore, we can add
3106 them maintaining the order by iterating over both at the same time
3106 them maintaining the order by iterating over both at the same time
3107
3107
3108 >>> xs = baseset([0, 3, 2])
3108 >>> xs = baseset([0, 3, 2])
3109 >>> ys = baseset([5, 2, 4])
3109 >>> ys = baseset([5, 2, 4])
3110
3110
3111 >>> rs = addset(xs, ys)
3111 >>> rs = addset(xs, ys)
3112 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3112 >>> bool(rs), 0 in rs, 1 in rs, 5 in rs, rs.first(), rs.last()
3113 (True, True, False, True, 0, 4)
3113 (True, True, False, True, 0, 4)
3114 >>> rs = addset(xs, baseset([]))
3114 >>> rs = addset(xs, baseset([]))
3115 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3115 >>> bool(rs), 0 in rs, 1 in rs, rs.first(), rs.last()
3116 (True, True, False, 0, 2)
3116 (True, True, False, 0, 2)
3117 >>> rs = addset(baseset([]), baseset([]))
3117 >>> rs = addset(baseset([]), baseset([]))
3118 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3118 >>> bool(rs), 0 in rs, rs.first(), rs.last()
3119 (False, False, None, None)
3119 (False, False, None, None)
3120
3120
3121 iterate unsorted:
3121 iterate unsorted:
3122 >>> rs = addset(xs, ys)
3122 >>> rs = addset(xs, ys)
3123 >>> [x for x in rs] # without _genlist
3123 >>> [x for x in rs] # without _genlist
3124 [0, 3, 2, 5, 4]
3124 [0, 3, 2, 5, 4]
3125 >>> assert not rs._genlist
3125 >>> assert not rs._genlist
3126 >>> len(rs)
3126 >>> len(rs)
3127 5
3127 5
3128 >>> [x for x in rs] # with _genlist
3128 >>> [x for x in rs] # with _genlist
3129 [0, 3, 2, 5, 4]
3129 [0, 3, 2, 5, 4]
3130 >>> assert rs._genlist
3130 >>> assert rs._genlist
3131
3131
3132 iterate ascending:
3132 iterate ascending:
3133 >>> rs = addset(xs, ys, ascending=True)
3133 >>> rs = addset(xs, ys, ascending=True)
3134 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3134 >>> [x for x in rs], [x for x in rs.fastasc()] # without _asclist
3135 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3135 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3136 >>> assert not rs._asclist
3136 >>> assert not rs._asclist
3137 >>> len(rs)
3137 >>> len(rs)
3138 5
3138 5
3139 >>> [x for x in rs], [x for x in rs.fastasc()]
3139 >>> [x for x in rs], [x for x in rs.fastasc()]
3140 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3140 ([0, 2, 3, 4, 5], [0, 2, 3, 4, 5])
3141 >>> assert rs._asclist
3141 >>> assert rs._asclist
3142
3142
3143 iterate descending:
3143 iterate descending:
3144 >>> rs = addset(xs, ys, ascending=False)
3144 >>> rs = addset(xs, ys, ascending=False)
3145 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3145 >>> [x for x in rs], [x for x in rs.fastdesc()] # without _asclist
3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3146 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3147 >>> assert not rs._asclist
3147 >>> assert not rs._asclist
3148 >>> len(rs)
3148 >>> len(rs)
3149 5
3149 5
3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3150 >>> [x for x in rs], [x for x in rs.fastdesc()]
3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3151 ([5, 4, 3, 2, 0], [5, 4, 3, 2, 0])
3152 >>> assert rs._asclist
3152 >>> assert rs._asclist
3153
3153
3154 iterate ascending without fastasc:
3154 iterate ascending without fastasc:
3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3155 >>> rs = addset(xs, generatorset(ys), ascending=True)
3156 >>> assert rs.fastasc is None
3156 >>> assert rs.fastasc is None
3157 >>> [x for x in rs]
3157 >>> [x for x in rs]
3158 [0, 2, 3, 4, 5]
3158 [0, 2, 3, 4, 5]
3159
3159
3160 iterate descending without fastdesc:
3160 iterate descending without fastdesc:
3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3161 >>> rs = addset(generatorset(xs), ys, ascending=False)
3162 >>> assert rs.fastdesc is None
3162 >>> assert rs.fastdesc is None
3163 >>> [x for x in rs]
3163 >>> [x for x in rs]
3164 [5, 4, 3, 2, 0]
3164 [5, 4, 3, 2, 0]
3165 """
3165 """
3166 def __init__(self, revs1, revs2, ascending=None):
3166 def __init__(self, revs1, revs2, ascending=None):
3167 self._r1 = revs1
3167 self._r1 = revs1
3168 self._r2 = revs2
3168 self._r2 = revs2
3169 self._iter = None
3169 self._iter = None
3170 self._ascending = ascending
3170 self._ascending = ascending
3171 self._genlist = None
3171 self._genlist = None
3172 self._asclist = None
3172 self._asclist = None
3173
3173
3174 def __len__(self):
3174 def __len__(self):
3175 return len(self._list)
3175 return len(self._list)
3176
3176
3177 def __nonzero__(self):
3177 def __nonzero__(self):
3178 return bool(self._r1) or bool(self._r2)
3178 return bool(self._r1) or bool(self._r2)
3179
3179
3180 @util.propertycache
3180 @util.propertycache
3181 def _list(self):
3181 def _list(self):
3182 if not self._genlist:
3182 if not self._genlist:
3183 self._genlist = baseset(iter(self))
3183 self._genlist = baseset(iter(self))
3184 return self._genlist
3184 return self._genlist
3185
3185
3186 def __iter__(self):
3186 def __iter__(self):
3187 """Iterate over both collections without repeating elements
3187 """Iterate over both collections without repeating elements
3188
3188
3189 If the ascending attribute is not set, iterate over the first one and
3189 If the ascending attribute is not set, iterate over the first one and
3190 then over the second one checking for membership on the first one so we
3190 then over the second one checking for membership on the first one so we
3191 don't yield any duplicates.
3191 don't yield any duplicates.
3192
3192
3193 If the ascending attribute is set, iterate over both collections at the
3193 If the ascending attribute is set, iterate over both collections at the
3194 same time, yielding only one value at a time in the given order.
3194 same time, yielding only one value at a time in the given order.
3195 """
3195 """
3196 if self._ascending is None:
3196 if self._ascending is None:
3197 if self._genlist:
3197 if self._genlist:
3198 return iter(self._genlist)
3198 return iter(self._genlist)
3199 def arbitraryordergen():
3199 def arbitraryordergen():
3200 for r in self._r1:
3200 for r in self._r1:
3201 yield r
3201 yield r
3202 inr1 = self._r1.__contains__
3202 inr1 = self._r1.__contains__
3203 for r in self._r2:
3203 for r in self._r2:
3204 if not inr1(r):
3204 if not inr1(r):
3205 yield r
3205 yield r
3206 return arbitraryordergen()
3206 return arbitraryordergen()
3207 # try to use our own fast iterator if it exists
3207 # try to use our own fast iterator if it exists
3208 self._trysetasclist()
3208 self._trysetasclist()
3209 if self._ascending:
3209 if self._ascending:
3210 attr = 'fastasc'
3210 attr = 'fastasc'
3211 else:
3211 else:
3212 attr = 'fastdesc'
3212 attr = 'fastdesc'
3213 it = getattr(self, attr)
3213 it = getattr(self, attr)
3214 if it is not None:
3214 if it is not None:
3215 return it()
3215 return it()
3216 # maybe only one of the two components supports fast iteration
3216 # maybe only one of the two components supports fast iteration
3217 # get iterator for _r1
3217 # get iterator for _r1
3218 iter1 = getattr(self._r1, attr)
3218 iter1 = getattr(self._r1, attr)
3219 if iter1 is None:
3219 if iter1 is None:
3220 # let's avoid side effect (not sure it matters)
3220 # let's avoid side effect (not sure it matters)
3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3221 iter1 = iter(sorted(self._r1, reverse=not self._ascending))
3222 else:
3222 else:
3223 iter1 = iter1()
3223 iter1 = iter1()
3224 # get iterator for _r2
3224 # get iterator for _r2
3225 iter2 = getattr(self._r2, attr)
3225 iter2 = getattr(self._r2, attr)
3226 if iter2 is None:
3226 if iter2 is None:
3227 # let's avoid side effect (not sure it matters)
3227 # let's avoid side effect (not sure it matters)
3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3228 iter2 = iter(sorted(self._r2, reverse=not self._ascending))
3229 else:
3229 else:
3230 iter2 = iter2()
3230 iter2 = iter2()
3231 return _iterordered(self._ascending, iter1, iter2)
3231 return _iterordered(self._ascending, iter1, iter2)
3232
3232
3233 def _trysetasclist(self):
3233 def _trysetasclist(self):
3234 """populate the _asclist attribute if possible and necessary"""
3234 """populate the _asclist attribute if possible and necessary"""
3235 if self._genlist is not None and self._asclist is None:
3235 if self._genlist is not None and self._asclist is None:
3236 self._asclist = sorted(self._genlist)
3236 self._asclist = sorted(self._genlist)
3237
3237
3238 @property
3238 @property
3239 def fastasc(self):
3239 def fastasc(self):
3240 self._trysetasclist()
3240 self._trysetasclist()
3241 if self._asclist is not None:
3241 if self._asclist is not None:
3242 return self._asclist.__iter__
3242 return self._asclist.__iter__
3243 iter1 = self._r1.fastasc
3243 iter1 = self._r1.fastasc
3244 iter2 = self._r2.fastasc
3244 iter2 = self._r2.fastasc
3245 if None in (iter1, iter2):
3245 if None in (iter1, iter2):
3246 return None
3246 return None
3247 return lambda: _iterordered(True, iter1(), iter2())
3247 return lambda: _iterordered(True, iter1(), iter2())
3248
3248
3249 @property
3249 @property
3250 def fastdesc(self):
3250 def fastdesc(self):
3251 self._trysetasclist()
3251 self._trysetasclist()
3252 if self._asclist is not None:
3252 if self._asclist is not None:
3253 return self._asclist.__reversed__
3253 return self._asclist.__reversed__
3254 iter1 = self._r1.fastdesc
3254 iter1 = self._r1.fastdesc
3255 iter2 = self._r2.fastdesc
3255 iter2 = self._r2.fastdesc
3256 if None in (iter1, iter2):
3256 if None in (iter1, iter2):
3257 return None
3257 return None
3258 return lambda: _iterordered(False, iter1(), iter2())
3258 return lambda: _iterordered(False, iter1(), iter2())
3259
3259
3260 def __contains__(self, x):
3260 def __contains__(self, x):
3261 return x in self._r1 or x in self._r2
3261 return x in self._r1 or x in self._r2
3262
3262
3263 def sort(self, reverse=False):
3263 def sort(self, reverse=False):
3264 """Sort the added set
3264 """Sort the added set
3265
3265
3266 For this we use the cached list with all the generated values and if we
3266 For this we use the cached list with all the generated values and if we
3267 know they are ascending or descending we can sort them in a smart way.
3267 know they are ascending or descending we can sort them in a smart way.
3268 """
3268 """
3269 self._ascending = not reverse
3269 self._ascending = not reverse
3270
3270
3271 def isascending(self):
3271 def isascending(self):
3272 return self._ascending is not None and self._ascending
3272 return self._ascending is not None and self._ascending
3273
3273
3274 def isdescending(self):
3274 def isdescending(self):
3275 return self._ascending is not None and not self._ascending
3275 return self._ascending is not None and not self._ascending
3276
3276
3277 def reverse(self):
3277 def reverse(self):
3278 if self._ascending is None:
3278 if self._ascending is None:
3279 self._list.reverse()
3279 self._list.reverse()
3280 else:
3280 else:
3281 self._ascending = not self._ascending
3281 self._ascending = not self._ascending
3282
3282
3283 def first(self):
3283 def first(self):
3284 for x in self:
3284 for x in self:
3285 return x
3285 return x
3286 return None
3286 return None
3287
3287
3288 def last(self):
3288 def last(self):
3289 self.reverse()
3289 self.reverse()
3290 val = self.first()
3290 val = self.first()
3291 self.reverse()
3291 self.reverse()
3292 return val
3292 return val
3293
3293
3294 def __repr__(self):
3294 def __repr__(self):
3295 d = {None: '', False: '-', True: '+'}[self._ascending]
3295 d = {None: '', False: '-', True: '+'}[self._ascending]
3296 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3296 return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2)
3297
3297
3298 class generatorset(abstractsmartset):
3298 class generatorset(abstractsmartset):
3299 """Wrap a generator for lazy iteration
3299 """Wrap a generator for lazy iteration
3300
3300
3301 Wrapper structure for generators that provides lazy membership and can
3301 Wrapper structure for generators that provides lazy membership and can
3302 be iterated more than once.
3302 be iterated more than once.
3303 When asked for membership it generates values until either it finds the
3303 When asked for membership it generates values until either it finds the
3304 requested one or has gone through all the elements in the generator
3304 requested one or has gone through all the elements in the generator
3305 """
3305 """
3306 def __init__(self, gen, iterasc=None):
3306 def __init__(self, gen, iterasc=None):
3307 """
3307 """
3308 gen: a generator producing the values for the generatorset.
3308 gen: a generator producing the values for the generatorset.
3309 """
3309 """
3310 self._gen = gen
3310 self._gen = gen
3311 self._asclist = None
3311 self._asclist = None
3312 self._cache = {}
3312 self._cache = {}
3313 self._genlist = []
3313 self._genlist = []
3314 self._finished = False
3314 self._finished = False
3315 self._ascending = True
3315 self._ascending = True
3316 if iterasc is not None:
3316 if iterasc is not None:
3317 if iterasc:
3317 if iterasc:
3318 self.fastasc = self._iterator
3318 self.fastasc = self._iterator
3319 self.__contains__ = self._asccontains
3319 self.__contains__ = self._asccontains
3320 else:
3320 else:
3321 self.fastdesc = self._iterator
3321 self.fastdesc = self._iterator
3322 self.__contains__ = self._desccontains
3322 self.__contains__ = self._desccontains
3323
3323
3324 def __nonzero__(self):
3324 def __nonzero__(self):
3325 # Do not use 'for r in self' because it will enforce the iteration
3325 # Do not use 'for r in self' because it will enforce the iteration
3326 # order (default ascending), possibly unrolling a whole descending
3326 # order (default ascending), possibly unrolling a whole descending
3327 # iterator.
3327 # iterator.
3328 if self._genlist:
3328 if self._genlist:
3329 return True
3329 return True
3330 for r in self._consumegen():
3330 for r in self._consumegen():
3331 return True
3331 return True
3332 return False
3332 return False
3333
3333
3334 def __contains__(self, x):
3334 def __contains__(self, x):
3335 if x in self._cache:
3335 if x in self._cache:
3336 return self._cache[x]
3336 return self._cache[x]
3337
3337
3338 # Use new values only, as existing values would be cached.
3338 # Use new values only, as existing values would be cached.
3339 for l in self._consumegen():
3339 for l in self._consumegen():
3340 if l == x:
3340 if l == x:
3341 return True
3341 return True
3342
3342
3343 self._cache[x] = False
3343 self._cache[x] = False
3344 return False
3344 return False
3345
3345
3346 def _asccontains(self, x):
3346 def _asccontains(self, x):
3347 """version of contains optimised for ascending generator"""
3347 """version of contains optimised for ascending generator"""
3348 if x in self._cache:
3348 if x in self._cache:
3349 return self._cache[x]
3349 return self._cache[x]
3350
3350
3351 # Use new values only, as existing values would be cached.
3351 # Use new values only, as existing values would be cached.
3352 for l in self._consumegen():
3352 for l in self._consumegen():
3353 if l == x:
3353 if l == x:
3354 return True
3354 return True
3355 if l > x:
3355 if l > x:
3356 break
3356 break
3357
3357
3358 self._cache[x] = False
3358 self._cache[x] = False
3359 return False
3359 return False
3360
3360
3361 def _desccontains(self, x):
3361 def _desccontains(self, x):
3362 """version of contains optimised for descending generator"""
3362 """version of contains optimised for descending generator"""
3363 if x in self._cache:
3363 if x in self._cache:
3364 return self._cache[x]
3364 return self._cache[x]
3365
3365
3366 # Use new values only, as existing values would be cached.
3366 # Use new values only, as existing values would be cached.
3367 for l in self._consumegen():
3367 for l in self._consumegen():
3368 if l == x:
3368 if l == x:
3369 return True
3369 return True
3370 if l < x:
3370 if l < x:
3371 break
3371 break
3372
3372
3373 self._cache[x] = False
3373 self._cache[x] = False
3374 return False
3374 return False
3375
3375
3376 def __iter__(self):
3376 def __iter__(self):
3377 if self._ascending:
3377 if self._ascending:
3378 it = self.fastasc
3378 it = self.fastasc
3379 else:
3379 else:
3380 it = self.fastdesc
3380 it = self.fastdesc
3381 if it is not None:
3381 if it is not None:
3382 return it()
3382 return it()
3383 # we need to consume the iterator
3383 # we need to consume the iterator
3384 for x in self._consumegen():
3384 for x in self._consumegen():
3385 pass
3385 pass
3386 # call the same code again, now that fastasc/fastdesc are available
3386 # call the same code again, now that fastasc/fastdesc are available
3387 return iter(self)
3387 return iter(self)
3388
3388
3389 def _iterator(self):
3389 def _iterator(self):
3390 if self._finished:
3390 if self._finished:
3391 return iter(self._genlist)
3391 return iter(self._genlist)
3392
3392
3393 # We have to use this complex iteration strategy to allow multiple
3393 # We have to use this complex iteration strategy to allow multiple
3394 # iterations at the same time. We need to be able to catch revisions
3394 # iterations at the same time. We need to be able to catch revisions
3395 # removed from _consumegen and added to genlist in another instance.
3395 # removed from _consumegen and added to genlist in another instance.
3396 #
3396 #
3397 # Getting rid of it would provide about a 15% speed up on this
3397 # Getting rid of it would provide about a 15% speed up on this
3398 # iteration.
3398 # iteration.
3399 genlist = self._genlist
3399 genlist = self._genlist
3400 nextrev = self._consumegen().next
3400 nextrev = self._consumegen().next
3401 _len = len # cache global lookup
3401 _len = len # cache global lookup
3402 def gen():
3402 def gen():
3403 i = 0
3403 i = 0
3404 while True:
3404 while True:
3405 if i < _len(genlist):
3405 if i < _len(genlist):
3406 yield genlist[i]
3406 yield genlist[i]
3407 else:
3407 else:
3408 yield nextrev()
3408 yield nextrev()
3409 i += 1
3409 i += 1
3410 return gen()
3410 return gen()
3411
3411
3412 def _consumegen(self):
3412 def _consumegen(self):
3413 cache = self._cache
3413 cache = self._cache
3414 genlist = self._genlist.append
3414 genlist = self._genlist.append
3415 for item in self._gen:
3415 for item in self._gen:
3416 cache[item] = True
3416 cache[item] = True
3417 genlist(item)
3417 genlist(item)
3418 yield item
3418 yield item
3419 if not self._finished:
3419 if not self._finished:
3420 self._finished = True
3420 self._finished = True
3421 asc = self._genlist[:]
3421 asc = self._genlist[:]
3422 asc.sort()
3422 asc.sort()
3423 self._asclist = asc
3423 self._asclist = asc
3424 self.fastasc = asc.__iter__
3424 self.fastasc = asc.__iter__
3425 self.fastdesc = asc.__reversed__
3425 self.fastdesc = asc.__reversed__
3426
3426
3427 def __len__(self):
3427 def __len__(self):
3428 for x in self._consumegen():
3428 for x in self._consumegen():
3429 pass
3429 pass
3430 return len(self._genlist)
3430 return len(self._genlist)
3431
3431
3432 def sort(self, reverse=False):
3432 def sort(self, reverse=False):
3433 self._ascending = not reverse
3433 self._ascending = not reverse
3434
3434
3435 def reverse(self):
3435 def reverse(self):
3436 self._ascending = not self._ascending
3436 self._ascending = not self._ascending
3437
3437
3438 def isascending(self):
3438 def isascending(self):
3439 return self._ascending
3439 return self._ascending
3440
3440
3441 def isdescending(self):
3441 def isdescending(self):
3442 return not self._ascending
3442 return not self._ascending
3443
3443
3444 def first(self):
3444 def first(self):
3445 if self._ascending:
3445 if self._ascending:
3446 it = self.fastasc
3446 it = self.fastasc
3447 else:
3447 else:
3448 it = self.fastdesc
3448 it = self.fastdesc
3449 if it is None:
3449 if it is None:
3450 # we need to consume all and try again
3450 # we need to consume all and try again
3451 for x in self._consumegen():
3451 for x in self._consumegen():
3452 pass
3452 pass
3453 return self.first()
3453 return self.first()
3454 return next(it(), None)
3454 return next(it(), None)
3455
3455
3456 def last(self):
3456 def last(self):
3457 if self._ascending:
3457 if self._ascending:
3458 it = self.fastdesc
3458 it = self.fastdesc
3459 else:
3459 else:
3460 it = self.fastasc
3460 it = self.fastasc
3461 if it is None:
3461 if it is None:
3462 # we need to consume all and try again
3462 # we need to consume all and try again
3463 for x in self._consumegen():
3463 for x in self._consumegen():
3464 pass
3464 pass
3465 return self.first()
3465 return self.first()
3466 return next(it(), None)
3466 return next(it(), None)
3467
3467
3468 def __repr__(self):
3468 def __repr__(self):
3469 d = {False: '-', True: '+'}[self._ascending]
3469 d = {False: '-', True: '+'}[self._ascending]
3470 return '<%s%s>' % (type(self).__name__, d)
3470 return '<%s%s>' % (type(self).__name__, d)
3471
3471
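# Editor's illustration (not part of the original file): generatorset wraps a
# possibly expensive generator and consumes it only as far as needed; values
# already produced are cached so the set can be iterated again.
#
#   >>> gs = generatorset((r for r in [0, 2, 4, 6]), iterasc=True)
#   >>> 4 in gs            # only consumes the generator up to 4
#   True
#   >>> list(gs)
#   [0, 2, 4, 6]
#   >>> len(gs)
#   4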
3472 class spanset(abstractsmartset):
3472 class spanset(abstractsmartset):
3473 """Duck type for baseset class which represents a range of revisions and
3473 """Duck type for baseset class which represents a range of revisions and
3474 can work lazily and without having all the range in memory
3474 can work lazily and without having all the range in memory
3475
3475
3476 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3476 Note that spanset(x, y) behaves almost like xrange(x, y) except for two
3477 notable points:
3477 notable points:
3478 - when x > y it will be automatically descending,
3478 - when x > y it will be automatically descending,
3479 - revisions filtered with this repoview will be skipped.
3479 - revisions filtered with this repoview will be skipped.
3480
3480
3481 """
3481 """
3482 def __init__(self, repo, start=0, end=None):
3482 def __init__(self, repo, start=0, end=None):
3483 """
3483 """
3484 start: first revision included in the set
3484 start: first revision included in the set
3485 (defaults to 0)
3485 (defaults to 0)
3486 end: first revision excluded (last+1)
3486 end: first revision excluded (last+1)
3487 (defaults to len(repo))
3487 (defaults to len(repo))
3488
3488
3489 Spanset will be descending if `end` < `start`.
3489 Spanset will be descending if `end` < `start`.
3490 """
3490 """
3491 if end is None:
3491 if end is None:
3492 end = len(repo)
3492 end = len(repo)
3493 self._ascending = start <= end
3493 self._ascending = start <= end
3494 if not self._ascending:
3494 if not self._ascending:
3495 start, end = end + 1, start +1
3495 start, end = end + 1, start +1
3496 self._start = start
3496 self._start = start
3497 self._end = end
3497 self._end = end
3498 self._hiddenrevs = repo.changelog.filteredrevs
3498 self._hiddenrevs = repo.changelog.filteredrevs
3499
3499
3500 def sort(self, reverse=False):
3500 def sort(self, reverse=False):
3501 self._ascending = not reverse
3501 self._ascending = not reverse
3502
3502
3503 def reverse(self):
3503 def reverse(self):
3504 self._ascending = not self._ascending
3504 self._ascending = not self._ascending
3505
3505
3506 def _iterfilter(self, iterrange):
3506 def _iterfilter(self, iterrange):
3507 s = self._hiddenrevs
3507 s = self._hiddenrevs
3508 for r in iterrange:
3508 for r in iterrange:
3509 if r not in s:
3509 if r not in s:
3510 yield r
3510 yield r
3511
3511
3512 def __iter__(self):
3512 def __iter__(self):
3513 if self._ascending:
3513 if self._ascending:
3514 return self.fastasc()
3514 return self.fastasc()
3515 else:
3515 else:
3516 return self.fastdesc()
3516 return self.fastdesc()
3517
3517
3518 def fastasc(self):
3518 def fastasc(self):
3519 iterrange = xrange(self._start, self._end)
3519 iterrange = xrange(self._start, self._end)
3520 if self._hiddenrevs:
3520 if self._hiddenrevs:
3521 return self._iterfilter(iterrange)
3521 return self._iterfilter(iterrange)
3522 return iter(iterrange)
3522 return iter(iterrange)
3523
3523
3524 def fastdesc(self):
3524 def fastdesc(self):
3525 iterrange = xrange(self._end - 1, self._start - 1, -1)
3525 iterrange = xrange(self._end - 1, self._start - 1, -1)
3526 if self._hiddenrevs:
3526 if self._hiddenrevs:
3527 return self._iterfilter(iterrange)
3527 return self._iterfilter(iterrange)
3528 return iter(iterrange)
3528 return iter(iterrange)
3529
3529
3530 def __contains__(self, rev):
3530 def __contains__(self, rev):
3531 hidden = self._hiddenrevs
3531 hidden = self._hiddenrevs
3532 return ((self._start <= rev < self._end)
3532 return ((self._start <= rev < self._end)
3533 and not (hidden and rev in hidden))
3533 and not (hidden and rev in hidden))
3534
3534
3535 def __nonzero__(self):
3535 def __nonzero__(self):
3536 for r in self:
3536 for r in self:
3537 return True
3537 return True
3538 return False
3538 return False
3539
3539
3540 def __len__(self):
3540 def __len__(self):
3541 if not self._hiddenrevs:
3541 if not self._hiddenrevs:
3542 return abs(self._end - self._start)
3542 return abs(self._end - self._start)
3543 else:
3543 else:
3544 count = 0
3544 count = 0
3545 start = self._start
3545 start = self._start
3546 end = self._end
3546 end = self._end
3547 for rev in self._hiddenrevs:
3547 for rev in self._hiddenrevs:
3548 if (end < rev <= start) or (start <= rev < end):
3548 if (end < rev <= start) or (start <= rev < end):
3549 count += 1
3549 count += 1
3550 return abs(self._end - self._start) - count
3550 return abs(self._end - self._start) - count
3551
3551
3552 def isascending(self):
3552 def isascending(self):
3553 return self._ascending
3553 return self._ascending
3554
3554
3555 def isdescending(self):
3555 def isdescending(self):
3556 return not self._ascending
3556 return not self._ascending
3557
3557
3558 def first(self):
3558 def first(self):
3559 if self._ascending:
3559 if self._ascending:
3560 it = self.fastasc
3560 it = self.fastasc
3561 else:
3561 else:
3562 it = self.fastdesc
3562 it = self.fastdesc
3563 for x in it():
3563 for x in it():
3564 return x
3564 return x
3565 return None
3565 return None
3566
3566
3567 def last(self):
3567 def last(self):
3568 if self._ascending:
3568 if self._ascending:
3569 it = self.fastdesc
3569 it = self.fastdesc
3570 else:
3570 else:
3571 it = self.fastasc
3571 it = self.fastasc
3572 for x in it():
3572 for x in it():
3573 return x
3573 return x
3574 return None
3574 return None
3575
3575
3576 def __repr__(self):
3576 def __repr__(self):
3577 d = {False: '-', True: '+'}[self._ascending]
3577 d = {False: '-', True: '+'}[self._ascending]
3578 return '<%s%s %d:%d>' % (type(self).__name__, d,
3578 return '<%s%s %d:%d>' % (type(self).__name__, d,
3579 self._start, self._end - 1)
3579 self._start, self._end - 1)
3580
3580
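# Editor's illustration (not part of the original file): spanset only needs
# len(repo) and repo.changelog.filteredrevs, so a hypothetical stub repo is
# enough to show the ordering and filtering rules described above (here
# pretending revision 2 is hidden in a 5-revision repository):
#
#   >>> class fakeclog(object):
#   ...     filteredrevs = frozenset([2])
#   >>> class fakerepo(object):
#   ...     changelog = fakeclog()
#   ...     def __len__(self):
#   ...         return 5
#   >>> list(spanset(fakerepo(), 0, 5))    # ascending, rev 2 skipped
#   [0, 1, 3, 4]
#   >>> list(spanset(fakerepo(), 4, 0))    # start > end, so descending
#   [4, 3, 1]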
3581 class fullreposet(spanset):
3581 class fullreposet(spanset):
3582 """a set containing all revisions in the repo
3582 """a set containing all revisions in the repo
3583
3583
3584 This class exists to host special optimization and magic to handle virtual
3584 This class exists to host special optimization and magic to handle virtual
3585 revisions such as "null".
3585 revisions such as "null".
3586 """
3586 """
3587
3587
3588 def __init__(self, repo):
3588 def __init__(self, repo):
3589 super(fullreposet, self).__init__(repo)
3589 super(fullreposet, self).__init__(repo)
3590
3590
3591 def __and__(self, other):
3591 def __and__(self, other):
3592 """As self contains the whole repo, all of the other set should also be
3592 """As self contains the whole repo, all of the other set should also be
3593 in self. Therefore `self & other = other`.
3593 in self. Therefore `self & other = other`.
3594
3594
3595 This boldly assumes the other contains valid revs only.
3595 This boldly assumes the other contains valid revs only.
3596 """
3596 """
3597 # other is not a smartset, make it so
3597 # other is not a smartset, make it so
3598 if not util.safehasattr(other, 'isascending'):
3598 if not util.safehasattr(other, 'isascending'):
3599 # filter out hidden revisions
3599 # filter out hidden revisions
3600 # (this boldly assumes all smartsets are pure)
3600 # (this boldly assumes all smartsets are pure)
3601 #
3601 #
3602 # `other` was used with "&", let's assume this is a set like
3602 # `other` was used with "&", let's assume this is a set like
3603 # object.
3603 # object.
3604 other = baseset(other - self._hiddenrevs)
3604 other = baseset(other - self._hiddenrevs)
3605
3605
3606 # XXX As fullreposet is also used as bootstrap, this is wrong.
3606 # XXX As fullreposet is also used as bootstrap, this is wrong.
3607 #
3607 #
3608 # With a giveme312() revset returning [3,1,2], this makes
3608 # With a giveme312() revset returning [3,1,2], this makes
3609 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3609 # 'hg log -r "giveme312()"' -> 1, 2, 3 (wrong)
3610 # We cannot just drop it because other usages still need to sort it:
3610 # We cannot just drop it because other usages still need to sort it:
3611 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3611 # 'hg log -r "all() and giveme312()"' -> 1, 2, 3 (right)
3612 #
3612 #
3613 # There are also some faulty revset implementations that rely on it
3613 # There are also some faulty revset implementations that rely on it
3614 # (eg: children as of its state in e8075329c5fb)
3614 # (eg: children as of its state in e8075329c5fb)
3615 #
3615 #
3616 # When we fix the two points above we can move this into the if clause
3616 # When we fix the two points above we can move this into the if clause
3617 other.sort(reverse=self.isdescending())
3617 other.sort(reverse=self.isdescending())
3618 return other
3618 return other
3619
3619
3620 def prettyformatset(revs):
3620 def prettyformatset(revs):
3621 lines = []
3621 lines = []
3622 rs = repr(revs)
3622 rs = repr(revs)
3623 p = 0
3623 p = 0
3624 while p < len(rs):
3624 while p < len(rs):
3625 q = rs.find('<', p + 1)
3625 q = rs.find('<', p + 1)
3626 if q < 0:
3626 if q < 0:
3627 q = len(rs)
3627 q = len(rs)
3628 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3628 l = rs.count('<', 0, p) - rs.count('>', 0, p)
3629 assert l >= 0
3629 assert l >= 0
3630 lines.append((l, rs[p:q].rstrip()))
3630 lines.append((l, rs[p:q].rstrip()))
3631 p = q
3631 p = q
3632 return '\n'.join(' ' * l + s for l, s in lines)
3632 return '\n'.join(' ' * l + s for l, s in lines)
3633
3633
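# Editor's illustration (not part of the original file): prettyformatset()
# indents each nested smartset repr by its '<' nesting depth, which makes
# composed sets readable in debug output.
#
#   >>> print prettyformatset(addset(baseset([1]), baseset([2, 3])))
#   <addset
#    <baseset [1]>,
#    <baseset [2, 3]>>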
3634 # tell hggettext to extract docstrings from these functions:
3634 # tell hggettext to extract docstrings from these functions:
3635 i18nfunctions = symbols.values()
3635 i18nfunctions = symbols.values()
@@ -1,864 +1,862 b''
1 # templater.py - template expansion for output
1 # templater.py - template expansion for output
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import os, re
9 import os, re
10 import util, config, templatefilters, templatekw, parser, error
10 import util, config, templatefilters, templatekw, parser, error
11 import revset as revsetmod
11 import revset as revsetmod
12 import types
12 import types
13 import minirst
13 import minirst
14
14
15 # template parsing
15 # template parsing
16
16
17 elements = {
17 elements = {
18 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
18 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
19 ",": (2, None, ("list", 2)),
19 ",": (2, None, ("list", 2)),
20 "|": (5, None, ("|", 5)),
20 "|": (5, None, ("|", 5)),
21 "%": (6, None, ("%", 6)),
21 "%": (6, None, ("%", 6)),
22 ")": (0, None, None),
22 ")": (0, None, None),
23 "integer": (0, ("integer",), None),
23 "integer": (0, ("integer",), None),
24 "symbol": (0, ("symbol",), None),
24 "symbol": (0, ("symbol",), None),
25 "string": (0, ("template",), None),
25 "string": (0, ("template",), None),
26 "rawstring": (0, ("rawstring",), None),
26 "rawstring": (0, ("rawstring",), None),
27 "end": (0, None, None),
27 "end": (0, None, None),
28 }
28 }
29
29
30 def tokenizer(data):
30 def tokenize(program, start, end):
31 program, start, end = data
32 pos = start
31 pos = start
33 while pos < end:
32 while pos < end:
34 c = program[pos]
33 c = program[pos]
35 if c.isspace(): # skip inter-token whitespace
34 if c.isspace(): # skip inter-token whitespace
36 pass
35 pass
37 elif c in "(,)%|": # handle simple operators
36 elif c in "(,)%|": # handle simple operators
38 yield (c, None, pos)
37 yield (c, None, pos)
39 elif (c in '"\'' or c == 'r' and
38 elif (c in '"\'' or c == 'r' and
40 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
39 program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
41 if c == 'r':
40 if c == 'r':
42 pos += 1
41 pos += 1
43 c = program[pos]
42 c = program[pos]
44 decode = False
43 decode = False
45 else:
44 else:
46 decode = True
45 decode = True
47 pos += 1
46 pos += 1
48 s = pos
47 s = pos
49 while pos < end: # find closing quote
48 while pos < end: # find closing quote
50 d = program[pos]
49 d = program[pos]
51 if decode and d == '\\': # skip over escaped characters
50 if decode and d == '\\': # skip over escaped characters
52 pos += 2
51 pos += 2
53 continue
52 continue
54 if d == c:
53 if d == c:
55 if not decode:
54 if not decode:
56 yield ('rawstring', program[s:pos], s)
55 yield ('rawstring', program[s:pos], s)
57 break
56 break
58 yield ('string', program[s:pos], s)
57 yield ('string', program[s:pos], s)
59 break
58 break
60 pos += 1
59 pos += 1
61 else:
60 else:
62 raise error.ParseError(_("unterminated string"), s)
61 raise error.ParseError(_("unterminated string"), s)
63 elif c.isdigit() or c == '-':
62 elif c.isdigit() or c == '-':
64 s = pos
63 s = pos
65 if c == '-': # simply take negate operator as part of integer
64 if c == '-': # simply take negate operator as part of integer
66 pos += 1
65 pos += 1
67 if pos >= end or not program[pos].isdigit():
66 if pos >= end or not program[pos].isdigit():
68 raise error.ParseError(_("integer literal without digits"), s)
67 raise error.ParseError(_("integer literal without digits"), s)
69 pos += 1
68 pos += 1
70 while pos < end:
69 while pos < end:
71 d = program[pos]
70 d = program[pos]
72 if not d.isdigit():
71 if not d.isdigit():
73 break
72 break
74 pos += 1
73 pos += 1
75 yield ('integer', program[s:pos], s)
74 yield ('integer', program[s:pos], s)
76 pos -= 1
75 pos -= 1
77 elif c.isalnum() or c in '_':
76 elif c.isalnum() or c in '_':
78 s = pos
77 s = pos
79 pos += 1
78 pos += 1
80 while pos < end: # find end of symbol
79 while pos < end: # find end of symbol
81 d = program[pos]
80 d = program[pos]
82 if not (d.isalnum() or d == "_"):
81 if not (d.isalnum() or d == "_"):
83 break
82 break
84 pos += 1
83 pos += 1
85 sym = program[s:pos]
84 sym = program[s:pos]
86 yield ('symbol', sym, s)
85 yield ('symbol', sym, s)
87 pos -= 1
86 pos -= 1
88 elif c == '}':
87 elif c == '}':
89 pos += 1
88 pos += 1
90 break
89 break
91 else:
90 else:
92 raise error.ParseError(_("syntax error"), pos)
91 raise error.ParseError(_("syntax error"), pos)
93 pos += 1
92 pos += 1
94 yield ('end', None, pos)
93 yield ('end', None, pos)
95
94
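# Editor's illustration (not part of the original changeset): with this
# revision the tokenizer is a plain generator over a slice of the template
# string, and the parser receives the token iterator directly (see
# compiletemplate below) instead of a (tokenizer, program) pair.
#
#   >>> tmpl = '{rev|short}'
#   >>> list(tokenize(tmpl, 1, len(tmpl)))
#   [('symbol', 'rev', 1), ('|', None, 4), ('symbol', 'short', 5), ('end', None, 11)]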
96 def compiletemplate(tmpl, context):
95 def compiletemplate(tmpl, context):
97 parsed = []
96 parsed = []
98 pos, stop = 0, len(tmpl)
97 pos, stop = 0, len(tmpl)
99 p = parser.parser(tokenizer, elements)
98 p = parser.parser(elements)
100 while pos < stop:
99 while pos < stop:
101 n = tmpl.find('{', pos)
100 n = tmpl.find('{', pos)
102 if n < 0:
101 if n < 0:
103 parsed.append(('string', tmpl[pos:]))
102 parsed.append(('string', tmpl[pos:]))
104 break
103 break
105 bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
104 bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
106 if bs % 2 == 1:
105 if bs % 2 == 1:
107 # escaped (e.g. '\{', '\\\{', but not '\\{')
106 # escaped (e.g. '\{', '\\\{', but not '\\{')
108 parsed.append(('string', (tmpl[pos:n - 1] + "{")))
107 parsed.append(('string', (tmpl[pos:n - 1] + "{")))
109 pos = n + 1
108 pos = n + 1
110 continue
109 continue
111 if n > pos:
110 if n > pos:
112 parsed.append(('string', tmpl[pos:n]))
111 parsed.append(('string', tmpl[pos:n]))
113
112
114 pd = [tmpl, n + 1, stop]
113 parseres, pos = p.parse(tokenize(tmpl, n + 1, stop))
115 parseres, pos = p.parse(pd)
116 parsed.append(parseres)
114 parsed.append(parseres)
117
115
118 return [compileexp(e, context, methods) for e in parsed]
116 return [compileexp(e, context, methods) for e in parsed]
119
117
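# Editor's note (not part of the original file): the backslash counting in
# compiletemplate() above treats a '{' preceded by an odd number of
# backslashes as escaped. For example, a template consisting of the
# characters '\{rev}' compiles to the literal text '{rev}' rather than
# expanding the {rev} expression.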
120 def compileexp(exp, context, curmethods):
118 def compileexp(exp, context, curmethods):
121 t = exp[0]
119 t = exp[0]
122 if t in curmethods:
120 if t in curmethods:
123 return curmethods[t](exp, context)
121 return curmethods[t](exp, context)
124 raise error.ParseError(_("unknown method '%s'") % t)
122 raise error.ParseError(_("unknown method '%s'") % t)
125
123
126 # template evaluation
124 # template evaluation
127
125
128 def getsymbol(exp):
126 def getsymbol(exp):
129 if exp[0] == 'symbol':
127 if exp[0] == 'symbol':
130 return exp[1]
128 return exp[1]
131 raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
129 raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
132
130
133 def getlist(x):
131 def getlist(x):
134 if not x:
132 if not x:
135 return []
133 return []
136 if x[0] == 'list':
134 if x[0] == 'list':
137 return getlist(x[1]) + [x[2]]
135 return getlist(x[1]) + [x[2]]
138 return [x]
136 return [x]
139
137
140 def getfilter(exp, context):
138 def getfilter(exp, context):
141 f = getsymbol(exp)
139 f = getsymbol(exp)
142 if f not in context._filters:
140 if f not in context._filters:
143 raise error.ParseError(_("unknown function '%s'") % f)
141 raise error.ParseError(_("unknown function '%s'") % f)
144 return context._filters[f]
142 return context._filters[f]
145
143
146 def gettemplate(exp, context):
144 def gettemplate(exp, context):
147 if exp[0] == 'template':
145 if exp[0] == 'template':
148 return compiletemplate(exp[1], context)
146 return compiletemplate(exp[1], context)
149 if exp[0] == 'symbol':
147 if exp[0] == 'symbol':
150 # unlike runsymbol(), here 'symbol' is always taken as template name
148 # unlike runsymbol(), here 'symbol' is always taken as template name
151 # even if it exists in mapping. this allows us to override mapping
149 # even if it exists in mapping. this allows us to override mapping
152 # by web templates, e.g. 'changelogtag' is redefined in map file.
150 # by web templates, e.g. 'changelogtag' is redefined in map file.
153 return context._load(exp[1])
151 return context._load(exp[1])
154 raise error.ParseError(_("expected template specifier"))
152 raise error.ParseError(_("expected template specifier"))
155
153
156 def runinteger(context, mapping, data):
154 def runinteger(context, mapping, data):
157 return int(data)
155 return int(data)
158
156
159 def runstring(context, mapping, data):
157 def runstring(context, mapping, data):
160 return data.decode("string-escape")
158 return data.decode("string-escape")
161
159
162 def runrawstring(context, mapping, data):
160 def runrawstring(context, mapping, data):
163 return data
161 return data
164
162
165 def runsymbol(context, mapping, key):
163 def runsymbol(context, mapping, key):
166 v = mapping.get(key)
164 v = mapping.get(key)
167 if v is None:
165 if v is None:
168 v = context._defaults.get(key)
166 v = context._defaults.get(key)
169 if v is None:
167 if v is None:
170 try:
168 try:
171 v = context.process(key, mapping)
169 v = context.process(key, mapping)
172 except TemplateNotFound:
170 except TemplateNotFound:
173 v = ''
171 v = ''
174 if callable(v):
172 if callable(v):
175 return v(**mapping)
173 return v(**mapping)
176 if isinstance(v, types.GeneratorType):
174 if isinstance(v, types.GeneratorType):
177 v = list(v)
175 v = list(v)
178 return v
176 return v
179
177
180 def buildtemplate(exp, context):
178 def buildtemplate(exp, context):
181 ctmpl = compiletemplate(exp[1], context)
179 ctmpl = compiletemplate(exp[1], context)
182 if len(ctmpl) == 1:
180 if len(ctmpl) == 1:
183 return ctmpl[0] # fast path for string with no template fragment
181 return ctmpl[0] # fast path for string with no template fragment
184 return (runtemplate, ctmpl)
182 return (runtemplate, ctmpl)
185
183
186 def runtemplate(context, mapping, template):
184 def runtemplate(context, mapping, template):
187 for func, data in template:
185 for func, data in template:
188 yield func(context, mapping, data)
186 yield func(context, mapping, data)
189
187
190 def buildfilter(exp, context):
188 def buildfilter(exp, context):
191 func, data = compileexp(exp[1], context, methods)
189 func, data = compileexp(exp[1], context, methods)
192 filt = getfilter(exp[2], context)
190 filt = getfilter(exp[2], context)
193 return (runfilter, (func, data, filt))
191 return (runfilter, (func, data, filt))
194
192
195 def runfilter(context, mapping, data):
193 def runfilter(context, mapping, data):
196 func, data, filt = data
194 func, data, filt = data
197 # func() may return a string, a generator of strings, or an arbitrary object
195 # func() may return a string, a generator of strings, or an arbitrary object
198 # such as a date tuple, but the filter does not want a generator.
196 # such as a date tuple, but the filter does not want a generator.
199 thing = func(context, mapping, data)
197 thing = func(context, mapping, data)
200 if isinstance(thing, types.GeneratorType):
198 if isinstance(thing, types.GeneratorType):
201 thing = stringify(thing)
199 thing = stringify(thing)
202 try:
200 try:
203 return filt(thing)
201 return filt(thing)
204 except (ValueError, AttributeError, TypeError):
202 except (ValueError, AttributeError, TypeError):
205 if isinstance(data, tuple):
203 if isinstance(data, tuple):
206 dt = data[1]
204 dt = data[1]
207 else:
205 else:
208 dt = data
206 dt = data
209 raise util.Abort(_("template filter '%s' is not compatible with "
207 raise util.Abort(_("template filter '%s' is not compatible with "
210 "keyword '%s'") % (filt.func_name, dt))
208 "keyword '%s'") % (filt.func_name, dt))
211
209
212 def buildmap(exp, context):
210 def buildmap(exp, context):
213 func, data = compileexp(exp[1], context, methods)
211 func, data = compileexp(exp[1], context, methods)
214 ctmpl = gettemplate(exp[2], context)
212 ctmpl = gettemplate(exp[2], context)
215 return (runmap, (func, data, ctmpl))
213 return (runmap, (func, data, ctmpl))
216
214
217 def runmap(context, mapping, data):
215 def runmap(context, mapping, data):
218 func, data, ctmpl = data
216 func, data, ctmpl = data
219 d = func(context, mapping, data)
217 d = func(context, mapping, data)
220 if callable(d):
218 if callable(d):
221 d = d()
219 d = d()
222
220
223 lm = mapping.copy()
221 lm = mapping.copy()
224
222
225 for i in d:
223 for i in d:
226 if isinstance(i, dict):
224 if isinstance(i, dict):
227 lm.update(i)
225 lm.update(i)
228 lm['originalnode'] = mapping.get('node')
226 lm['originalnode'] = mapping.get('node')
229 yield runtemplate(context, lm, ctmpl)
227 yield runtemplate(context, lm, ctmpl)
230 else:
228 else:
231 # v is not an iterable of dicts, this happens when 'key'
229 # v is not an iterable of dicts, this happens when 'key'
232 # has been fully expanded already and format is useless.
230 # has been fully expanded already and format is useless.
233 # If so, return the expanded value.
231 # If so, return the expanded value.
234 yield i
232 yield i
235
233
236 def buildfunc(exp, context):
234 def buildfunc(exp, context):
237 n = getsymbol(exp[1])
235 n = getsymbol(exp[1])
238 args = [compileexp(x, context, exprmethods) for x in getlist(exp[2])]
236 args = [compileexp(x, context, exprmethods) for x in getlist(exp[2])]
239 if n in funcs:
237 if n in funcs:
240 f = funcs[n]
238 f = funcs[n]
241 return (f, args)
239 return (f, args)
242 if n in context._filters:
240 if n in context._filters:
243 if len(args) != 1:
241 if len(args) != 1:
244 raise error.ParseError(_("filter %s expects one argument") % n)
242 raise error.ParseError(_("filter %s expects one argument") % n)
245 f = context._filters[n]
243 f = context._filters[n]
246 return (runfilter, (args[0][0], args[0][1], f))
244 return (runfilter, (args[0][0], args[0][1], f))
247 raise error.ParseError(_("unknown function '%s'") % n)
245 raise error.ParseError(_("unknown function '%s'") % n)
248
246
249 def date(context, mapping, args):
247 def date(context, mapping, args):
250 """:date(date[, fmt]): Format a date. See :hg:`help dates` for formatting
248 """:date(date[, fmt]): Format a date. See :hg:`help dates` for formatting
251 strings."""
249 strings."""
252 if not (1 <= len(args) <= 2):
250 if not (1 <= len(args) <= 2):
253 # i18n: "date" is a keyword
251 # i18n: "date" is a keyword
254 raise error.ParseError(_("date expects one or two arguments"))
252 raise error.ParseError(_("date expects one or two arguments"))
255
253
256 date = args[0][0](context, mapping, args[0][1])
254 date = args[0][0](context, mapping, args[0][1])
257 fmt = None
255 fmt = None
258 if len(args) == 2:
256 if len(args) == 2:
259 fmt = stringify(args[1][0](context, mapping, args[1][1]))
257 fmt = stringify(args[1][0](context, mapping, args[1][1]))
260 try:
258 try:
261 if fmt is None:
259 if fmt is None:
262 return util.datestr(date)
260 return util.datestr(date)
263 else:
261 else:
264 return util.datestr(date, fmt)
262 return util.datestr(date, fmt)
265 except (TypeError, ValueError):
263 except (TypeError, ValueError):
266 # i18n: "date" is a keyword
264 # i18n: "date" is a keyword
267 raise error.ParseError(_("date expects a date information"))
265 raise error.ParseError(_("date expects a date information"))
268
266
269 def diff(context, mapping, args):
267 def diff(context, mapping, args):
270 """:diff([includepattern [, excludepattern]]): Show a diff, optionally
268 """:diff([includepattern [, excludepattern]]): Show a diff, optionally
271 specifying files to include or exclude."""
269 specifying files to include or exclude."""
272 if len(args) > 2:
270 if len(args) > 2:
273 # i18n: "diff" is a keyword
271 # i18n: "diff" is a keyword
274 raise error.ParseError(_("diff expects one, two or no arguments"))
272 raise error.ParseError(_("diff expects one, two or no arguments"))
275
273
276 def getpatterns(i):
274 def getpatterns(i):
277 if i < len(args):
275 if i < len(args):
278 s = stringify(args[i][0](context, mapping, args[i][1])).strip()
276 s = stringify(args[i][0](context, mapping, args[i][1])).strip()
279 if s:
277 if s:
280 return [s]
278 return [s]
281 return []
279 return []
282
280
283 ctx = mapping['ctx']
281 ctx = mapping['ctx']
284 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
282 chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))
285
283
286 return ''.join(chunks)
284 return ''.join(chunks)
287
285
288 def fill(context, mapping, args):
286 def fill(context, mapping, args):
289 """:fill(text[, width[, initialident[, hangindent]]]): Fill many
287 """:fill(text[, width[, initialident[, hangindent]]]): Fill many
290 paragraphs with optional indentation. See the "fill" filter."""
288 paragraphs with optional indentation. See the "fill" filter."""
291 if not (1 <= len(args) <= 4):
289 if not (1 <= len(args) <= 4):
292 # i18n: "fill" is a keyword
290 # i18n: "fill" is a keyword
293 raise error.ParseError(_("fill expects one to four arguments"))
291 raise error.ParseError(_("fill expects one to four arguments"))
294
292
295 text = stringify(args[0][0](context, mapping, args[0][1]))
293 text = stringify(args[0][0](context, mapping, args[0][1]))
296 width = 76
294 width = 76
297 initindent = ''
295 initindent = ''
298 hangindent = ''
296 hangindent = ''
299 if 2 <= len(args) <= 4:
297 if 2 <= len(args) <= 4:
300 try:
298 try:
301 width = int(stringify(args[1][0](context, mapping, args[1][1])))
299 width = int(stringify(args[1][0](context, mapping, args[1][1])))
302 except ValueError:
300 except ValueError:
303 # i18n: "fill" is a keyword
301 # i18n: "fill" is a keyword
304 raise error.ParseError(_("fill expects an integer width"))
302 raise error.ParseError(_("fill expects an integer width"))
305 try:
303 try:
306 initindent = stringify(args[2][0](context, mapping, args[2][1]))
304 initindent = stringify(args[2][0](context, mapping, args[2][1]))
307 hangindent = stringify(args[3][0](context, mapping, args[3][1]))
305 hangindent = stringify(args[3][0](context, mapping, args[3][1]))
308 except IndexError:
306 except IndexError:
309 pass
307 pass
310
308
311 return templatefilters.fill(text, width, initindent, hangindent)
309 return templatefilters.fill(text, width, initindent, hangindent)
312
310
def pad(context, mapping, args):
    """:pad(text, width[, fillchar=' '[, right=False]]): Pad text with a
    fill character."""
    if not (2 <= len(args) <= 4):
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = int(args[1][1])

    text = stringify(args[0][0](context, mapping, args[0][1]))

    right = False
    fillchar = ' '
    if len(args) > 2:
        fillchar = stringify(args[2][0](context, mapping, args[2][1]))
    if len(args) > 3:
        right = util.parsebool(args[3][1])

    if right:
        return text.rjust(width, fillchar)
    else:
        return text.ljust(width, fillchar)

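# Illustrative usage of pad() above (assumption, not part of this changeset):
# {pad(rev, 8)} left-justifies the revision number in an 8-column field, and
# something like {pad(rev, 8, " ", True)} right-justifies it; note that the
# width and the "right" flag are taken from the raw argument tokens.
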
def indent(context, mapping, args):
    """:indent(text, indentchars[, firstline]): Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    indent = stringify(args[1][0](context, mapping, args[1][1]))

    if len(args) == 3:
        firstline = stringify(args[2][0](context, mapping, args[2][1]))
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)

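# Illustrative usage of indent() above (assumption): {indent(desc, "    ")}
# prefixes every non-empty line of the description with four spaces; a third
# argument would replace the indent for the first line only.
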
def get(context, mapping, args):
    """:get(dict, key): Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = args[0][0](context, mapping, args[0][1])
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = args[1][0](context, mapping, args[1][1])
    yield dictarg.get(key)

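# Illustrative usage of get() above (assumption): {get(extras, "branch")} looks
# up the "branch" key in the dict-like "extras" keyword; a non-dict first
# argument triggers the ParseError raised above.
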
def if_(context, mapping, args):
    """:if(expr, then[, else]): Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    test = stringify(args[0][0](context, mapping, args[0][1]))
    if test:
        yield args[1][0](context, mapping, args[1][1])
    elif len(args) == 3:
        yield args[2][0](context, mapping, args[2][1])

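# Illustrative usage of if() above (assumption): {if(tags, "tagged", "untagged")}
# emits the second argument when the stringified test is non-empty, otherwise
# the optional third argument.
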
def ifcontains(context, mapping, args):
    """:ifcontains(search, thing, then[, else]): Conditionally execute based
    on whether the item "search" is in "thing"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    item = stringify(args[0][0](context, mapping, args[0][1]))
    items = args[1][0](context, mapping, args[1][1])

    if item in items:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])

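# Illustrative usage of ifcontains() above (assumption):
# {ifcontains(rev, revset("."), "@", "o")} prints "@" for the working-directory
# parent and "o" for any other revision; membership is tested with a plain "in".
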
def ifeq(context, mapping, args):
    """:ifeq(expr1, expr2, then[, else]): Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    test = stringify(args[0][0](context, mapping, args[0][1]))
    match = stringify(args[1][0](context, mapping, args[1][1]))
    if test == match:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])

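# Illustrative usage of ifeq() above (assumption):
# {ifeq(branch, "default", "", branch)} hides the branch name when it is the
# default branch; both operands are stringified before comparison.
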
def join(context, mapping, args):
    """:join(list, sep): Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if callable(joinset):
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset()]

    joiner = " "
    if len(args) > 1:
        joiner = stringify(args[1][0](context, mapping, args[1][1]))

    first = True
    for x in joinset:
        if first:
            first = False
        else:
            yield joiner
        yield x

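# Illustrative usage of join() above (assumption): {join(files, ", ")} renders
# the changed-file list separated by ", "; when the list is a lazy keyword, its
# joinfmt callback is applied to every item first.
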
def label(context, mapping, args):
    """:label(label, expr): Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    # ignore args[0] (the label string) since this is supposed to be a no-op
    yield args[1][0](context, mapping, args[1][1])

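# Illustrative usage of label() above (assumption):
# {label("log.description", desc)} tags the output with the "log.description"
# effect label so extensions such as color can post-process it; the template
# engine itself treats the call as a no-op.
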
def revset(context, mapping, args):
    """:revset(query[, formatargs...]): Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = args[0][1]
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        m = revsetmod.match(repo.ui, expr)
        return m(repo)

    if len(args) > 1:
        formatargs = list([a[0](context, mapping, a[1]) for a in args[1:]])
        revs = query(revsetmod.formatspec(raw, *formatargs))
        revs = list([str(r) for r in revs])
    else:
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list([str(r) for r in revs])
            revsetcache[raw] = revs

    return templatekw.showlist("revision", revs, **mapping)

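# Illustrative usage of revset() above (assumption):
# {join(revset("parents(%d)", rev), ", ")} runs a formatted revset query with
# the current revision interpolated via revsetmod.formatspec(); queries without
# format arguments are memoized in the per-template "revsetcache".
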
def rstdoc(context, mapping, args):
    """:rstdoc(text, style): Format ReStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    style = stringify(args[1][0](context, mapping, args[1][1]))

    return minirst.format(text, style=style, keep=['verbose'])

def shortest(context, mapping, args):
    """:shortest(node, minlength=4): Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = stringify(args[0][0](context, mapping, args[0][1]))

    minlength = 4
    if len(args) > 1:
        minlength = int(args[1][1])

    cl = mapping['ctx']._repo.changelog
    def isvalid(test):
        try:
            try:
                cl.index.partialmatch(test)
            except AttributeError:
                # Pure mercurial doesn't support partialmatch on the index.
                # Fallback to the slow way.
                if cl._partialmatch(test) is None:
                    return False

            try:
                i = int(test)
                # if we are a pure int, then starting with zero will not be
                # confused as a rev; or, obviously, if the int is larger than
                # the value of the tip rev
                if test[0] == '0' or i > len(cl):
                    return True
                return False
            except ValueError:
                return True
        except error.RevlogError:
            return False

    shortest = node
    startlength = max(6, minlength)
    length = startlength
    while True:
        test = node[:length]
        if isvalid(test):
            shortest = test
            if length == minlength or length > startlength:
                return shortest
            length -= 1
        else:
            length += 1
            if len(shortest) <= length:
                return shortest

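# Illustrative usage of shortest() above (assumption): {shortest(node, 6)}
# emits the shortest unambiguous prefix of the full hash that is at least six
# characters long, probing the changelog index through isvalid().
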
def strip(context, mapping, args):
    """:strip(text[, chars]): Strip characters from a string."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    if len(args) == 2:
        chars = stringify(args[1][0](context, mapping, args[1][1]))
        return text.strip(chars)
    return text.strip()

def sub(context, mapping, args):
    """:sub(pattern, replacement, expression): Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = stringify(args[0][0](context, mapping, args[0][1]))
    rpl = stringify(args[1][0](context, mapping, args[1][1]))
    src = stringify(args[2][0](context, mapping, args[2][1]))
    yield re.sub(pat, rpl, src)

def startswith(context, mapping, args):
    """:startswith(pattern, text): Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = stringify(args[0][0](context, mapping, args[0][1]))
    text = stringify(args[1][0](context, mapping, args[1][1]))
    if text.startswith(patn):
        return text
    return ''


def word(context, mapping, args):
    """:word(number, text[, separator]): Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    try:
        num = int(stringify(args[0][0](context, mapping, args[0][1])))
    except ValueError:
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects an integer index"))
    text = stringify(args[1][0](context, mapping, args[1][1]))
    if len(args) == 3:
        splitter = stringify(args[2][0](context, mapping, args[2][1]))
    else:
        splitter = None

    tokens = text.split(splitter)
    if num >= len(tokens):
        return ''
    else:
        return tokens[num]

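# Illustrative usage of word() above (assumption): {word(0, desc)} returns the
# first whitespace-separated word of the description (the index is zero-based);
# an out-of-range index yields the empty string.
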
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
exprmethods = {
    "integer": lambda e, c: (runinteger, e[1]),
    "string": lambda e, c: (runstring, e[1]),
    "rawstring": lambda e, c: (runrawstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "template": buildtemplate,
    "group": lambda e, c: compileexp(e[1], c, exprmethods),
    # ".": buildmember,
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
    }

# methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
methods = exprmethods.copy()
methods["integer"] = exprmethods["symbol"]  # '{1}' as variable

funcs = {
    "date": date,
    "diff": diff,
    "fill": fill,
    "get": get,
    "if": if_,
    "ifcontains": ifcontains,
    "ifeq": ifeq,
    "indent": indent,
    "join": join,
    "label": label,
    "pad": pad,
    "revset": revset,
    "rstdoc": rstdoc,
    "shortest": shortest,
    "startswith": startswith,
    "strip": strip,
    "sub": sub,
    "word": word,
    }

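# Sketch of how a new template function would be registered (an assumption for
# exposition, not part of this changeset): define foo(context, mapping, args)
# following the pattern above and add
#     "foo": foo,
# to the funcs table; function calls written as {foo(...)} in a template are
# then resolved through this mapping.
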
# template engine

stringify = templatefilters.stringify

def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    if isinstance(thing, str):
        yield thing
    elif not util.safehasattr(thing, '__iter__'):
        if thing is not None:
            yield str(thing)
    else:
        for i in thing:
            if isinstance(i, str):
                yield i
            elif not util.safehasattr(i, '__iter__'):
                if i is not None:
                    yield str(i)
            elif i is not None:
                for j in _flatten(i):
                    yield j

def unquotestring(s):
    '''unwrap quotes'''
    if len(s) < 2 or s[0] != s[-1]:
        raise SyntaxError(_('unmatched quotes'))
    # de-backslash-ify only <\">. it is invalid syntax in non-string part of
    # template, but we are likely to escape <"> in quoted string and it was
    # accepted before, thanks to issue4290. <\\"> is unmodified because it
    # is ambiguous and it was processed as such before 2.8.1.
    #
    # template    result
    # ---------   ------------------------
    # {\"\"}      parse error
    # "{""}"      {""} -> <>
    # "{\"\"}"    {""} -> <>
    # {"\""}      {"\""} -> <">
    # '{"\""}'    {"\""} -> <">
    # "{"\""}"    parse error (don't care)
    q = s[0]
    return s[1:-1].replace('\\\\' + q, '\\\\\\' + q).replace('\\' + q, q)

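# Behaviour sketch for unquotestring() above (derived from the code, shown for
# exposition only): a map-file value such as "changeset {rev}\n" is unwrapped
# by dropping the matching outer quotes, and any escaped \" inside the value is
# reduced to a plain quote, per the table above.
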
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters={}, defaults={}):
        self._loader = loader
        self._filters = filters
        self._defaults = defaults
        self._cache = {}

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            self._cache[t] = compiletemplate(self._loader(t), self)
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        return _flatten(runtemplate(self, mapping, self._load(t)))

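# Expansion sketch for the engine above (illustrative; the map entry is an
# assumption): given
#     changeset = 'changeset: {rev}:{node|short}\n'
# engine.process('changeset', mapping) compiles the entry once, substitutes
# {rev}, pipes {node} through the "short" filter, and streams the flattened
# string chunks produced by _flatten().
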
engines = {'default': engine}

def stylelist():
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    dirlist = os.listdir(paths[0])
    stylelist = []
    for file in dirlist:
        split = file.split(".")
        if split[0] == "map-cmdline":
            stylelist.append(split[1])
    return ", ".join(sorted(stylelist))

class TemplateNotFound(util.Abort):
    pass

class templater(object):

    def __init__(self, mapfile, filters={}, defaults={}, cache={},
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        mapfile is name of file to read map definitions from.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.'''
        self.mapfile = mapfile or 'template'
        self.cache = cache.copy()
        self.map = {}
        if mapfile:
            self.base = os.path.dirname(mapfile)
        else:
            self.base = ''
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {}

        if not mapfile:
            return
        if not os.path.exists(mapfile):
            raise util.Abort(_("style '%s' not found") % mapfile,
                             hint=_("available styles: %s") % stylelist())

        conf = config.config(includepaths=templatepaths())
        conf.read(mapfile)

        for key, val in conf[''].items():
            if not val:
                raise SyntaxError(_('%s: missing value') % conf.source('', key))
            if val[0] in "'\"":
                try:
                    self.cache[key] = unquotestring(val)
                except SyntaxError, inst:
                    raise SyntaxError('%s: %s' %
                                      (conf.source('', key), inst.args[0]))
            else:
                val = 'default', val
                if ':' in val[1]:
                    val = val[1].split(':', 1)
                self.map[key] = val[0], os.path.join(self.base, val[1])

    def __contains__(self, key):
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError, inst:
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError, inst:
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def __call__(self, t, **mapping):
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            self.ecache[ttype] = engines[ttype](self.load,
                                                self.filters, self.defaults)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream

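# Map-file handling sketch for templater.__init__ above (the entries are an
# assumption, shown for exposition only):
#     changeset = changeset.tmpl   -> loaded on demand from <base>/changeset.tmpl
#     tags = '{tag} '              -> quoted, so cached directly as a template
# A value prefixed with "enginename:" selects a non-default engine from the
# engines table.
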
def templatepaths():
    '''return locations used for template files.'''
    pathsrel = ['templates']
    paths = [os.path.normpath(os.path.join(util.datapath, f))
             for f in pathsrel]
    return [p for p in paths if os.path.isdir(p)]

def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for p in templatepaths():
        f = os.path.join(p, name)
        if os.path.exists(f):
            return f
    return None

def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths
        if (not style
            or style in (os.curdir, os.pardir)
            or os.sep in style
            or os.altsep and os.altsep in style):
            continue
        locations = [os.path.join(style, 'map'), 'map-' + style]
        locations.append('map')

        for path in paths:
            for location in locations:
                mapfile = os.path.join(path, location)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)

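# Lookup sketch for stylemap() above (assumption): stylemap('gitweb') walks the
# configured template paths and returns ('gitweb', <templatepath>/gitweb/map)
# as soon as one of the three candidate locations listed in the docstring
# exists.
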
# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()