##// END OF EJS Templates
py3: use pycompat.bytestr instead of bytes
Pulkit Goyal -
r32154:52e222ee default
parent child Browse files
Show More
@@ -1,1371 +1,1372 b''
1 # templater.py - template expansion for output
1 # templater.py - template expansion for output
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import re
11 import re
12 import types
12 import types
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 color,
16 color,
17 config,
17 config,
18 encoding,
18 encoding,
19 error,
19 error,
20 minirst,
20 minirst,
21 parser,
21 parser,
22 pycompat,
22 pycompat,
23 registrar,
23 registrar,
24 revset as revsetmod,
24 revset as revsetmod,
25 revsetlang,
25 revsetlang,
26 templatefilters,
26 templatefilters,
27 templatekw,
27 templatekw,
28 util,
28 util,
29 )
29 )
30
30
31 # template parsing
31 # template parsing
32
32
# operator/token table consumed by parser.parser() below; each entry maps a
# token type to its parsing behavior
elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "%": (16, None, None, ("%", 16), None),
    "|": (15, None, None, ("|", 15), None),
    "*": (5, None, None, ("*", 5), None),
    "/": (5, None, None, ("/", 5), None),
    "+": (4, None, None, ("+", 4), None),
    "-": (4, None, ("negate", 19), ("-", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "integer": (0, "integer", None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "template": (0, "template", None, None, None),
    "end": (0, None, None, None, None),
}
51
51
def tokenize(program, start, end, term=None):
    """Parse a template expression into a stream of tokens, which must end
    with term if specified.

    Yields (token-type, value, position) tuples.  Raises error.ParseError
    on malformed input.
    """
    pos = start
    # ensure indexing yields one-character strings (py3 bytes indexing
    # would yield ints)
    program = pycompat.bytestr(program)
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(=,)%|+-*/": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted templates
            s = pos + 1
            data, pos = _parsetemplate(program, s, end, c)
            yield ('template', data, s)
            pos -= 1
        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
            # handle quoted strings
            c = program[pos + 1]
            s = pos = pos + 2
            while pos < end: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isdigit():
            s = pos
            while pos < end:
                d = program[pos]
                if not d.isdigit():
                    break
                pos += 1
            yield ('integer', program[s:pos], s)
            pos -= 1
        elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
              or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
            # handle escaped quoted strings for compatibility with 2.9.2-3.4,
            # where some of nested templates were preprocessed as strings and
            # then compiled. therefore, \"...\" was allowed. (issue4733)
            #
            # processing flow of _evalifliteral() at 5ab28a2e9962:
            # outer template string    -> stringify()  -> compiletemplate()
            # ------------------------    ------------    ------------------
            # {f("\\\\ {g(\"\\\"\")}"}    \\ {g("\"")}    [r'\\', {g("\"")}]
            #             ~~~~~~~~
            #             escaped quoted string
            if c == 'r':
                pos += 1
                token = 'string'
            else:
                token = 'template'
            quote = program[pos:pos + 2]
            s = pos = pos + 2
            while pos < end: # find closing escaped quote
                if program.startswith('\\\\\\', pos, end):
                    pos += 4 # skip over double escaped characters
                    continue
                if program.startswith(quote, pos, end):
                    # interpret as if it were a part of an outer string
                    data = parser.unescapestr(program[s:pos])
                    if token == 'template':
                        data = _parsetemplate(data, 0, len(data))[0]
                    yield (token, data, s)
                    pos += 1
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            pos -= 1
        elif c == term:
            yield ('end', None, pos + 1)
            return
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    if term:
        raise error.ParseError(_("unterminated template expansion"), start)
    yield ('end', None, pos)
145
146
def _parsetemplate(tmpl, start, stop, quote=''):
    r"""
    >>> _parsetemplate('foo{bar}"baz', 0, 12)
    ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
    >>> _parsetemplate('foo{bar}"baz', 0, 12, quote='"')
    ([('string', 'foo'), ('symbol', 'bar')], 9)
    >>> _parsetemplate('foo"{bar}', 0, 9, quote='"')
    ([('string', 'foo')], 4)
    >>> _parsetemplate(r'foo\"bar"baz', 0, 12, quote='"')
    ([('string', 'foo"'), ('string', 'bar')], 9)
    >>> _parsetemplate(r'foo\\"bar', 0, 10, quote='"')
    ([('string', 'foo\\')], 6)
    """
    parsed = []
    # '{' opens an embedded expression; the quote char (if given) terminates
    # this template
    sepchars = '{' + quote
    pos = start
    p = parser.parser(elements)
    while pos < stop:
        # find the earliest separator; key (n < 0, n) sorts "not found"
        # results after any real position
        n = min((tmpl.find(c, pos, stop) for c in sepchars),
                key=lambda n: (n < 0, n))
        if n < 0:
            parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
            pos = stop
            break
        c = tmpl[n]
        # number of consecutive backslashes immediately before the separator
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{')
            parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
            pos = n + 1
            continue
        if n > pos:
            parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
        if c == quote:
            return parsed, n + 1

        parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
        parsed.append(parseres)

    if quote:
        raise error.ParseError(_("unterminated string"), start)
    return parsed, pos
188
189
189 def _unnesttemplatelist(tree):
190 def _unnesttemplatelist(tree):
190 """Expand list of templates to node tuple
191 """Expand list of templates to node tuple
191
192
192 >>> def f(tree):
193 >>> def f(tree):
193 ... print prettyformat(_unnesttemplatelist(tree))
194 ... print prettyformat(_unnesttemplatelist(tree))
194 >>> f(('template', []))
195 >>> f(('template', []))
195 ('string', '')
196 ('string', '')
196 >>> f(('template', [('string', 'foo')]))
197 >>> f(('template', [('string', 'foo')]))
197 ('string', 'foo')
198 ('string', 'foo')
198 >>> f(('template', [('string', 'foo'), ('symbol', 'rev')]))
199 >>> f(('template', [('string', 'foo'), ('symbol', 'rev')]))
199 (template
200 (template
200 ('string', 'foo')
201 ('string', 'foo')
201 ('symbol', 'rev'))
202 ('symbol', 'rev'))
202 >>> f(('template', [('symbol', 'rev')])) # template(rev) -> str
203 >>> f(('template', [('symbol', 'rev')])) # template(rev) -> str
203 (template
204 (template
204 ('symbol', 'rev'))
205 ('symbol', 'rev'))
205 >>> f(('template', [('template', [('string', 'foo')])]))
206 >>> f(('template', [('template', [('string', 'foo')])]))
206 ('string', 'foo')
207 ('string', 'foo')
207 """
208 """
208 if not isinstance(tree, tuple):
209 if not isinstance(tree, tuple):
209 return tree
210 return tree
210 op = tree[0]
211 op = tree[0]
211 if op != 'template':
212 if op != 'template':
212 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
213 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
213
214
214 assert len(tree) == 2
215 assert len(tree) == 2
215 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
216 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
216 if not xs:
217 if not xs:
217 return ('string', '') # empty template ""
218 return ('string', '') # empty template ""
218 elif len(xs) == 1 and xs[0][0] == 'string':
219 elif len(xs) == 1 and xs[0][0] == 'string':
219 return xs[0] # fast path for string with no template fragment "x"
220 return xs[0] # fast path for string with no template fragment "x"
220 else:
221 else:
221 return (op,) + xs
222 return (op,) + xs
222
223
def parse(tmpl):
    """Parse template string into tree"""
    nodes, pos = _parsetemplate(tmpl, 0, len(tmpl))
    assert pos == len(tmpl), 'unquoted template should be consumed'
    return _unnesttemplatelist(('template', nodes))
228
229
def _parseexpr(expr):
    """Parse a template expression into tree

    >>> _parseexpr('"foo"')
    ('string', 'foo')
    >>> _parseexpr('foo(bar)')
    ('func', ('symbol', 'foo'), ('symbol', 'bar'))
    >>> _parseexpr('foo(')
    Traceback (most recent call last):
      ...
    ParseError: ('not a prefix: end', 4)
    >>> _parseexpr('"foo" "bar"')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 7)
    """
    # the whole expression must be consumed; leftovers are an error
    tree, pos = parser.parser(elements).parse(tokenize(expr, 0, len(expr)))
    if pos != len(expr):
        raise error.ParseError(_('invalid token'), pos)
    return _unnesttemplatelist(tree)
250
251
def prettyformat(tree):
    # leaf node types are rendered inline; everything else is expanded
    # by parser.prettyformat()
    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
253
254
def compileexp(exp, context, curmethods):
    """Compile parsed template tree to (func, data) pair"""
    op = exp[0]
    if op not in curmethods:
        raise error.ParseError(_("unknown method '%s'") % op)
    # dispatch on the node type; each method compiles one node kind
    return curmethods[op](exp, context)
260
261
261 # template evaluation
262 # template evaluation
262
263
def getsymbol(exp):
    """Return the name carried by a 'symbol' node; ParseError otherwise."""
    if exp[0] != 'symbol':
        raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
    return exp[1]
267
268
def getlist(x):
    """Flatten a left-nested 'list' node into a Python list of child nodes."""
    if not x:
        return []
    # walk down the left spine collecting right children, then reverse
    found = []
    while x and x[0] == 'list':
        found.append(x[2])
        x = x[1]
    if x:
        found.append(x)
    found.reverse()
    return found
274
275
def gettemplate(exp, context):
    """Compile given template tree or load named template from map file;
    returns (func, data) pair"""
    op = exp[0]
    if op in ('template', 'string'):
        return compileexp(exp, context, methods)
    if op == 'symbol':
        # unlike runsymbol(), here 'symbol' is always taken as template name
        # even if it exists in mapping. this allows us to override mapping
        # by web templates, e.g. 'changelogtag' is redefined in map file.
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))
286
287
def findsymbolicname(arg):
    """Find symbolic name for the given compiled expression; returns None
    if nothing found reliably"""
    func, data = arg
    while True:
        if func is runsymbol:
            return data
        if func is not runfilter:
            return None
        # peel one filter layer and inspect its argument
        func, data = data[0]
298
299
def evalfuncarg(context, mapping, arg):
    """Evaluate a compiled (func, data) pair into a concrete value.

    func() may return string, generator of strings or arbitrary object such
    as date tuple, but filter does not want generator -- drain it first.
    """
    func, data = arg
    thing = func(context, mapping, data)
    if isinstance(thing, types.GeneratorType):
        return stringify(thing)
    return thing
307
308
def evalboolean(context, mapping, arg):
    """Evaluate given argument as boolean, but also takes boolean literals"""
    func, data = arg
    if func is not runsymbol:
        thing = func(context, mapping, data)
    else:
        thing = func(context, mapping, data, default=None)
        if thing is None:
            # not a template keyword, take it as a boolean literal
            thing = util.parsebool(data)
    if isinstance(thing, bool):
        return thing
    # other objects are evaluated as strings, which means 0 is True, but
    # empty dict/list should be False as they are expected to be ''
    return bool(stringify(thing))
323
324
def evalinteger(context, mapping, arg, err):
    """Evaluate arg and coerce the result to int; ParseError(err) if not
    convertible."""
    value = evalfuncarg(context, mapping, arg)
    try:
        return int(value)
    except (TypeError, ValueError):
        raise error.ParseError(err)
330
331
def evalstring(context, mapping, arg):
    """Evaluate a compiled (func, data) pair and flatten it to a string."""
    func, data = arg
    result = func(context, mapping, data)
    return stringify(result)
334
335
def evalstringliteral(context, mapping, arg):
    """Evaluate given argument as string template, but returns symbol name
    if it is unknown"""
    func, data = arg
    if func is runsymbol:
        # fall back to the symbol's own name when no keyword matches
        return stringify(func(context, mapping, data, default=data))
    return stringify(func(context, mapping, data))
344
345
def runinteger(context, mapping, data):
    """Resolve a compiled 'integer' node to its int value."""
    return int(data)
347
348
def runstring(context, mapping, data):
    """Resolve a compiled 'string' node: the literal text itself."""
    return data
350
351
def _recursivesymbolblocker(key):
    """Return a fake keyword callable that aborts when expanded.

    Planted into a mapping to cut self-referencing template expansion.
    """
    def showrecursion(**kwargs):
        raise error.Abort(_("recursive reference '%s' in template") % key)
    return showrecursion
355
356
def _runrecursivesymbol(context, mapping, key):
    # compiled-in poison node: evaluating it means a template referred to
    # itself, so abort
    raise error.Abort(_("recursive reference '%s' in template") % key)
358
359
def runsymbol(context, mapping, key, default=''):
    """Resolve a symbol: mapping entry, context default, or named template."""
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        # put poison to cut recursion. we can't move this to parsing phase
        # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
        safemapping = mapping.copy()
        safemapping[key] = _recursivesymbolblocker(key)
        try:
            v = context.process(key, safemapping)
        except TemplateNotFound:
            v = default
    return v(**mapping) if callable(v) else v
375
376
def buildtemplate(exp, context):
    """Compile a 'template' node: each child becomes one fragment."""
    fragments = [compileexp(e, context, methods) for e in exp[1:]]
    return (runtemplate, fragments)
379
380
def runtemplate(context, mapping, template):
    """Lazily evaluate each compiled (func, data) fragment in order."""
    return (func(context, mapping, data) for func, data in template)
383
384
def buildfilter(exp, context):
    """Compile 'arg|name' where name is a filter or single-arg function."""
    n = getsymbol(exp[2])
    if n in context._filters:
        # pipe the compiled argument through the named filter
        arg = compileexp(exp[1], context, methods)
        return (runfilter, (arg, context._filters[n]))
    if n in funcs:
        f = funcs[n]
        return (f, _buildfuncargs(exp[1], context, methods, n, f._argspec))
    raise error.ParseError(_("unknown function '%s'") % n)
395
396
def runfilter(context, mapping, data):
    # data is (compiled-arg, filter-callable); evaluate the arg and pipe it
    # through the filter
    arg, filt = data
    thing = evalfuncarg(context, mapping, arg)
    try:
        return filt(thing)
    except (ValueError, AttributeError, TypeError):
        # a filter blowing up usually means it was fed an incompatible
        # keyword value; name the offending combination if we can
        sym = findsymbolicname(arg)
        # NOTE(review): filt.func_name is Python 2 only (py3 uses __name__)
        # -- confirm against this file's py3 porting status
        if sym:
            msg = (_("template filter '%s' is not compatible with keyword '%s'")
                   % (filt.func_name, sym))
        else:
            msg = _("incompatible use of template filter '%s'") % filt.func_name
        raise error.Abort(msg)
409
410
def buildmap(exp, context):
    """Compile 'data % template': iterate data, expanding template per item."""
    darg = compileexp(exp[1], context, methods)
    targ = gettemplate(exp[2], context)
    return (runmap, darg + targ)
414
415
def runmap(context, mapping, data):
    # data is (func, data, tfunc, tdata): the left operand plus the compiled
    # template to expand for each produced item
    func, data, tfunc, tdata = data
    d = func(context, mapping, data)
    # objects that know how to expand themselves provide itermaps()
    if util.safehasattr(d, 'itermaps'):
        diter = d.itermaps()
    else:
        try:
            diter = iter(d)
        except TypeError:
            if func is runsymbol:
                raise error.ParseError(_("keyword '%s' is not iterable") % data)
            else:
                raise error.ParseError(_("%r is not iterable") % d)

    for i, v in enumerate(diter):
        # each item gets a scoped copy of the mapping plus its own 'index'
        lm = mapping.copy()
        lm['index'] = i
        if isinstance(v, dict):
            lm.update(v)
            # keep the outer changeset's node reachable from the inner scope
            lm['originalnode'] = mapping.get('node')
            yield tfunc(context, lm, tdata)
        else:
            # v is not an iterable of dicts, this happen when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield v
441
442
def buildnegate(exp, context):
    """Compile unary minus over a template expression."""
    return (runnegate, compileexp(exp[1], context, exprmethods))
445
446
def runnegate(context, mapping, data):
    """Evaluate the operand as an integer and return its negation."""
    value = evalinteger(context, mapping, data,
                        _('negation needs an integer argument'))
    return -value
450
451
def buildarithmetic(exp, context, func):
    """Compile a binary arithmetic node; func performs the actual math."""
    operands = (compileexp(exp[1], context, exprmethods),
                compileexp(exp[2], context, exprmethods))
    return (runarithmetic, (func,) + operands)
455
456
def runarithmetic(context, mapping, data):
    """Evaluate both operands as integers and apply the stored operator."""
    func, left, right = data
    err = _('arithmetic only defined on integers')
    a = evalinteger(context, mapping, left, err)
    b = evalinteger(context, mapping, right, err)
    try:
        return func(a, b)
    except ZeroDivisionError:
        raise error.Abort(_('division by zero is not defined'))
466
467
def buildfunc(exp, context):
    """Compile 'name(args...)' as a registered function or unary filter."""
    n = getsymbol(exp[1])
    if n in funcs:
        f = funcs[n]
        return (f, _buildfuncargs(exp[2], context, exprmethods, n, f._argspec))
    if n in context._filters:
        # a filter used in call syntax must receive exactly one argument
        args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % n)
        return (runfilter, (args[0], context._filters[n]))
    raise error.ParseError(_("unknown function '%s'") % n)
480
481
def _buildfuncargs(exp, context, curmethods, funcname, argspec):
    """Compile parsed tree of function arguments into list or dict of
    (func, data) pairs

    >>> context = engine(lambda t: (runsymbol, t))
    >>> def fargs(expr, argspec):
    ...     x = _parseexpr(expr)
    ...     n = getsymbol(x[1])
    ...     return _buildfuncargs(x[2], context, exprmethods, n, argspec)
    >>> fargs('a(l=1, k=2)', 'k l m').keys()
    ['l', 'k']
    >>> args = fargs('a(opts=1, k=2)', '**opts')
    >>> args.keys(), args['opts'].keys()
    (['opts'], ['opts', 'k'])
    """
    def compiledict(xs):
        # compile a name -> tree mapping, preserving insertion order
        return util.sortdict((k, compileexp(x, context, curmethods))
                             for k, x in xs.iteritems())
    def compilelist(xs):
        return [compileexp(x, context, curmethods) for x in xs]

    if not argspec:
        # filter or function with no argspec: return list of positional args
        return compilelist(getlist(exp))

    # function with argspec: return dict of named args
    _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
                                    keyvaluenode='keyvalue', keynode='symbol')
    compargs = util.sortdict()
    if varkey:
        # '*args'-style catch-all positional arguments
        compargs[varkey] = compilelist(treeargs.pop(varkey))
    if optkey:
        # '**opts'-style catch-all keyword arguments
        compargs[optkey] = compiledict(treeargs.pop(optkey))
    compargs.update(compiledict(treeargs))
    return compargs
517
518
def buildkeyvaluepair(exp, content):
    """Reject a bare key=value pair outside of a function argument list."""
    raise error.ParseError(_("can't use a key-value pair in this context"))
520
521
# dict of template built-in functions
funcs = {}

# decorator that registers a function into the `funcs` table
templatefunc = registrar.templatefunc(funcs)
525
526
@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    fmt = None
    if len(args) == 2:
        fmt = evalstring(context, mapping, args[1])
    try:
        if fmt is None:
            return util.datestr(date)
        else:
            return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # a non-date value (or bad format) reaches here; report as parse error
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date information"))
547
548
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    for v in args['args']:
        # positional argument: infer the key from the expression's symbol name
        k = findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    # explicit key=value arguments are added after the inferred ones
    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templatekw.hybriddict(data)
565
566
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # return the i-th pattern argument as a one-element list, or [] when
        # the argument is absent or evaluates to an empty string
        if i < len(args):
            s = evalstring(context, mapping, args[i]).strip()
            if s:
                return [s]
        return []

    ctx = mapping['ctx']
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))

    return ''.join(chunks)
585
586
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if not len(args) == 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    m = ctx.match([raw])
    files = list(ctx.matches(m))
    return templatekw.showlist("file", files, mapping)
599
600
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        try:
            # args[2]/args[3] may be absent; the IndexError leaves the
            # corresponding indent at its '' default
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
623
624
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = mapping['ui']
    node = evalstring(context, mapping, args[0])
    if ui.debugflag:
        # debug mode shows the full hash
        return node
    return templatefilters.short(node)
636
637
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    left = False
    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        # the fill character may carry color effects; only the visible
        # character must be exactly one column wide
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    if 'left' in args:
        left = evalboolean(context, mapping, args['left'])

    # measure display width ignoring color escape sequences
    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    if left:
        return fillchar * fillwidth + text
    else:
        return text + fillchar * fillwidth
669
670
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])

    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
690
691
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    # duck-typed check: anything with a .get() method is acceptable
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = evalfuncarg(context, mapping, args[1])
    return dictarg.get(key)
707
708
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    test = evalboolean(context, mapping, args[0])
    if test:
        # args[n] is a (func, data) pair; invoke only the taken branch
        yield args[1][0](context, mapping, args[1][1])
    elif len(args) == 3:
        yield args[2][0](context, mapping, args[2][1])
721
722
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    needle = evalstring(context, mapping, args[0])
    haystack = evalfuncarg(context, mapping, args[1])

    if needle in haystack:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
737
738
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both sides are compared as strings
    test = evalstring(context, mapping, args[0])
    match = evalstring(context, mapping, args[1])
    if test == match:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
752
753
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if util.safehasattr(joinset, 'itermaps'):
        # hybrid list/dict: render each member with its join formatter
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset.itermaps()]

    joiner = " "
    if len(args) > 1:
        joiner = evalstring(context, mapping, args[1])

    # emit the separator between items but not before the first one
    first = True
    for x in joinset:
        if first:
            first = False
        else:
            yield joiner
        yield x
776
777
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = mapping['ui']
    thing = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    label = evalstringliteral(context, mapping, args[0])

    return ui.label(thing, label)
793
794
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    pattern = None
    if len(args) == 1:
        pattern = evalstring(context, mapping, args[0])

    return templatekw.showlatesttags(pattern, **mapping)
809
810
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        # NOTE(review): isinstance(tz, str) — presumably a bytes/str py3
        # compat concern elsewhere in this series; confirm expected type
        if isinstance(tz, str):
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                # trailing garbage after the timezone: fall back below
                tzoffset = None
        if tzoffset is None:
            try:
                # a bare integer offset (in seconds) is also accepted
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone offset
        tzoffset = util.makedate()[1]
    return (date[0], tzoffset)
840
841
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    if not len(args) == 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    func = lambda a, b: a % b
    return runarithmetic(context, mapping, (func, args[0], args[1]))
850
851
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = mapping['ctx'].repo()
    path = evalstring(context, mapping, args[0])
    return repo.pathto(path)
862
863
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        m = revsetmod.match(repo.ui, expr)
        return m(repo)

    if len(args) > 1:
        # format arguments make the query unique; no caching in this case
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # cache plain queries per-template-run, keyed by the raw query text
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs

    return templatekw.showrevslist("revision", revs, **mapping)
893
894
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = evalstring(context, mapping, args[0])
    style = evalstring(context, mapping, args[1])

    return minirst.format(text, style=style, keep=['verbose'])
905
906
@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    first = True
    for arg in args['args']:
        argstr = evalstring(context, mapping, arg)
        if not argstr:
            # empty arguments are skipped entirely (no separator either)
            continue
        if first:
            first = False
        else:
            yield sep
        yield argstr
924
925
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = mapping['ctx']._repo.unfiltered().changelog
    def isvalid(test):
        # a prefix is valid when it matches exactly one node and cannot be
        # mistaken for a revision number
        try:
            if cl._partialmatch(test) is None:
                return False

            try:
                i = int(test)
                # if we are a pure int, then starting with zero will not be
                # confused as a rev; or, obviously, if the int is larger than
                # the value of the tip rev
                if test[0] == '0' or i > len(cl):
                    return True
                return False
            except ValueError:
                return True
        except error.RevlogError:
            return False

    shortest = node
    startlength = max(6, minlength)
    length = startlength
    # start at a heuristic length, shrink while still valid, grow when
    # ambiguous; stop at minlength or once growth passes the start point
    while True:
        test = node[:length]
        if isvalid(test):
            shortest = test
            if length == minlength or length > startlength:
                return shortest
            length -= 1
        else:
            length += 1
            if len(shortest) <= length:
                return shortest
977
978
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    nargs = len(args)
    if nargs < 1 or nargs > 2:
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if nargs == 1:
        return text.strip()
    # explicit character set supplied by the template
    chars = evalstring(context, mapping, args[1])
    return text.strip(chars)
991
992
@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pattern = evalstring(context, mapping, args[0])
    replacement = evalstring(context, mapping, args[1])
    source = evalstring(context, mapping, args[2])
    try:
        compiled = re.compile(pattern)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pattern)
    try:
        # errors in the replacement (e.g. bad group references) only
        # surface when the substitution is actually applied
        yield compiled.sub(replacement, source)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s")
                               % replacement)
1013
1014
@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    prefix = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    # empty string signals "no match" to the template engine
    return text if text.startswith(prefix) else ''
1027
1028
@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if len(args) < 2 or len(args) > 3:
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    # default (None) splitter means "split on any whitespace run"
    splitter = evalstring(context, mapping, args[2]) if len(args) == 3 else None

    tokens = text.split(splitter)
    # indexes out of range in either direction produce an empty string
    if -len(tokens) <= num < len(tokens):
        return tokens[num]
    return ''
1050
1051
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
# maps a parse-tree node type to a compiler returning a (runner, data) pair
exprmethods = {
    "integer": lambda e, c: (runinteger, e[1]),
    "string": lambda e, c: (runstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "template": buildtemplate,
    # a parenthesized group compiles exactly like its inner expression
    "group": lambda e, c: compileexp(e[1], c, exprmethods),
    # ".": buildmember,
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
    "keyvalue": buildkeyvaluepair,
    "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
    "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
    "negate": buildnegate,
    "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
    # floor division: template arithmetic stays in integers
    "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
    }
1069
1070
# methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
# identical to exprmethods except that a bare integer is treated as a
# keyword lookup rather than a literal
methods = exprmethods.copy()
methods["integer"] = exprmethods["symbol"]  # '{1}' as variable
1073
1074
class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of template aliases"""
    _section = _('template alias')
    _parse = staticmethod(_parseexpr)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if tree is func(...) or ...|filter; otherwise
        None"""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            # func(...) form: name is the symbol, args are the call list
            return tree[1][1], getlist(tree[2])
        if tree[0] == '|' and tree[2][0] == 'symbol':
            # expr|filter form: treat the filter as a one-argument function
            return tree[2][1], [tree[1]]
        # implicit None return for any other node shape
1087
1088
def expandaliases(tree, aliases):
    """Return a new parse tree with the given aliases expanded"""
    return _aliasrules.expand(_aliasrules.buildmap(aliases), tree)
1092
1093
1093 # template engine
1094 # template engine
1094
1095
# alias of the "stringify" template filter: flattens template output into str
stringify = templatefilters.stringify
1096
1097
def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    # unwrap hybrid list/dict wrappers (templatekw) to their plain form
    thing = templatekw.unwraphybrid(thing)
    if isinstance(thing, str):
        yield thing
    elif thing is None:
        # None contributes nothing to the stream
        pass
    elif not util.safehasattr(thing, '__iter__'):
        yield str(thing)
    else:
        for i in thing:
            # each element may itself be a hybrid needing unwrapping
            i = templatekw.unwraphybrid(i)
            if isinstance(i, str):
                yield i
            elif i is None:
                pass
            elif not util.safehasattr(i, '__iter__'):
                yield str(i)
            else:
                # nested iterable: recurse to flatten arbitrarily deep nesting
                for j in _flatten(i):
                    yield j
1118
1119
def unquotestring(s):
    '''unwrap quotes if any; otherwise returns unmodified string'''
    if len(s) < 2:
        return s
    first = s[0]
    # only strip when the string is wrapped in a *matching* pair of quotes
    if first not in ("'", '"') or first != s[-1]:
        return s
    return s[1:-1]
1124
1125
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None, aliases=()):
        # loader: callable mapping a template name to its unparsed text
        self._loader = loader
        if filters is None:
            filters = {}
        self._filters = filters
        if defaults is None:
            defaults = {}
        self._defaults = defaults
        self._aliasmap = _aliasrules.buildmap(aliases)
        self._cache = {} # key: (func, data)

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            # put poison to cut recursion while compiling 't'
            self._cache[t] = (_runrecursivesymbol, t)
            try:
                x = parse(self._loader(t))
                if self._aliasmap:
                    x = _aliasrules.expand(self._aliasmap, x)
                self._cache[t] = compileexp(x, self, methods)
            except: # re-raises
                # drop the poison entry so a failed compile can be retried
                del self._cache[t]
                raise
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        func, data = self._load(t)
        return _flatten(func(self, mapping, data))
1177
1178
# registry of template engine implementations, keyed by engine type name
engines = {'default': engine}
1179
1180
def stylelist():
    """Return a comma-separated listing of available command-line styles."""
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    styles = []
    for entry in os.listdir(paths[0]):
        parts = entry.split(".")
        # ignore editor/patch leftovers; styles live in map-cmdline.<name>
        if parts[-1] in ('orig', 'rej'):
            continue
        if parts[0] == "map-cmdline":
            styles.append(parts[1])
    return ", ".join(sorted(styles))
1193
1194
def _readmapfile(mapfile):
    """Load template elements from the given map file"""
    if not os.path.exists(mapfile):
        raise error.Abort(_("style '%s' not found") % mapfile,
                          hint=_("available styles: %s") % stylelist())

    base = os.path.dirname(mapfile)
    conf = config.config(includepaths=templatepaths())
    conf.read(mapfile)

    cache = {}  # key -> inline (quoted) template text
    tmap = {}   # key -> (engine type, template file path)
    for key, val in conf[''].items():
        if not val:
            raise error.ParseError(_('missing value'), conf.source('', key))
        if val[0] in "'\"":
            # quoted value: an inline template definition
            if val[0] != val[-1]:
                raise error.ParseError(_('unmatched quotes'),
                                       conf.source('', key))
            cache[key] = unquotestring(val)
        elif key == "__base__":
            # treat as a pointer to a base class for this style
            path = util.normpath(os.path.join(base, val))

            # fallback check in template paths
            if not os.path.exists(path):
                for p in templatepaths():
                    # try the value as a file, then as a style directory
                    # containing a "map" file
                    p2 = util.normpath(os.path.join(p, val))
                    if os.path.isfile(p2):
                        path = p2
                        break
                    p3 = util.normpath(os.path.join(p2, "map"))
                    if os.path.isfile(p3):
                        path = p3
                        break

            # recursively load the base style; local definitions win over
            # inherited ones
            bcache, btmap = _readmapfile(path)
            for k in bcache:
                if k not in cache:
                    cache[k] = bcache[k]
            for k in btmap:
                if k not in tmap:
                    tmap[k] = btmap[k]
        else:
            # unquoted value: "[enginetype:]filename" of a template file
            val = 'default', val
            if ':' in val[1]:
                val = val[1].split(':', 1)
            tmap[key] = val[0], os.path.join(base, val[1])
    return cache, tmap
1243
1244
class TemplateNotFound(error.Abort):
    # raised by templater.load() when a name has no entry in the template map
    pass
1246
1247
class templater(object):

    def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.
        aliases is list of alias (name, replacement) pairs.
        '''
        if filters is None:
            filters = {}
        if defaults is None:
            defaults = {}
        if cache is None:
            cache = {}
        self.cache = cache.copy()  # template name -> unparsed template text
        self.map = {}              # template name -> (engine type, file path)
        # user-supplied filters extend (and may override) the builtin ones
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        self._aliases = aliases
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {}           # engine type name -> engine instance
    
    @classmethod
    def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
                    minchunk=1024, maxchunk=65536):
        """Create templater from the specified map file"""
        t = cls(filters, defaults, cache, [], minchunk, maxchunk)
        cache, tmap = _readmapfile(mapfile)
        t.cache.update(cache)
        t.map = tmap
        return t

    def __contains__(self, key):
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError as inst:
                # unknown template name -> dedicated Abort subclass
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError as inst:
                # re-raise with the template file path in the message
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def __call__(self, t, **mapping):
        # engine type configured for this template; 'default' if unspecified
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            try:
                ecls = engines[ttype]
            except KeyError:
                raise error.Abort(_('invalid template engine: %s') % ttype)
            self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
                                      self._aliases)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            # regroup tiny yields into progressively larger chunks to cut
            # down on per-write overhead
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
1313
1314
def templatepaths():
    '''return locations used for template files.'''
    # single candidate: the "templates" directory shipped with Mercurial;
    # directories that do not exist are filtered out
    candidates = [os.path.normpath(os.path.join(util.datapath, 'templates'))]
    return [path for path in candidates if os.path.isdir(path)]
1320
1321
def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for directory in templatepaths():
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
1328
1329
def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths
        # (reject empty names, ".", "..", and anything with a path separator,
        # which would escape the template directories)
        if (not style
            or style in (os.curdir, os.pardir)
            or pycompat.ossep in style
            or pycompat.osaltsep and pycompat.osaltsep in style):
            continue
        locations = [os.path.join(style, 'map'), 'map-' + style]
        locations.append('map')

        # first existing mapfile wins, honoring the location order above
        for path in paths:
            for location in locations:
                mapfile = os.path.join(path, location)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
1363
1364
def loadfunction(ui, extname, registrarobj):
    """Load template function from specified registrarobj
    """
    # merge extension-provided template functions into the module-level
    # "funcs" table used by the template compiler
    for name, func in registrarobj._table.iteritems():
        funcs[name] = func

# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()
@@ -1,3745 +1,3746 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import codecs
20 import codecs
21 import collections
21 import collections
22 import datetime
22 import datetime
23 import errno
23 import errno
24 import gc
24 import gc
25 import hashlib
25 import hashlib
26 import imp
26 import imp
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import signal
31 import signal
32 import socket
32 import socket
33 import stat
33 import stat
34 import string
34 import string
35 import subprocess
35 import subprocess
36 import sys
36 import sys
37 import tempfile
37 import tempfile
38 import textwrap
38 import textwrap
39 import time
39 import time
40 import traceback
40 import traceback
41 import warnings
41 import warnings
42 import zlib
42 import zlib
43
43
44 from . import (
44 from . import (
45 encoding,
45 encoding,
46 error,
46 error,
47 i18n,
47 i18n,
48 osutil,
48 osutil,
49 parsers,
49 parsers,
50 pycompat,
50 pycompat,
51 )
51 )
52
52
# Re-export pycompat's py2/py3 abstraction modules and streams under their
# historical names so callers can keep writing util.httplib, util.queue, etc.
cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
httpserver = pycompat.httpserver
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
stringio = pycompat.stringio
urlerr = pycompat.urlerr
urlreq = pycompat.urlreq
xmlrpclib = pycompat.xmlrpclib
67
67
def isatty(fp):
    """Tell whether fp is attached to a terminal.

    Objects without a working isatty() method are treated as non-ttys.
    """
    try:
        istty = fp.isatty()
    except AttributeError:
        return False
    return istty
73
73
# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    # reopen stdout line-buffered (bufsize=1) on top of the same fd
    stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)

# select the OS-specific implementation module; "platform" provides the
# backend for the aliases exported below
if pycompat.osname == 'nt':
    from . import windows as platform
    # wrap stdout to work around Windows console write limitations
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform
85
85
_ = i18n._

# Re-export the platform-specific implementations under stable names so the
# rest of Mercurial can use util.* without caring about the host OS.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C-accelerated osutil.statfiles when the extension provides it
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username
141
141
142 # Python compatibility
142 # Python compatibility
143
143
144 _notset = object()
144 _notset = object()
145
145
146 # disable Python's problematic floating point timestamps (issue4836)
146 # disable Python's problematic floating point timestamps (issue4836)
147 # (Python hypocritically says you shouldn't change this behavior in
147 # (Python hypocritically says you shouldn't change this behavior in
148 # libraries, and sure enough Mercurial is not a library.)
148 # libraries, and sure enough Mercurial is not a library.)
149 os.stat_float_times(False)
149 os.stat_float_times(False)
150
150
def safehasattr(thing, attr):
    """Return True if `thing` exposes an attribute named `attr`.

    Uses a private sentinel default so the presence check costs a
    single getattr call.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
153
153
def bitsfrom(container):
    """OR together every value in `container` and return the combined mask."""
    combined = 0
    for flag in container:
        combined = combined | flag
    return combined
159
159
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
174
174
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = ("\n(compatibility will be dropped after Mercurial-%s,"
              " update your code.)") % version
    # stacklevel + 1 accounts for this wrapper frame
    warnings.warn(msg + suffix, DeprecationWarning, stacklevel + 1)
184
184
# name -> hashlib constructor for every digest algorithm we support
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the preference list must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
195
195
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the same data to every underlying hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: interpolate 'key' (the parameter); the previous code
            # used 'k', which is not defined in this scope and would
            # raise NameError (or pick up a stale module-level name)
            # instead of reporting the unknown digest.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
242
242
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        chunk = self._fh.read(length)
        self._got += len(chunk)
        self._digester.update(chunk)
        return chunk

    def validate(self):
        # check the byte count first, then each expected digest
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, want in self._digests.items():
            have = self._digester[name]
            if want != have:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, want, have))
274
274
try:
    # keep the Python 2 builtin when it exists
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        # Python 2 build without the builtin: emulate via slicing (copies)
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return sliceable[offset:offset + length]
            return sliceable[offset:]
    else:
        # Python 3 removed buffer(); memoryview gives a zero-copy view
        def buffer(sliceable, offset=0, length=None):
            if length is not None:
                return memoryview(sliceable)[offset:offset + length]
            return memoryview(sliceable)[offset:]

# close_fds=True for subprocesses is only used on POSIX here
closefds = pycompat.osname == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
292
292
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # pending chunks, oldest first
        self._eof = False    # set once os.read() returns no data
        self._lenbuf = 0     # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until the request can be satisfied or EOF is hit
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of '\n' within the most recent chunk, -1 if absent
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so the slicing below stays simple
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unread remainder as a single chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
386
386
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through a shell and return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
397
397
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but drop the Popen object and return only the pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
401
401
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through a shell with all three std streams piped.

    Returns (stdin, stdout, stderr, Popen object).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
410
410
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated __version__ module (e.g. running from a tarball)
        return 'unknown'
    return __version__.version
418
418
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split numeric part from the first '+' or '-' suffix (if any)
    pieces = remod.split('[\+-]', v, 1)
    if len(pieces) == 1:
        base, extra = pieces[0], None
    else:
        base, extra = pieces

    # collect leading integer components; stop at the first non-number
    numeric = []
    for component in base.split('.'):
        try:
            numeric.append(int(component))
        except ValueError:
            break
    # pad to three components: (3, 6) -> (3, 6, None)
    while len(numeric) < 3:
        numeric.append(None)

    if n == 2:
        return (numeric[0], numeric[1])
    if n == 3:
        return (numeric[0], numeric[1], numeric[2])
    if n == 4:
        return (numeric[0], numeric[1], numeric[2], extra)
487
487
# used by parsedate
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M', # without seconds
    '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
    '%Y-%m-%dT%H%M', # without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M', # without seconds
    '%Y-%m-%d %H%M%S', # without :
    '%Y-%m-%d %H%M', # without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# coarser formats (year-only, month-only, ...) additionally accepted
# when extended date parsing is requested
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
529
529
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # single cached value, computed lazily on first call
        memo = []
        def f():
            if not memo:
                memo.append(func())
            return memo[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
555
555
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in insertion order; re-assigning an existing key moves
    it to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end of the insertion order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            # fix: use items() instead of iteritems() so that plain
            # dicts work on Python 3 as well (iteritems() was removed);
            # on Python 2 items() is equivalent, just eagerly built
            src = src.items()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        try:
            self._list.remove(key)
        except ValueError:
            # key not tracked; let dict.pop apply the default/raise
            pass
        return dict.pop(self, key, *args, **kwargs)
    def keys(self):
        return self._list[:]
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an arbitrary position in the iteration order
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
604
604
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # neighbours are wired up by the owning cache
        self.prev = None
        self.next = None
        # _notset marks a slot that currently holds no entry
        self.value = None
        self.key = _notset

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
623
623
624 class lrucachedict(object):
624 class lrucachedict(object):
625 """Dict that caches most recent accesses and sets.
625 """Dict that caches most recent accesses and sets.
626
626
627 The dict consists of an actual backing dict - indexed by original
627 The dict consists of an actual backing dict - indexed by original
628 key - and a doubly linked circular list defining the order of entries in
628 key - and a doubly linked circular list defining the order of entries in
629 the cache.
629 the cache.
630
630
631 The head node is the newest entry in the cache. If the cache is full,
631 The head node is the newest entry in the cache. If the cache is full,
632 we recycle head.prev and make it the new head. Cache accesses result in
632 we recycle head.prev and make it the new head. Cache accesses result in
633 the node being moved to before the existing head and being marked as the
633 the node being moved to before the existing head and being marked as the
634 new head node.
634 new head node.
635 """
635 """
636 def __init__(self, max):
636 def __init__(self, max):
637 self._cache = {}
637 self._cache = {}
638
638
639 self._head = head = _lrucachenode()
639 self._head = head = _lrucachenode()
640 head.prev = head
640 head.prev = head
641 head.next = head
641 head.next = head
642 self._size = 1
642 self._size = 1
643 self._capacity = max
643 self._capacity = max
644
644
645 def __len__(self):
645 def __len__(self):
646 return len(self._cache)
646 return len(self._cache)
647
647
648 def __contains__(self, k):
648 def __contains__(self, k):
649 return k in self._cache
649 return k in self._cache
650
650
651 def __iter__(self):
651 def __iter__(self):
652 # We don't have to iterate in cache order, but why not.
652 # We don't have to iterate in cache order, but why not.
653 n = self._head
653 n = self._head
654 for i in range(len(self._cache)):
654 for i in range(len(self._cache)):
655 yield n.key
655 yield n.key
656 n = n.next
656 n = n.next
657
657
658 def __getitem__(self, k):
658 def __getitem__(self, k):
659 node = self._cache[k]
659 node = self._cache[k]
660 self._movetohead(node)
660 self._movetohead(node)
661 return node.value
661 return node.value
662
662
663 def __setitem__(self, k, v):
663 def __setitem__(self, k, v):
664 node = self._cache.get(k)
664 node = self._cache.get(k)
665 # Replace existing value and mark as newest.
665 # Replace existing value and mark as newest.
666 if node is not None:
666 if node is not None:
667 node.value = v
667 node.value = v
668 self._movetohead(node)
668 self._movetohead(node)
669 return
669 return
670
670
671 if self._size < self._capacity:
671 if self._size < self._capacity:
672 node = self._addcapacity()
672 node = self._addcapacity()
673 else:
673 else:
674 # Grab the last/oldest item.
674 # Grab the last/oldest item.
675 node = self._head.prev
675 node = self._head.prev
676
676
677 # At capacity. Kill the old entry.
677 # At capacity. Kill the old entry.
678 if node.key is not _notset:
678 if node.key is not _notset:
679 del self._cache[node.key]
679 del self._cache[node.key]
680
680
681 node.key = k
681 node.key = k
682 node.value = v
682 node.value = v
683 self._cache[k] = node
683 self._cache[k] = node
684 # And mark it as newest entry. No need to adjust order since it
684 # And mark it as newest entry. No need to adjust order since it
685 # is already self._head.prev.
685 # is already self._head.prev.
686 self._head = node
686 self._head = node
687
687
688 def __delitem__(self, k):
688 def __delitem__(self, k):
689 node = self._cache.pop(k)
689 node = self._cache.pop(k)
690 node.markempty()
690 node.markempty()
691
691
692 # Temporarily mark as newest item before re-adjusting head to make
692 # Temporarily mark as newest item before re-adjusting head to make
693 # this node the oldest item.
693 # this node the oldest item.
694 self._movetohead(node)
694 self._movetohead(node)
695 self._head = node.next
695 self._head = node.next
696
696
697 # Additional dict methods.
697 # Additional dict methods.
698
698
699 def get(self, k, default=None):
699 def get(self, k, default=None):
700 try:
700 try:
701 return self._cache[k].value
701 return self._cache[k].value
702 except KeyError:
702 except KeyError:
703 return default
703 return default
704
704
705 def clear(self):
705 def clear(self):
706 n = self._head
706 n = self._head
707 while n.key is not _notset:
707 while n.key is not _notset:
708 n.markempty()
708 n.markempty()
709 n = n.next
709 n = n.next
710
710
711 self._cache.clear()
711 self._cache.clear()
712
712
713 def copy(self):
713 def copy(self):
714 result = lrucachedict(self._capacity)
714 result = lrucachedict(self._capacity)
715 n = self._head.prev
715 n = self._head.prev
716 # Iterate in oldest-to-newest order, so the copy has the right ordering
716 # Iterate in oldest-to-newest order, so the copy has the right ordering
717 for i in range(len(self._cache)):
717 for i in range(len(self._cache)):
718 result[n.key] = n.value
718 result[n.key] = n.value
719 n = n.prev
719 n = n.prev
720 return result
720 return result
721
721
722 def _movetohead(self, node):
722 def _movetohead(self, node):
723 """Mark a node as the newest, making it the new head.
723 """Mark a node as the newest, making it the new head.
724
724
725 When a node is accessed, it becomes the freshest entry in the LRU
725 When a node is accessed, it becomes the freshest entry in the LRU
726 list, which is denoted by self._head.
726 list, which is denoted by self._head.
727
727
728 Visually, let's make ``N`` the new head node (* denotes head):
728 Visually, let's make ``N`` the new head node (* denotes head):
729
729
730 previous/oldest <-> head <-> next/next newest
730 previous/oldest <-> head <-> next/next newest
731
731
732 ----<->--- A* ---<->-----
732 ----<->--- A* ---<->-----
733 | |
733 | |
734 E <-> D <-> N <-> C <-> B
734 E <-> D <-> N <-> C <-> B
735
735
736 To:
736 To:
737
737
738 ----<->--- N* ---<->-----
738 ----<->--- N* ---<->-----
739 | |
739 | |
740 E <-> D <-> C <-> B <-> A
740 E <-> D <-> C <-> B <-> A
741
741
742 This requires the following moves:
742 This requires the following moves:
743
743
744 C.next = D (node.prev.next = node.next)
744 C.next = D (node.prev.next = node.next)
745 D.prev = C (node.next.prev = node.prev)
745 D.prev = C (node.next.prev = node.prev)
746 E.next = N (head.prev.next = node)
746 E.next = N (head.prev.next = node)
747 N.prev = E (node.prev = head.prev)
747 N.prev = E (node.prev = head.prev)
748 N.next = A (node.next = head)
748 N.next = A (node.next = head)
749 A.prev = N (head.prev = node)
749 A.prev = N (head.prev = node)
750 """
750 """
751 head = self._head
751 head = self._head
752 # C.next = D
752 # C.next = D
753 node.prev.next = node.next
753 node.prev.next = node.next
754 # D.prev = C
754 # D.prev = C
755 node.next.prev = node.prev
755 node.next.prev = node.prev
756 # N.prev = E
756 # N.prev = E
757 node.prev = head.prev
757 node.prev = head.prev
758 # N.next = A
758 # N.next = A
759 # It is tempting to do just "head" here, however if node is
759 # It is tempting to do just "head" here, however if node is
760 # adjacent to head, this will do bad things.
760 # adjacent to head, this will do bad things.
761 node.next = head.prev.next
761 node.next = head.prev.next
762 # E.next = N
762 # E.next = N
763 node.next.prev = node
763 node.next.prev = node
764 # A.prev = N
764 # A.prev = N
765 node.prev.next = node
765 node.prev.next = node
766
766
767 self._head = node
767 self._head = node
768
768
769 def _addcapacity(self):
769 def _addcapacity(self):
770 """Add a node to the circular linked list.
770 """Add a node to the circular linked list.
771
771
772 The new node is inserted before the head node.
772 The new node is inserted before the head node.
773 """
773 """
774 head = self._head
774 head = self._head
775 node = _lrucachenode()
775 node = _lrucachenode()
776 head.prev.next = node
776 head.prev.next = node
777 node.prev = head.prev
777 node.prev = head.prev
778 node.next = head
778 node.next = head
779 head.prev = node
779 head.prev = node
780 self._size += 1
780 self._size += 1
781 return node
781 return node
782
782
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Single-argument callables get a cheaper wrapper keyed on the bare
    # argument; everything else is keyed on the full args tuple. At most
    # ~20 distinct results are retained, evicting least recently used.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
809
809
class propertycache(object):
    """Descriptor computing ``func(obj)`` once, then caching it on the
    instance so later accesses bypass the descriptor entirely."""

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
822
822
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
829
829
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        # Substitute the real temporary file names into the template.
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temporary files.
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
863
863
# Recognized filter-command prefixes mapped to their implementations;
# anything else falls through to pipefilter (see filter()).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
868
868
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # Strip the prefix (and surrounding whitespace) before
            # handing the remainder to the specialized filter.
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
875
875
def binary(s):
    """return true if a string is binary data"""
    # A NUL byte is the conventional heuristic for binary content.
    if not s:
        return False
    return '\0' in s
879
879
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Index of the highest set bit of x; 0 for x == 0.
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    nbuffered = 0
    for piece in source:
        pending.append(piece)
        nbuffered += len(piece)
        if nbuffered >= min:
            if min < max:
                # Ratchet the threshold upwards: at least double it, and
                # at least the largest power of two not exceeding what we
                # just buffered, capped at max.
                min = min << 1
                nmin = 1 << log2(nbuffered)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            nbuffered = 0
            pending = []
    if pending:
        yield ''.join(pending)
910
910
# Convenience re-export; much historic code spells this util.Abort.
Abort = error.Abort
912
912
def always(fn):
    """Matcher predicate accepting every input."""
    return True
915
915
def never(fn):
    """Matcher predicate rejecting every input."""
    return False
918
918
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # Nothing to work around on modern Pythons.
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable if the caller had GC on to begin with.
            if wasenabled:
                gc.enable()
    return wrapper
942
942
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # Different drives: no relative path exists; go absolute.
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1, comps2 = splitpath(n1), n2.split('/')
    comps1.reverse()
    comps2.reverse()
    # Strip the common leading components of both paths.
    while comps1 and comps2 and comps1[-1] == comps2[-1]:
        comps1.pop()
        comps2.pop()
    comps2.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return pycompat.ossep.join((['..'] * len(comps1)) + comps2) or '.'
968
968
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):       # new py2exe
        return True
    if safehasattr(sys, "importers"):    # old py2exe
        return True
    return imp.is_frozen(u"__main__")    # tools/freeze
978
978
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)
987
987
# Cached path of the 'hg' executable; resolved lazily by hgexecutable().
_hgexecutable = None
989
989
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[pycompat.sysstr('__main__')]
        if hg:
            # Explicit override through the HG environment variable.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # Running straight from a script literally named 'hg'.
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            # Last resort: search PATH, falling back to argv[0]'s name.
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
1013
1013
1014 def _sethgexecutable(path):
1014 def _sethgexecutable(path):
1015 """set location of the 'hg' executable"""
1015 """set location of the 'hg' executable"""
1016 global _hgexecutable
1016 global _hgexecutable
1017 _hgexecutable = path
1017 _hgexecutable = path
1018
1018
1019 def _isstdout(f):
1019 def _isstdout(f):
1020 fileno = getattr(f, 'fileno', None)
1020 fileno = getattr(f, 'fileno', None)
1021 return fileno and fileno() == sys.__stdout__.fileno()
1021 return fileno and fileno() == sys.__stdout__.fileno()
1022
1022
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        # py3: use pycompat.bytestr instead of str() so override values
        # stay bytes like the rest of the environment dict (plain str()
        # would produce unicode on Python 3).
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    return env
1037
1037
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    if pycompat.sysplatform == 'plan9' and (sys.version_info[0] == 2
                                            and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if cwd is not None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = shellenviron(environ)
        if out is None or _isstdout(out):
            # The child can write straight to our stdout.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Funnel the child's combined stdout+stderr through the
            # caller-provided file-like object, line by line.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1072
1072
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of depth 1 means the TypeError came from the
            # call itself (bad signature), not from inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
1084
1084
1085 # a whilelist of known filesystems where hardlink works reliably
1085 # a whilelist of known filesystems where hardlink works reliably
1086 _hardlinkfswhitelist = set([
1086 _hardlinkfswhitelist = set([
1087 'btrfs',
1087 'btrfs',
1088 'ext2',
1088 'ext2',
1089 'ext3',
1089 'ext3',
1090 'ext4',
1090 'ext4',
1091 'hfs',
1091 'hfs',
1092 'jfs',
1092 'jfs',
1093 'reiserfs',
1093 'reiserfs',
1094 'tmpfs',
1094 'tmpfs',
1095 'ufs',
1095 'ufs',
1096 'xfs',
1096 'xfs',
1097 'zfs',
1097 'zfs',
1098 ])
1098 ])
1099
1099
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1151
1151
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # Only try hardlinks when src and dst share a device.
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset nested progress by what we've already copied.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed; degrade to plain copies from here on.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1191
1191
1192 _winreservednames = '''con prn aux nul
1192 _winreservednames = '''con prn aux nul
1193 com1 com2 com3 com4 com5 com6 com7 com8 com9
1193 com1 com2 com3 com4 com5 com6 com7 com8 com9
1194 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1194 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
1195 _winreservedchars = ':*?"<>|'
1195 _winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in pycompat.bytestr(part):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # Control characters are invalid in Windows filenames.
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        # NOTE: the substring test deliberately exempts '.' and '..'.
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1242
1242
# Pick platform-specific implementations: on Windows the filename
# restrictions above must always be applied; elsewhere delegate to the
# platform module's knowledge.
if pycompat.osname == 'nt':
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# When available (Python >= 3.3), prefer the high-resolution counter.
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1252
1252
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    A symlink is preferred (atomic, and readable without opening); when
    symlinks are unsupported, fall back to exclusively creating a regular
    file holding info.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock means someone else holds it: propagate
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1265
1265
def readlock(pathname):
    """Return the content of the lock at pathname.

    Mirrors the two forms makelock can produce: the symlink target when
    the lock is a symlink, otherwise the file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: exists but is not a symlink; ENOSYS: no symlink support
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        fp.close()
1278
1278
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no descriptor available; fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fileno())
1285
1285
1286 # File system features
1286 # File system features
1287
1287
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    curstat = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # the name has no case to fold: no evidence against sensitivity
        return True
    try:
        otherstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant does not exist: filesystem distinguishes case
        return True
    # same inode for both spellings means case-insensitive
    return otherstat != curstat
1310
1310
1311 try:
1311 try:
1312 import re2
1312 import re2
1313 _re2 = None
1313 _re2 = None
1314 except ImportError:
1314 except ImportError:
1315 _re2 = False
1315 _re2 = False
1316
1316
class _re(object):
    """Facade over the stdlib re module that transparently substitutes
    re2 (when importable and working) for compatible patterns/flags."""

    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        usere2 = _re2 and not (flags & ~(remod.IGNORECASE | remod.MULTILINE))
        if usere2:
            # re2 only takes inline flags; translate the supported ones
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not re2-compatible after all; use stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        return remod.escape
1359
1359
# module-level singleton; intentionally shadows the stdlib name inside
# this module so callers get the re2-aware facade
re = _re()

# memoized directory listings used by fspath(), keyed by directory path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk entry name
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE: str.replace returns a new string; the previous code discarded
    # the result, leaving '\' unescaped inside the character classes below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1404
1404
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    # osutil.getfstype only exists on some platforms/builds
    impl = getattr(osutil, 'getfstype', None)
    if impl is None:
        return None
    return impl(dirpath)
1411
1411
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file; clean up best-effort
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        # always remove both probe files
        for probe in (f1, f2):
            try:
                os.unlink(probe)
            except OSError:
                pass
1447
1447
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    # osaltsep may be empty/None on platforms with a single separator
    return pycompat.osaltsep and path.endswith(pycompat.osaltsep)
1452
1452
def splitpath(path):
    """Split *path* on os.sep (and only os.sep).

    os.altsep is deliberately ignored: this is merely a named spelling of
    ``path.split(os.sep)``. Run os.path.normpath() on the input first if
    that matters for your use case.
    """
    return path.split(pycompat.ossep)
1460
1460
def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform != 'darwin':
        # Windows always has a GUI; elsewhere require a running display
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    elif getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    else:
        # pure build; use a safe default
        return True
1475
1475
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the empty temp file already is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1514
1514
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # a missing file is recorded as None rather than an error
            self.stat = None

    # identity-based hashing; __eq__ below is content-based on purpose
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # at least one side has stat None (missing file)
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats taken in the same ctime second cannot be told apart by
        timestamp comparison: a file changed twice within one second, or
        a natural mtime advance colliding with a manually advanced one,
        both yield equal ctimes. All "old.ctime == new.ctime" cases are
        therefore treated as ambiguous, regardless of mtime or size, so
        callers can advance mtime (see avoidambig) and guarantee
        "S[n-1].mtime != S[n].mtime" even when the size is unchanged.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'.
        """
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges: skip

    def __ne__(self, other):
        return not self == other
1598
1598
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegate the I/O methods straight to the underlying file
        for attr in ('read', 'write', 'seek', 'tell', 'fileno'):
            setattr(self, attr, getattr(self._fp, attr))

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        filename = localpath(self.__name)
        oldstat = self._checkambig and filestat(filename)
        if oldstat and oldstat.stat:
            rename(self._tempname, filename)
            newstat = filestat(filename)
            if newstat.isambig(oldstat):
                # stat of changed file is ambiguous to original one
                advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                os.utime(filename, (advanced, advanced))
        else:
            rename(self._tempname, filename)

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, throw the writes away on error
        if exctype is not None:
            self.discard()
        else:
            self.close()
1661
1661
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1673
1673
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        # only a missing file is tolerated; anything else propagates
        if err.errno != errno.ENOENT:
            raise
1681
1681
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # hit the filesystem root without success: give up
            raise
        # create the missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1709
1709
def readfile(path):
    """Return the entire binary content of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1713
1713
def writefile(path, text):
    """Replace the file at path with the bytes in text."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1717
1717
def appendfile(path, text):
    """Append the bytes in text to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1721
1721
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # Cap individual chunks at 256k so a single huge chunk from the
            # input iterator cannot dominate memory use of the queue.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # _queue holds chunks already pulled from the iterator but not yet
        # fully consumed; _chunkoffset is how much of _queue[0] is consumed.
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with up to ~256k of new chunks
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1801
1801
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # A zero-byte request short-circuits here: ``0 and f.read(0)`` is 0,
        # which is falsy, so we stop once the limit is exhausted or EOF hit.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1822
1822
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # Derive the local UTC offset by differencing the same instant
    # interpreted as UTC and as local time.
    delta = (datetime.datetime.utcfromtimestamp(timestamp) -
             datetime.datetime.fromtimestamp(timestamp))
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1835
1835
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # %1/%2 are Mercurial extensions: signed hours and minutes of the
        # timezone offset; %z expands to both.
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # Clamp to the signed 32-bit range supported by the formatting below.
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1871
1871
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # Delegates to datestr with a fixed YYYY-MM-DD format.
    return datestr(date, format='%Y-%m-%d')
1875
1875
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = (s[-5] == "+") and 1 or -1
        hours = int(s[-4:-2])
        minutes = int(s[-2:])
        # offset is seconds *behind* UTC, hence the negated sign
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = (s[-6] == "+") and 1 or -1
        hours = int(s[-5:-3])
        minutes = int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    # no recognizable timezone suffix
    return None, s
1903
1903
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        part = pycompat.bytestr(part)
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1934
1935
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0:1])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
2011
2012
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp covered by an imprecise date spec
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp covered by an imprecise date spec; probe month
        # lengths from 31 down to 28 until parsing succeeds
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2087
2088
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        try:
            flags = 0
            if not casesensitive:
                flags = remod.I
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search
    elif pattern.startswith('literal:'):
        pattern = pattern[8:]

    match = pattern.__eq__

    if not casesensitive:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2146
2147
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Strip, in order: anything from '@' on, anything up to and including
    # '<', anything from the first space on, then anything from the first
    # '.' on — leaving just the leading name component.
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user
2162
2163
def emailuser(user):
    """Return the user portion of an email address."""
    # Cut at '@' first, then drop any leading "Real Name <" part.
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    return user
2172
2173
def email(author):
    '''get email of author.'''
    # Slice between '<' and '>' if present; otherwise return the whole
    # string (find('<') + 1 is 0 when '<' is absent).
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]
2179
2180
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # Width-aware trimming is delegated to encoding.trim.
    return encoding.trim(text, maxlength, ellipsis='...')
2183
2184
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) triples tried
    in order; the last entry is the fallback format.'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go
2194
2195
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
      ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
      ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline
2215
2216
# Human-readable byte-count formatter built from unitcountfn; entries are
# ordered from the largest threshold down to the 1-byte fallback.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2228
2229
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    """Normalize all line endings in s to LF."""
    return _eolre.sub('\n', s)

def tocrlf(s):
    """Normalize all line endings in s to CRLF."""
    return _eolre.sub('\r\n', s)
2239
2240
# Pick EOL converters for the current platform: convert to/from the OS's
# native line separator. On LF platforms both directions are the identity.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2246
2247
def escapestr(s):
    """Return s with special characters backslash-escaped."""
    # call underlying function of s.encode('string_escape') directly for
    # Python 3 compatibility
    return codecs.escape_encode(s)[0]
2251
2252
def unescapestr(s):
    """Inverse of escapestr: decode backslash escapes in s."""
    return codecs.escape_decode(s)[0]
2254
2255
def uirepr(s):
    """repr() variant for user-facing output."""
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
2258
2259
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the head occupies at most space_left terminal
            # columns, measuring each character via encoding.ucolwidth.
            consumed = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                consumed += colwidth(ucstr[i])
                if space_left < consumed:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:
                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l
                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Replace this factory with the class itself on first use so later
    # calls construct instances directly.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2362
2363
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to width terminal columns, honoring wide characters.

    initindent prefixes the first output line, hangindent all subsequent
    ones. Input and output are byte strings in the local encoding; the
    wrapping itself happens on unicode so per-character column widths
    can be used.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    enc = pycompat.sysstr(encoding.encoding)
    encmode = pycompat.sysstr(encoding.encodingmode)
    line = line.decode(enc, encmode)
    initindent = initindent.decode(enc, encmode)
    hangindent = hangindent.decode(enc, encmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(enc)
2378
2379
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #             | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #   fp.__iter__ | buggy  | buggy           | okay
    #   fp.read*    | buggy  | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        # Regular on-disk files are assumed EINTR-safe ("fast"); only
        # pipes, sockets, ttys etc. take the slow EINTR-safe path.
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2450
2451
def iterlines(iterator):
    """Yield each text line from an iterable of string chunks."""
    for chunk in iterator:
        chunklines = chunk.splitlines()
        for chunkline in chunklines:
            yield chunkline
2455
2456
def expandpath(path):
    """Expand environment variables and a leading ~ in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2458
2459
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2473
2474
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # condfn() is re-checked after the liveness test to avoid a
            # race where the child satisfied it just before exiting.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # Always restore the previous SIGCHLD handler.
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2508
2509
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        # Register the bare prefix character as mapping to itself so a
        # doubled prefix renders one literal prefix. NOTE: this mutates
        # the caller's mapping.
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2533
2534
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # Fast path: port is already numeric (int or numeric string).
    try:
        return int(port)
    except ValueError:
        pass

    # Fall back to a service-name lookup.
    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2550
2551
# Recognized spellings of config booleans; lookups are done on the
# lowercased input.
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2561
2562
2562 _hextochr = dict((a + b, chr(int(a + b, 16)))
2563 _hextochr = dict((a + b, chr(int(a + b, 16)))
2563 for a in string.hexdigits for b in string.hexdigits)
2564 for a in string.hexdigits for b in string.hexdigits)
2564
2565
2565 class url(object):
2566 class url(object):
2566 r"""Reliable URL parser.
2567 r"""Reliable URL parser.
2567
2568
2568 This parses URLs and provides attributes for the following
2569 This parses URLs and provides attributes for the following
2569 components:
2570 components:
2570
2571
2571 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2572 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2572
2573
2573 Missing components are set to None. The only exception is
2574 Missing components are set to None. The only exception is
2574 fragment, which is set to '' if present but empty.
2575 fragment, which is set to '' if present but empty.
2575
2576
2576 If parsefragment is False, fragment is included in query. If
2577 If parsefragment is False, fragment is included in query. If
2577 parsequery is False, query is included in path. If both are
2578 parsequery is False, query is included in path. If both are
2578 False, both fragment and query are included in path.
2579 False, both fragment and query are included in path.
2579
2580
2580 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2581 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2581
2582
2582 Note that for backward compatibility reasons, bundle URLs do not
2583 Note that for backward compatibility reasons, bundle URLs do not
2583 take host names. That means 'bundle://../' has a path of '../'.
2584 take host names. That means 'bundle://../' has a path of '../'.
2584
2585
2585 Examples:
2586 Examples:
2586
2587
2587 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2588 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2588 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2589 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2589 >>> url('ssh://[::1]:2200//home/joe/repo')
2590 >>> url('ssh://[::1]:2200//home/joe/repo')
2590 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2591 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2591 >>> url('file:///home/joe/repo')
2592 >>> url('file:///home/joe/repo')
2592 <url scheme: 'file', path: '/home/joe/repo'>
2593 <url scheme: 'file', path: '/home/joe/repo'>
2593 >>> url('file:///c:/temp/foo/')
2594 >>> url('file:///c:/temp/foo/')
2594 <url scheme: 'file', path: 'c:/temp/foo/'>
2595 <url scheme: 'file', path: 'c:/temp/foo/'>
2595 >>> url('bundle:foo')
2596 >>> url('bundle:foo')
2596 <url scheme: 'bundle', path: 'foo'>
2597 <url scheme: 'bundle', path: 'foo'>
2597 >>> url('bundle://../foo')
2598 >>> url('bundle://../foo')
2598 <url scheme: 'bundle', path: '../foo'>
2599 <url scheme: 'bundle', path: '../foo'>
2599 >>> url(r'c:\foo\bar')
2600 >>> url(r'c:\foo\bar')
2600 <url path: 'c:\\foo\\bar'>
2601 <url path: 'c:\\foo\\bar'>
2601 >>> url(r'\\blah\blah\blah')
2602 >>> url(r'\\blah\blah\blah')
2602 <url path: '\\\\blah\\blah\\blah'>
2603 <url path: '\\\\blah\\blah\\blah'>
2603 >>> url(r'\\blah\blah\blah#baz')
2604 >>> url(r'\\blah\blah\blah#baz')
2604 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2605 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2605 >>> url(r'file:///C:\users\me')
2606 >>> url(r'file:///C:\users\me')
2606 <url scheme: 'file', path: 'C:\\users\\me'>
2607 <url scheme: 'file', path: 'C:\\users\\me'>
2607
2608
2608 Authentication credentials:
2609 Authentication credentials:
2609
2610
2610 >>> url('ssh://joe:xyz@x/repo')
2611 >>> url('ssh://joe:xyz@x/repo')
2611 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2612 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2612 >>> url('ssh://joe@x/repo')
2613 >>> url('ssh://joe@x/repo')
2613 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2614 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2614
2615
2615 Query strings and fragments:
2616 Query strings and fragments:
2616
2617
2617 >>> url('http://host/a?b#c')
2618 >>> url('http://host/a?b#c')
2618 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2619 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2619 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2620 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2620 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2621 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2621
2622
2622 Empty path:
2623 Empty path:
2623
2624
2624 >>> url('')
2625 >>> url('')
2625 <url path: ''>
2626 <url path: ''>
2626 >>> url('#a')
2627 >>> url('#a')
2627 <url path: '', fragment: 'a'>
2628 <url path: '', fragment: 'a'>
2628 >>> url('http://host/')
2629 >>> url('http://host/')
2629 <url scheme: 'http', host: 'host', path: ''>
2630 <url scheme: 'http', host: 'host', path: ''>
2630 >>> url('http://host/#a')
2631 >>> url('http://host/#a')
2631 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2632 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2632
2633
2633 Only scheme:
2634 Only scheme:
2634
2635
2635 >>> url('http:')
2636 >>> url('http:')
2636 <url scheme: 'http'>
2637 <url scheme: 'http'>
2637 """
2638 """
2638
2639
2639 _safechars = "!~*'()+"
2640 _safechars = "!~*'()+"
2640 _safepchars = "/!~*'()+:\\"
2641 _safepchars = "/!~*'()+:\\"
2641 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2642 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2642
2643
2643 def __init__(self, path, parsequery=True, parsefragment=True):
2644 def __init__(self, path, parsequery=True, parsefragment=True):
2644 # We slowly chomp away at path until we have only the path left
2645 # We slowly chomp away at path until we have only the path left
2645 self.scheme = self.user = self.passwd = self.host = None
2646 self.scheme = self.user = self.passwd = self.host = None
2646 self.port = self.path = self.query = self.fragment = None
2647 self.port = self.path = self.query = self.fragment = None
2647 self._localpath = True
2648 self._localpath = True
2648 self._hostport = ''
2649 self._hostport = ''
2649 self._origpath = path
2650 self._origpath = path
2650
2651
2651 if parsefragment and '#' in path:
2652 if parsefragment and '#' in path:
2652 path, self.fragment = path.split('#', 1)
2653 path, self.fragment = path.split('#', 1)
2653
2654
2654 # special case for Windows drive letters and UNC paths
2655 # special case for Windows drive letters and UNC paths
2655 if hasdriveletter(path) or path.startswith('\\\\'):
2656 if hasdriveletter(path) or path.startswith('\\\\'):
2656 self.path = path
2657 self.path = path
2657 return
2658 return
2658
2659
2659 # For compatibility reasons, we can't handle bundle paths as
2660 # For compatibility reasons, we can't handle bundle paths as
2660 # normal URLS
2661 # normal URLS
2661 if path.startswith('bundle:'):
2662 if path.startswith('bundle:'):
2662 self.scheme = 'bundle'
2663 self.scheme = 'bundle'
2663 path = path[7:]
2664 path = path[7:]
2664 if path.startswith('//'):
2665 if path.startswith('//'):
2665 path = path[2:]
2666 path = path[2:]
2666 self.path = path
2667 self.path = path
2667 return
2668 return
2668
2669
2669 if self._matchscheme(path):
2670 if self._matchscheme(path):
2670 parts = path.split(':', 1)
2671 parts = path.split(':', 1)
2671 if parts[0]:
2672 if parts[0]:
2672 self.scheme, path = parts
2673 self.scheme, path = parts
2673 self._localpath = False
2674 self._localpath = False
2674
2675
2675 if not path:
2676 if not path:
2676 path = None
2677 path = None
2677 if self._localpath:
2678 if self._localpath:
2678 self.path = ''
2679 self.path = ''
2679 return
2680 return
2680 else:
2681 else:
2681 if self._localpath:
2682 if self._localpath:
2682 self.path = path
2683 self.path = path
2683 return
2684 return
2684
2685
2685 if parsequery and '?' in path:
2686 if parsequery and '?' in path:
2686 path, self.query = path.split('?', 1)
2687 path, self.query = path.split('?', 1)
2687 if not path:
2688 if not path:
2688 path = None
2689 path = None
2689 if not self.query:
2690 if not self.query:
2690 self.query = None
2691 self.query = None
2691
2692
2692 # // is required to specify a host/authority
2693 # // is required to specify a host/authority
2693 if path and path.startswith('//'):
2694 if path and path.startswith('//'):
2694 parts = path[2:].split('/', 1)
2695 parts = path[2:].split('/', 1)
2695 if len(parts) > 1:
2696 if len(parts) > 1:
2696 self.host, path = parts
2697 self.host, path = parts
2697 else:
2698 else:
2698 self.host = parts[0]
2699 self.host = parts[0]
2699 path = None
2700 path = None
2700 if not self.host:
2701 if not self.host:
2701 self.host = None
2702 self.host = None
2702 # path of file:///d is /d
2703 # path of file:///d is /d
2703 # path of file:///d:/ is d:/, not /d:/
2704 # path of file:///d:/ is d:/, not /d:/
2704 if path and not hasdriveletter(path):
2705 if path and not hasdriveletter(path):
2705 path = '/' + path
2706 path = '/' + path
2706
2707
2707 if self.host and '@' in self.host:
2708 if self.host and '@' in self.host:
2708 self.user, self.host = self.host.rsplit('@', 1)
2709 self.user, self.host = self.host.rsplit('@', 1)
2709 if ':' in self.user:
2710 if ':' in self.user:
2710 self.user, self.passwd = self.user.split(':', 1)
2711 self.user, self.passwd = self.user.split(':', 1)
2711 if not self.host:
2712 if not self.host:
2712 self.host = None
2713 self.host = None
2713
2714
2714 # Don't split on colons in IPv6 addresses without ports
2715 # Don't split on colons in IPv6 addresses without ports
2715 if (self.host and ':' in self.host and
2716 if (self.host and ':' in self.host and
2716 not (self.host.startswith('[') and self.host.endswith(']'))):
2717 not (self.host.startswith('[') and self.host.endswith(']'))):
2717 self._hostport = self.host
2718 self._hostport = self.host
2718 self.host, self.port = self.host.rsplit(':', 1)
2719 self.host, self.port = self.host.rsplit(':', 1)
2719 if not self.host:
2720 if not self.host:
2720 self.host = None
2721 self.host = None
2721
2722
2722 if (self.host and self.scheme == 'file' and
2723 if (self.host and self.scheme == 'file' and
2723 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2724 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2724 raise Abort(_('file:// URLs can only refer to localhost'))
2725 raise Abort(_('file:// URLs can only refer to localhost'))
2725
2726
2726 self.path = path
2727 self.path = path
2727
2728
2728 # leave the query string escaped
2729 # leave the query string escaped
2729 for a in ('user', 'passwd', 'host', 'port',
2730 for a in ('user', 'passwd', 'host', 'port',
2730 'path', 'fragment'):
2731 'path', 'fragment'):
2731 v = getattr(self, a)
2732 v = getattr(self, a)
2732 if v is not None:
2733 if v is not None:
2733 setattr(self, a, urlreq.unquote(v))
2734 setattr(self, a, urlreq.unquote(v))
2734
2735
2735 def __repr__(self):
2736 def __repr__(self):
2736 attrs = []
2737 attrs = []
2737 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2738 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2738 'query', 'fragment'):
2739 'query', 'fragment'):
2739 v = getattr(self, a)
2740 v = getattr(self, a)
2740 if v is not None:
2741 if v is not None:
2741 attrs.append('%s: %r' % (a, v))
2742 attrs.append('%s: %r' % (a, v))
2742 return '<url %s>' % ', '.join(attrs)
2743 return '<url %s>' % ', '.join(attrs)
2743
2744
2744 def __str__(self):
2745 def __str__(self):
2745 r"""Join the URL's components back into a URL string.
2746 r"""Join the URL's components back into a URL string.
2746
2747
2747 Examples:
2748 Examples:
2748
2749
2749 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2750 >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2750 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2751 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2751 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2752 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
2752 'http://user:pw@host:80/?foo=bar&baz=42'
2753 'http://user:pw@host:80/?foo=bar&baz=42'
2753 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2754 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
2754 'http://user:pw@host:80/?foo=bar%3dbaz'
2755 'http://user:pw@host:80/?foo=bar%3dbaz'
2755 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2756 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
2756 'ssh://user:pw@[::1]:2200//home/joe#'
2757 'ssh://user:pw@[::1]:2200//home/joe#'
2757 >>> str(url('http://localhost:80//'))
2758 >>> str(url('http://localhost:80//'))
2758 'http://localhost:80//'
2759 'http://localhost:80//'
2759 >>> str(url('http://localhost:80/'))
2760 >>> str(url('http://localhost:80/'))
2760 'http://localhost:80/'
2761 'http://localhost:80/'
2761 >>> str(url('http://localhost:80'))
2762 >>> str(url('http://localhost:80'))
2762 'http://localhost:80/'
2763 'http://localhost:80/'
2763 >>> str(url('bundle:foo'))
2764 >>> str(url('bundle:foo'))
2764 'bundle:foo'
2765 'bundle:foo'
2765 >>> str(url('bundle://../foo'))
2766 >>> str(url('bundle://../foo'))
2766 'bundle:../foo'
2767 'bundle:../foo'
2767 >>> str(url('path'))
2768 >>> str(url('path'))
2768 'path'
2769 'path'
2769 >>> str(url('file:///tmp/foo/bar'))
2770 >>> str(url('file:///tmp/foo/bar'))
2770 'file:///tmp/foo/bar'
2771 'file:///tmp/foo/bar'
2771 >>> str(url('file:///c:/tmp/foo/bar'))
2772 >>> str(url('file:///c:/tmp/foo/bar'))
2772 'file:///c:/tmp/foo/bar'
2773 'file:///c:/tmp/foo/bar'
2773 >>> print url(r'bundle:foo\bar')
2774 >>> print url(r'bundle:foo\bar')
2774 bundle:foo\bar
2775 bundle:foo\bar
2775 >>> print url(r'file:///D:\data\hg')
2776 >>> print url(r'file:///D:\data\hg')
2776 file:///D:\data\hg
2777 file:///D:\data\hg
2777 """
2778 """
2778 return encoding.strfromlocal(self.__bytes__())
2779 return encoding.strfromlocal(self.__bytes__())
2779
2780
2780 def __bytes__(self):
2781 def __bytes__(self):
2781 if self._localpath:
2782 if self._localpath:
2782 s = self.path
2783 s = self.path
2783 if self.scheme == 'bundle':
2784 if self.scheme == 'bundle':
2784 s = 'bundle:' + s
2785 s = 'bundle:' + s
2785 if self.fragment:
2786 if self.fragment:
2786 s += '#' + self.fragment
2787 s += '#' + self.fragment
2787 return s
2788 return s
2788
2789
2789 s = self.scheme + ':'
2790 s = self.scheme + ':'
2790 if self.user or self.passwd or self.host:
2791 if self.user or self.passwd or self.host:
2791 s += '//'
2792 s += '//'
2792 elif self.scheme and (not self.path or self.path.startswith('/')
2793 elif self.scheme and (not self.path or self.path.startswith('/')
2793 or hasdriveletter(self.path)):
2794 or hasdriveletter(self.path)):
2794 s += '//'
2795 s += '//'
2795 if hasdriveletter(self.path):
2796 if hasdriveletter(self.path):
2796 s += '/'
2797 s += '/'
2797 if self.user:
2798 if self.user:
2798 s += urlreq.quote(self.user, safe=self._safechars)
2799 s += urlreq.quote(self.user, safe=self._safechars)
2799 if self.passwd:
2800 if self.passwd:
2800 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2801 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2801 if self.user or self.passwd:
2802 if self.user or self.passwd:
2802 s += '@'
2803 s += '@'
2803 if self.host:
2804 if self.host:
2804 if not (self.host.startswith('[') and self.host.endswith(']')):
2805 if not (self.host.startswith('[') and self.host.endswith(']')):
2805 s += urlreq.quote(self.host)
2806 s += urlreq.quote(self.host)
2806 else:
2807 else:
2807 s += self.host
2808 s += self.host
2808 if self.port:
2809 if self.port:
2809 s += ':' + urlreq.quote(self.port)
2810 s += ':' + urlreq.quote(self.port)
2810 if self.host:
2811 if self.host:
2811 s += '/'
2812 s += '/'
2812 if self.path:
2813 if self.path:
2813 # TODO: similar to the query string, we should not unescape the
2814 # TODO: similar to the query string, we should not unescape the
2814 # path when we store it, the path might contain '%2f' = '/',
2815 # path when we store it, the path might contain '%2f' = '/',
2815 # which we should *not* escape.
2816 # which we should *not* escape.
2816 s += urlreq.quote(self.path, safe=self._safepchars)
2817 s += urlreq.quote(self.path, safe=self._safepchars)
2817 if self.query:
2818 if self.query:
2818 # we store the query in escaped form.
2819 # we store the query in escaped form.
2819 s += '?' + self.query
2820 s += '?' + self.query
2820 if self.fragment is not None:
2821 if self.fragment is not None:
2821 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2822 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2822 return s
2823 return s
2823
2824
2824 def authinfo(self):
2825 def authinfo(self):
2825 user, passwd = self.user, self.passwd
2826 user, passwd = self.user, self.passwd
2826 try:
2827 try:
2827 self.user, self.passwd = None, None
2828 self.user, self.passwd = None, None
2828 s = bytes(self)
2829 s = bytes(self)
2829 finally:
2830 finally:
2830 self.user, self.passwd = user, passwd
2831 self.user, self.passwd = user, passwd
2831 if not self.user:
2832 if not self.user:
2832 return (s, None)
2833 return (s, None)
2833 # authinfo[1] is passed to urllib2 password manager, and its
2834 # authinfo[1] is passed to urllib2 password manager, and its
2834 # URIs must not contain credentials. The host is passed in the
2835 # URIs must not contain credentials. The host is passed in the
2835 # URIs list because Python < 2.4.3 uses only that to search for
2836 # URIs list because Python < 2.4.3 uses only that to search for
2836 # a password.
2837 # a password.
2837 return (s, (None, (s, self.host),
2838 return (s, (None, (s, self.host),
2838 self.user, self.passwd or ''))
2839 self.user, self.passwd or ''))
2839
2840
2840 def isabs(self):
2841 def isabs(self):
2841 if self.scheme and self.scheme != 'file':
2842 if self.scheme and self.scheme != 'file':
2842 return True # remote URL
2843 return True # remote URL
2843 if hasdriveletter(self.path):
2844 if hasdriveletter(self.path):
2844 return True # absolute for our purposes - can't be joined()
2845 return True # absolute for our purposes - can't be joined()
2845 if self.path.startswith(r'\\'):
2846 if self.path.startswith(r'\\'):
2846 return True # Windows UNC path
2847 return True # Windows UNC path
2847 if self.path.startswith('/'):
2848 if self.path.startswith('/'):
2848 return True # POSIX-style
2849 return True # POSIX-style
2849 return False
2850 return False
2850
2851
2851 def localpath(self):
2852 def localpath(self):
2852 if self.scheme == 'file' or self.scheme == 'bundle':
2853 if self.scheme == 'file' or self.scheme == 'bundle':
2853 path = self.path or '/'
2854 path = self.path or '/'
2854 # For Windows, we need to promote hosts containing drive
2855 # For Windows, we need to promote hosts containing drive
2855 # letters to paths with drive letters.
2856 # letters to paths with drive letters.
2856 if hasdriveletter(self._hostport):
2857 if hasdriveletter(self._hostport):
2857 path = self._hostport + '/' + self.path
2858 path = self._hostport + '/' + self.path
2858 elif (self.host is not None and self.path
2859 elif (self.host is not None and self.path
2859 and not hasdriveletter(path)):
2860 and not hasdriveletter(path)):
2860 path = '/' + path
2861 path = '/' + path
2861 return path
2862 return path
2862 return self._origpath
2863 return self._origpath
2863
2864
2864 def islocal(self):
2865 def islocal(self):
2865 '''whether localpath will return something that posixfile can open'''
2866 '''whether localpath will return something that posixfile can open'''
2866 return (not self.scheme or self.scheme == 'file'
2867 return (not self.scheme or self.scheme == 'file'
2867 or self.scheme == 'bundle')
2868 or self.scheme == 'bundle')
2868
2869
2869 def hasscheme(path):
2870 def hasscheme(path):
2870 return bool(url(path).scheme)
2871 return bool(url(path).scheme)
2871
2872
2872 def hasdriveletter(path):
2873 def hasdriveletter(path):
2873 return path and path[1:2] == ':' and path[0:1].isalpha()
2874 return path and path[1:2] == ':' and path[0:1].isalpha()
2874
2875
2875 def urllocalpath(path):
2876 def urllocalpath(path):
2876 return url(path, parsequery=False, parsefragment=False).localpath()
2877 return url(path, parsequery=False, parsefragment=False).localpath()
2877
2878
2878 def hidepassword(u):
2879 def hidepassword(u):
2879 '''hide user credential in a url string'''
2880 '''hide user credential in a url string'''
2880 u = url(u)
2881 u = url(u)
2881 if u.passwd:
2882 if u.passwd:
2882 u.passwd = '***'
2883 u.passwd = '***'
2883 return bytes(u)
2884 return bytes(u)
2884
2885
2885 def removeauth(u):
2886 def removeauth(u):
2886 '''remove all authentication information from a url string'''
2887 '''remove all authentication information from a url string'''
2887 u = url(u)
2888 u = url(u)
2888 u.user = u.passwd = None
2889 u.user = u.passwd = None
2889 return str(u)
2890 return str(u)
2890
2891
2891 timecount = unitcountfn(
2892 timecount = unitcountfn(
2892 (1, 1e3, _('%.0f s')),
2893 (1, 1e3, _('%.0f s')),
2893 (100, 1, _('%.1f s')),
2894 (100, 1, _('%.1f s')),
2894 (10, 1, _('%.2f s')),
2895 (10, 1, _('%.2f s')),
2895 (1, 1, _('%.3f s')),
2896 (1, 1, _('%.3f s')),
2896 (100, 0.001, _('%.1f ms')),
2897 (100, 0.001, _('%.1f ms')),
2897 (10, 0.001, _('%.2f ms')),
2898 (10, 0.001, _('%.2f ms')),
2898 (1, 0.001, _('%.3f ms')),
2899 (1, 0.001, _('%.3f ms')),
2899 (100, 0.000001, _('%.1f us')),
2900 (100, 0.000001, _('%.1f us')),
2900 (10, 0.000001, _('%.2f us')),
2901 (10, 0.000001, _('%.2f us')),
2901 (1, 0.000001, _('%.3f us')),
2902 (1, 0.000001, _('%.3f us')),
2902 (100, 0.000000001, _('%.1f ns')),
2903 (100, 0.000000001, _('%.1f ns')),
2903 (10, 0.000000001, _('%.2f ns')),
2904 (10, 0.000000001, _('%.2f ns')),
2904 (1, 0.000000001, _('%.3f ns')),
2905 (1, 0.000000001, _('%.3f ns')),
2905 )
2906 )
2906
2907
2907 _timenesting = [0]
2908 _timenesting = [0]
2908
2909
2909 def timed(func):
2910 def timed(func):
2910 '''Report the execution time of a function call to stderr.
2911 '''Report the execution time of a function call to stderr.
2911
2912
2912 During development, use as a decorator when you need to measure
2913 During development, use as a decorator when you need to measure
2913 the cost of a function, e.g. as follows:
2914 the cost of a function, e.g. as follows:
2914
2915
2915 @util.timed
2916 @util.timed
2916 def foo(a, b, c):
2917 def foo(a, b, c):
2917 pass
2918 pass
2918 '''
2919 '''
2919
2920
2920 def wrapper(*args, **kwargs):
2921 def wrapper(*args, **kwargs):
2921 start = timer()
2922 start = timer()
2922 indent = 2
2923 indent = 2
2923 _timenesting[0] += indent
2924 _timenesting[0] += indent
2924 try:
2925 try:
2925 return func(*args, **kwargs)
2926 return func(*args, **kwargs)
2926 finally:
2927 finally:
2927 elapsed = timer() - start
2928 elapsed = timer() - start
2928 _timenesting[0] -= indent
2929 _timenesting[0] -= indent
2929 stderr.write('%s%s: %s\n' %
2930 stderr.write('%s%s: %s\n' %
2930 (' ' * _timenesting[0], func.__name__,
2931 (' ' * _timenesting[0], func.__name__,
2931 timecount(elapsed)))
2932 timecount(elapsed)))
2932 return wrapper
2933 return wrapper
2933
2934
2934 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2935 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2935 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2936 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2936
2937
2937 def sizetoint(s):
2938 def sizetoint(s):
2938 '''Convert a space specifier to a byte count.
2939 '''Convert a space specifier to a byte count.
2939
2940
2940 >>> sizetoint('30')
2941 >>> sizetoint('30')
2941 30
2942 30
2942 >>> sizetoint('2.2kb')
2943 >>> sizetoint('2.2kb')
2943 2252
2944 2252
2944 >>> sizetoint('6M')
2945 >>> sizetoint('6M')
2945 6291456
2946 6291456
2946 '''
2947 '''
2947 t = s.strip().lower()
2948 t = s.strip().lower()
2948 try:
2949 try:
2949 for k, u in _sizeunits:
2950 for k, u in _sizeunits:
2950 if t.endswith(k):
2951 if t.endswith(k):
2951 return int(float(t[:-len(k)]) * u)
2952 return int(float(t[:-len(k)]) * u)
2952 return int(t)
2953 return int(t)
2953 except ValueError:
2954 except ValueError:
2954 raise error.ParseError(_("couldn't parse size: %s") % s)
2955 raise error.ParseError(_("couldn't parse size: %s") % s)
2955
2956
2956 class hooks(object):
2957 class hooks(object):
2957 '''A collection of hook functions that can be used to extend a
2958 '''A collection of hook functions that can be used to extend a
2958 function's behavior. Hooks are called in lexicographic order,
2959 function's behavior. Hooks are called in lexicographic order,
2959 based on the names of their sources.'''
2960 based on the names of their sources.'''
2960
2961
2961 def __init__(self):
2962 def __init__(self):
2962 self._hooks = []
2963 self._hooks = []
2963
2964
2964 def add(self, source, hook):
2965 def add(self, source, hook):
2965 self._hooks.append((source, hook))
2966 self._hooks.append((source, hook))
2966
2967
2967 def __call__(self, *args):
2968 def __call__(self, *args):
2968 self._hooks.sort(key=lambda x: x[0])
2969 self._hooks.sort(key=lambda x: x[0])
2969 results = []
2970 results = []
2970 for source, hook in self._hooks:
2971 for source, hook in self._hooks:
2971 results.append(hook(*args))
2972 results.append(hook(*args))
2972 return results
2973 return results
2973
2974
2974 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2975 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
2975 '''Yields lines for a nicely formatted stacktrace.
2976 '''Yields lines for a nicely formatted stacktrace.
2976 Skips the 'skip' last entries, then return the last 'depth' entries.
2977 Skips the 'skip' last entries, then return the last 'depth' entries.
2977 Each file+linenumber is formatted according to fileline.
2978 Each file+linenumber is formatted according to fileline.
2978 Each line is formatted according to line.
2979 Each line is formatted according to line.
2979 If line is None, it yields:
2980 If line is None, it yields:
2980 length of longest filepath+line number,
2981 length of longest filepath+line number,
2981 filepath+linenumber,
2982 filepath+linenumber,
2982 function
2983 function
2983
2984
2984 Not be used in production code but very convenient while developing.
2985 Not be used in production code but very convenient while developing.
2985 '''
2986 '''
2986 entries = [(fileline % (fn, ln), func)
2987 entries = [(fileline % (fn, ln), func)
2987 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2988 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2988 ][-depth:]
2989 ][-depth:]
2989 if entries:
2990 if entries:
2990 fnmax = max(len(entry[0]) for entry in entries)
2991 fnmax = max(len(entry[0]) for entry in entries)
2991 for fnln, func in entries:
2992 for fnln, func in entries:
2992 if line is None:
2993 if line is None:
2993 yield (fnmax, fnln, func)
2994 yield (fnmax, fnln, func)
2994 else:
2995 else:
2995 yield line % (fnmax, fnln, func)
2996 yield line % (fnmax, fnln, func)
2996
2997
2997 def debugstacktrace(msg='stacktrace', skip=0,
2998 def debugstacktrace(msg='stacktrace', skip=0,
2998 f=stderr, otherf=stdout, depth=0):
2999 f=stderr, otherf=stdout, depth=0):
2999 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3000 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3000 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3001 Skips the 'skip' entries closest to the call, then show 'depth' entries.
3001 By default it will flush stdout first.
3002 By default it will flush stdout first.
3002 It can be used everywhere and intentionally does not require an ui object.
3003 It can be used everywhere and intentionally does not require an ui object.
3003 Not be used in production code but very convenient while developing.
3004 Not be used in production code but very convenient while developing.
3004 '''
3005 '''
3005 if otherf:
3006 if otherf:
3006 otherf.flush()
3007 otherf.flush()
3007 f.write('%s at:\n' % msg.rstrip())
3008 f.write('%s at:\n' % msg.rstrip())
3008 for line in getstackframes(skip + 1, depth=depth):
3009 for line in getstackframes(skip + 1, depth=depth):
3009 f.write(line)
3010 f.write(line)
3010 f.flush()
3011 f.flush()
3011
3012
3012 class dirs(object):
3013 class dirs(object):
3013 '''a multiset of directory names from a dirstate or manifest'''
3014 '''a multiset of directory names from a dirstate or manifest'''
3014
3015
3015 def __init__(self, map, skip=None):
3016 def __init__(self, map, skip=None):
3016 self._dirs = {}
3017 self._dirs = {}
3017 addpath = self.addpath
3018 addpath = self.addpath
3018 if safehasattr(map, 'iteritems') and skip is not None:
3019 if safehasattr(map, 'iteritems') and skip is not None:
3019 for f, s in map.iteritems():
3020 for f, s in map.iteritems():
3020 if s[0] != skip:
3021 if s[0] != skip:
3021 addpath(f)
3022 addpath(f)
3022 else:
3023 else:
3023 for f in map:
3024 for f in map:
3024 addpath(f)
3025 addpath(f)
3025
3026
3026 def addpath(self, path):
3027 def addpath(self, path):
3027 dirs = self._dirs
3028 dirs = self._dirs
3028 for base in finddirs(path):
3029 for base in finddirs(path):
3029 if base in dirs:
3030 if base in dirs:
3030 dirs[base] += 1
3031 dirs[base] += 1
3031 return
3032 return
3032 dirs[base] = 1
3033 dirs[base] = 1
3033
3034
3034 def delpath(self, path):
3035 def delpath(self, path):
3035 dirs = self._dirs
3036 dirs = self._dirs
3036 for base in finddirs(path):
3037 for base in finddirs(path):
3037 if dirs[base] > 1:
3038 if dirs[base] > 1:
3038 dirs[base] -= 1
3039 dirs[base] -= 1
3039 return
3040 return
3040 del dirs[base]
3041 del dirs[base]
3041
3042
3042 def __iter__(self):
3043 def __iter__(self):
3043 return iter(self._dirs)
3044 return iter(self._dirs)
3044
3045
3045 def __contains__(self, d):
3046 def __contains__(self, d):
3046 return d in self._dirs
3047 return d in self._dirs
3047
3048
3048 if safehasattr(parsers, 'dirs'):
3049 if safehasattr(parsers, 'dirs'):
3049 dirs = parsers.dirs
3050 dirs = parsers.dirs
3050
3051
3051 def finddirs(path):
3052 def finddirs(path):
3052 pos = path.rfind('/')
3053 pos = path.rfind('/')
3053 while pos != -1:
3054 while pos != -1:
3054 yield path[:pos]
3055 yield path[:pos]
3055 pos = path.rfind('/', 0, pos)
3056 pos = path.rfind('/', 0, pos)
3056
3057
3057 class ctxmanager(object):
3058 class ctxmanager(object):
3058 '''A context manager for use in 'with' blocks to allow multiple
3059 '''A context manager for use in 'with' blocks to allow multiple
3059 contexts to be entered at once. This is both safer and more
3060 contexts to be entered at once. This is both safer and more
3060 flexible than contextlib.nested.
3061 flexible than contextlib.nested.
3061
3062
3062 Once Mercurial supports Python 2.7+, this will become mostly
3063 Once Mercurial supports Python 2.7+, this will become mostly
3063 unnecessary.
3064 unnecessary.
3064 '''
3065 '''
3065
3066
3066 def __init__(self, *args):
3067 def __init__(self, *args):
3067 '''Accepts a list of no-argument functions that return context
3068 '''Accepts a list of no-argument functions that return context
3068 managers. These will be invoked at __call__ time.'''
3069 managers. These will be invoked at __call__ time.'''
3069 self._pending = args
3070 self._pending = args
3070 self._atexit = []
3071 self._atexit = []
3071
3072
3072 def __enter__(self):
3073 def __enter__(self):
3073 return self
3074 return self
3074
3075
3075 def enter(self):
3076 def enter(self):
3076 '''Create and enter context managers in the order in which they were
3077 '''Create and enter context managers in the order in which they were
3077 passed to the constructor.'''
3078 passed to the constructor.'''
3078 values = []
3079 values = []
3079 for func in self._pending:
3080 for func in self._pending:
3080 obj = func()
3081 obj = func()
3081 values.append(obj.__enter__())
3082 values.append(obj.__enter__())
3082 self._atexit.append(obj.__exit__)
3083 self._atexit.append(obj.__exit__)
3083 del self._pending
3084 del self._pending
3084 return values
3085 return values
3085
3086
3086 def atexit(self, func, *args, **kwargs):
3087 def atexit(self, func, *args, **kwargs):
3087 '''Add a function to call when this context manager exits. The
3088 '''Add a function to call when this context manager exits. The
3088 ordering of multiple atexit calls is unspecified, save that
3089 ordering of multiple atexit calls is unspecified, save that
3089 they will happen before any __exit__ functions.'''
3090 they will happen before any __exit__ functions.'''
3090 def wrapper(exc_type, exc_val, exc_tb):
3091 def wrapper(exc_type, exc_val, exc_tb):
3091 func(*args, **kwargs)
3092 func(*args, **kwargs)
3092 self._atexit.append(wrapper)
3093 self._atexit.append(wrapper)
3093 return func
3094 return func
3094
3095
3095 def __exit__(self, exc_type, exc_val, exc_tb):
3096 def __exit__(self, exc_type, exc_val, exc_tb):
3096 '''Context managers are exited in the reverse order from which
3097 '''Context managers are exited in the reverse order from which
3097 they were created.'''
3098 they were created.'''
3098 received = exc_type is not None
3099 received = exc_type is not None
3099 suppressed = False
3100 suppressed = False
3100 pending = None
3101 pending = None
3101 self._atexit.reverse()
3102 self._atexit.reverse()
3102 for exitfunc in self._atexit:
3103 for exitfunc in self._atexit:
3103 try:
3104 try:
3104 if exitfunc(exc_type, exc_val, exc_tb):
3105 if exitfunc(exc_type, exc_val, exc_tb):
3105 suppressed = True
3106 suppressed = True
3106 exc_type = None
3107 exc_type = None
3107 exc_val = None
3108 exc_val = None
3108 exc_tb = None
3109 exc_tb = None
3109 except BaseException:
3110 except BaseException:
3110 pending = sys.exc_info()
3111 pending = sys.exc_info()
3111 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3112 exc_type, exc_val, exc_tb = pending = sys.exc_info()
3112 del self._atexit
3113 del self._atexit
3113 if pending:
3114 if pending:
3114 raise exc_val
3115 raise exc_val
3115 return received and suppressed
3116 return received and suppressed
3116
3117
3117 # compression code
3118 # compression code
3118
3119
3119 SERVERROLE = 'server'
3120 SERVERROLE = 'server'
3120 CLIENTROLE = 'client'
3121 CLIENTROLE = 'client'
3121
3122
3122 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3123 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3123 (u'name', u'serverpriority',
3124 (u'name', u'serverpriority',
3124 u'clientpriority'))
3125 u'clientpriority'))
3125
3126
3126 class compressormanager(object):
3127 class compressormanager(object):
3127 """Holds registrations of various compression engines.
3128 """Holds registrations of various compression engines.
3128
3129
3129 This class essentially abstracts the differences between compression
3130 This class essentially abstracts the differences between compression
3130 engines to allow new compression formats to be added easily, possibly from
3131 engines to allow new compression formats to be added easily, possibly from
3131 extensions.
3132 extensions.
3132
3133
3133 Compressors are registered against the global instance by calling its
3134 Compressors are registered against the global instance by calling its
3134 ``register()`` method.
3135 ``register()`` method.
3135 """
3136 """
3136 def __init__(self):
3137 def __init__(self):
3137 self._engines = {}
3138 self._engines = {}
3138 # Bundle spec human name to engine name.
3139 # Bundle spec human name to engine name.
3139 self._bundlenames = {}
3140 self._bundlenames = {}
3140 # Internal bundle identifier to engine name.
3141 # Internal bundle identifier to engine name.
3141 self._bundletypes = {}
3142 self._bundletypes = {}
3142 # Revlog header to engine name.
3143 # Revlog header to engine name.
3143 self._revlogheaders = {}
3144 self._revlogheaders = {}
3144 # Wire proto identifier to engine name.
3145 # Wire proto identifier to engine name.
3145 self._wiretypes = {}
3146 self._wiretypes = {}
3146
3147
3147 def __getitem__(self, key):
3148 def __getitem__(self, key):
3148 return self._engines[key]
3149 return self._engines[key]
3149
3150
3150 def __contains__(self, key):
3151 def __contains__(self, key):
3151 return key in self._engines
3152 return key in self._engines
3152
3153
3153 def __iter__(self):
3154 def __iter__(self):
3154 return iter(self._engines.keys())
3155 return iter(self._engines.keys())
3155
3156
3156 def register(self, engine):
3157 def register(self, engine):
3157 """Register a compression engine with the manager.
3158 """Register a compression engine with the manager.
3158
3159
3159 The argument must be a ``compressionengine`` instance.
3160 The argument must be a ``compressionengine`` instance.
3160 """
3161 """
3161 if not isinstance(engine, compressionengine):
3162 if not isinstance(engine, compressionengine):
3162 raise ValueError(_('argument must be a compressionengine'))
3163 raise ValueError(_('argument must be a compressionengine'))
3163
3164
3164 name = engine.name()
3165 name = engine.name()
3165
3166
3166 if name in self._engines:
3167 if name in self._engines:
3167 raise error.Abort(_('compression engine %s already registered') %
3168 raise error.Abort(_('compression engine %s already registered') %
3168 name)
3169 name)
3169
3170
3170 bundleinfo = engine.bundletype()
3171 bundleinfo = engine.bundletype()
3171 if bundleinfo:
3172 if bundleinfo:
3172 bundlename, bundletype = bundleinfo
3173 bundlename, bundletype = bundleinfo
3173
3174
3174 if bundlename in self._bundlenames:
3175 if bundlename in self._bundlenames:
3175 raise error.Abort(_('bundle name %s already registered') %
3176 raise error.Abort(_('bundle name %s already registered') %
3176 bundlename)
3177 bundlename)
3177 if bundletype in self._bundletypes:
3178 if bundletype in self._bundletypes:
3178 raise error.Abort(_('bundle type %s already registered by %s') %
3179 raise error.Abort(_('bundle type %s already registered by %s') %
3179 (bundletype, self._bundletypes[bundletype]))
3180 (bundletype, self._bundletypes[bundletype]))
3180
3181
3181 # No external facing name declared.
3182 # No external facing name declared.
3182 if bundlename:
3183 if bundlename:
3183 self._bundlenames[bundlename] = name
3184 self._bundlenames[bundlename] = name
3184
3185
3185 self._bundletypes[bundletype] = name
3186 self._bundletypes[bundletype] = name
3186
3187
3187 wiresupport = engine.wireprotosupport()
3188 wiresupport = engine.wireprotosupport()
3188 if wiresupport:
3189 if wiresupport:
3189 wiretype = wiresupport.name
3190 wiretype = wiresupport.name
3190 if wiretype in self._wiretypes:
3191 if wiretype in self._wiretypes:
3191 raise error.Abort(_('wire protocol compression %s already '
3192 raise error.Abort(_('wire protocol compression %s already '
3192 'registered by %s') %
3193 'registered by %s') %
3193 (wiretype, self._wiretypes[wiretype]))
3194 (wiretype, self._wiretypes[wiretype]))
3194
3195
3195 self._wiretypes[wiretype] = name
3196 self._wiretypes[wiretype] = name
3196
3197
3197 revlogheader = engine.revlogheader()
3198 revlogheader = engine.revlogheader()
3198 if revlogheader and revlogheader in self._revlogheaders:
3199 if revlogheader and revlogheader in self._revlogheaders:
3199 raise error.Abort(_('revlog header %s already registered by %s') %
3200 raise error.Abort(_('revlog header %s already registered by %s') %
3200 (revlogheader, self._revlogheaders[revlogheader]))
3201 (revlogheader, self._revlogheaders[revlogheader]))
3201
3202
3202 if revlogheader:
3203 if revlogheader:
3203 self._revlogheaders[revlogheader] = name
3204 self._revlogheaders[revlogheader] = name
3204
3205
3205 self._engines[name] = engine
3206 self._engines[name] = engine
3206
3207
3207 @property
3208 @property
3208 def supportedbundlenames(self):
3209 def supportedbundlenames(self):
3209 return set(self._bundlenames.keys())
3210 return set(self._bundlenames.keys())
3210
3211
3211 @property
3212 @property
3212 def supportedbundletypes(self):
3213 def supportedbundletypes(self):
3213 return set(self._bundletypes.keys())
3214 return set(self._bundletypes.keys())
3214
3215
3215 def forbundlename(self, bundlename):
3216 def forbundlename(self, bundlename):
3216 """Obtain a compression engine registered to a bundle name.
3217 """Obtain a compression engine registered to a bundle name.
3217
3218
3218 Will raise KeyError if the bundle type isn't registered.
3219 Will raise KeyError if the bundle type isn't registered.
3219
3220
3220 Will abort if the engine is known but not available.
3221 Will abort if the engine is known but not available.
3221 """
3222 """
3222 engine = self._engines[self._bundlenames[bundlename]]
3223 engine = self._engines[self._bundlenames[bundlename]]
3223 if not engine.available():
3224 if not engine.available():
3224 raise error.Abort(_('compression engine %s could not be loaded') %
3225 raise error.Abort(_('compression engine %s could not be loaded') %
3225 engine.name())
3226 engine.name())
3226 return engine
3227 return engine
3227
3228
3228 def forbundletype(self, bundletype):
3229 def forbundletype(self, bundletype):
3229 """Obtain a compression engine registered to a bundle type.
3230 """Obtain a compression engine registered to a bundle type.
3230
3231
3231 Will raise KeyError if the bundle type isn't registered.
3232 Will raise KeyError if the bundle type isn't registered.
3232
3233
3233 Will abort if the engine is known but not available.
3234 Will abort if the engine is known but not available.
3234 """
3235 """
3235 engine = self._engines[self._bundletypes[bundletype]]
3236 engine = self._engines[self._bundletypes[bundletype]]
3236 if not engine.available():
3237 if not engine.available():
3237 raise error.Abort(_('compression engine %s could not be loaded') %
3238 raise error.Abort(_('compression engine %s could not be loaded') %
3238 engine.name())
3239 engine.name())
3239 return engine
3240 return engine
3240
3241
def supportedwireengines(self, role, onlyavailable=True):
    """Obtain compression engines that support the wire protocol.

    Returns a list of engines in prioritized order, most desired first.

    If ``onlyavailable`` is set, filter out engines that can't be
    loaded.
    """
    # Only server/client roles exist; each has its own priority attribute.
    assert role in (SERVERROLE, CLIENTROLE)

    attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

    # _wiretypes maps wire format identifiers to engine names; only engines
    # declaring wire protocol support are present in it.
    engines = [self._engines[e] for e in self._wiretypes.values()]
    if onlyavailable:
        engines = [e for e in engines if e.available()]

    def getkey(e):
        # Sort first by priority, highest first. In case of tie, sort
        # alphabetically. This is arbitrary, but ensures output is
        # stable.
        w = e.wireprotosupport()
        return -1 * getattr(w, attr), w.name

    return list(sorted(engines, key=getkey))
3265
3266
def forwiretype(self, wiretype):
    """Obtain a compression engine registered to a wire protocol format.

    Raises KeyError if the wire type isn't registered; aborts if the
    engine is known but cannot be loaded.
    """
    enginename = self._wiretypes[wiretype]
    engine = self._engines[enginename]
    if engine.available():
        return engine
    raise error.Abort(_('compression engine %s could not be loaded') %
                      engine.name())
3272
3273
def forrevlogheader(self, header):
    """Obtain a compression engine registered to a revlog header.

    Will raise KeyError if the revlog header value isn't registered.
    """
    # Unlike the bundle/wire lookups, no availability check is done here;
    # revlog decompression routing assumes registered engines are usable.
    enginename = self._revlogheaders[header]
    return self._engines[enginename]
3279
3280
# Module-level singleton registry of all known compression engines,
# keyed by engine name. Engines self-register below via
# compengines.register(...).
compengines = compressormanager()
3281
3282
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        # Default: engines are assumed available unless they override this.
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader`.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3398
3399
class _zlibengine(compressionengine):
    # Compression backend built on the stdlib ``zlib`` module.

    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # -1 selects zlib's default compression level.
        compobj = zlib.compressobj(opts.get('level', -1))
        for piece in it:
            out = compobj.compress(piece)
            # compress() may buffer internally and emit nothing; skip
            # forwarding empty chunks through the generator.
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = zlib.decompressobj()
            for piece in filechunkiter(fh):
                while piece:
                    # Cap each output chunk (2**18 bytes) to limit memory.
                    yield decomp.decompress(piece, 2 ** 18)
                    piece = decomp.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Tiny payloads never compress profitably.
            if insize < 44:
                return None

            if insize <= 1000000:
                # One-shot compression for moderately sized inputs.
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            compobj = zlib.compressobj()
            pieces = []
            offset = 0
            while offset < insize:
                end = offset + 2**20
                pieces.append(compobj.compress(data[offset:end]))
                offset = end
            pieces.append(compobj.flush())

            if sum(map(len, pieces)) < insize:
                return ''.join(pieces)
            return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()
3483
3484
# Register the zlib engine with the global engine manager.
compengines.register(_zlibengine())
3485
3486
class _bz2engine(compressionengine):
    # Compression backend built on the stdlib ``bz2`` module.

    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # 9 is bz2's maximum (and default) compression level.
        compobj = bz2.BZ2Compressor(opts.get('level', 9))
        for piece in it:
            out = compobj.compress(piece)
            # The compressor buffers internally; only forward real output.
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = bz2.BZ2Decompressor()
            for piece in filechunkiter(fh):
                yield decomp.decompress(piece)

        return chunkbuffer(gen())
3526
3527
# Register the bz2 engine with the global engine manager.
compengines.register(_bz2engine())
3528
3529
class _truncatedbz2engine(compressionengine):
    # Decompression-only engine for legacy bz2 streams whose 'BZ' magic
    # bytes were stripped by the caller.

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # First element is None: never exposed as a user-facing bundle spec.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            decomp = bz2.BZ2Decompressor()
            # The input stream doesn't have the 'BZ' header. So add it back.
            decomp.decompress('BZ')
            for piece in filechunkiter(fh):
                yield decomp.decompress(piece)

        return chunkbuffer(gen())
3547
3548
# Register the header-stripped bz2 variant with the global engine manager.
compengines.register(_truncatedbz2engine())
3549
3550
class _noopengine(compressionengine):
    # Pass-through engine: bytes go in and come out unmodified.

    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Identity transform: hand the input iterator straight back.
        return it

    def decompressorreader(self, fh):
        # Identity transform: the "compressed" stream is already plain data.
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # Never claims to have compressed anything.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()
3582
3583
# Register the no-op (uncompressed) engine with the global engine manager.
compengines.register(_noopengine())
3584
3585
class _zstdengine(compressionengine):
    # Engine backed by the optional bundled ``zstd`` C extension module.

    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        # Available only when the optional C extension imported cleanly.
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        # Highest default priority of all engines, for both server and client.
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer and return nothing; skip empty chunks.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            # Chunk sizes recommended by the zstd bindings for streaming.
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Tiny payloads never compress profitably.
            if insize < 50:
                return None

            elif insize <= 1000000:
                # One-shot compression for moderately sized inputs.
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Stream large inputs to avoid doubling memory usage.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))
3711
3712
# Register the zstd engine with the global engine manager. It reports
# itself unavailable when the optional C extension is missing.
compengines.register(_zstdengine())
3713
3714
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    topics = {}
    for enginename in compengines:
        engine = compengines[enginename]
        if not engine.available():
            continue

        spec = engine.bundletype()
        # Skip engines with no bundle support or no user-facing name.
        if not spec or not spec[0]:
            continue

        holder = docobject()
        holder.__doc__ = pycompat.sysstr('``%s``\n %s') % (
            spec[0], engine.bundletype.__doc__)

        topics[spec[0]] = holder

    return topics
3743
3744
# convenient shortcut (e.g. util.dst(...) from a debugging session)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now