##// END OF EJS Templates
py3: stop implicitly importing unicode...
Gregory Szorc -
r43356:bbcbb82e default
parent child Browse files
Show More
@@ -1,298 +1,297 b''
1 # __init__.py - Startup and module loading logic for Mercurial.
1 # __init__.py - Startup and module loading logic for Mercurial.
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import sys
10 import sys
11
11
12 # Allow 'from mercurial import demandimport' to keep working.
12 # Allow 'from mercurial import demandimport' to keep working.
13 import hgdemandimport
13 import hgdemandimport
14
14
15 demandimport = hgdemandimport
15 demandimport = hgdemandimport
16
16
17 __all__ = []
17 __all__ = []
18
18
19 # Python 3 uses a custom module loader that transforms source code between
19 # Python 3 uses a custom module loader that transforms source code between
20 # source file reading and compilation. This is done by registering a custom
20 # source file reading and compilation. This is done by registering a custom
21 # finder that changes the spec for Mercurial modules to use a custom loader.
21 # finder that changes the spec for Mercurial modules to use a custom loader.
22 if sys.version_info[0] >= 3:
22 if sys.version_info[0] >= 3:
23 import importlib
23 import importlib
24 import importlib.abc
24 import importlib.abc
25 import io
25 import io
26 import token
26 import token
27 import tokenize
27 import tokenize
28
28
class hgpathentryfinder(importlib.abc.MetaPathFinder):
    """A sys.meta_path finder that uses a custom module loader."""

    # Module prefixes that must NOT be run through the source
    # transformer: binary extensions, dual-version-clean third-party
    # code, and modules built for a specific Python version.
    _passthrough = (
        'mercurial.cext.',  # don't try to parse binary
        'mercurial.thirdparty',  # expected to be dual-version clean
        'mercurial.zstd',  # already dual-version clean
        'mercurial.rustext',  # built for the right python version
        'hgext.fsmonitor.pywatchman',  # already dual-version clean
    )

    def find_spec(self, fullname, path, target=None):
        """Return a spec whose loader rewrites source, or None."""
        # Only handle Mercurial-related modules.
        if not fullname.startswith(('mercurial.', 'hgext.')):
            return None
        # Skip modules that must not be mangled (see _passthrough).
        if fullname.startswith(self._passthrough):
            return None

        # Try to find the module using the other registered finders.
        spec = None
        for finder in sys.meta_path:
            if finder == self:
                continue

            # Originally the API was a `find_module` method, but it was
            # renamed to `find_spec` in python 3.4, with a new `target`
            # argument.
            find_spec_method = getattr(finder, 'find_spec', None)
            if find_spec_method:
                spec = find_spec_method(fullname, path, target=target)
            else:
                spec = finder.find_module(fullname)
                if spec is not None:
                    spec = importlib.util.spec_from_loader(fullname, spec)
            if spec:
                break

        # This is a Mercurial-related module but we couldn't find it
        # using the previously-registered finders. This likely means
        # the module doesn't exist.
        if not spec:
            return None

        # TODO need to support loaders from alternate specs, like zip
        # loaders.
        loader = hgloader(spec.name, spec.origin)
        # Can't use util.safehasattr here because that would require
        # importing util, and we're in import code.
        if hasattr(spec.loader, 'loader'):  # hasattr-py3-only
            # This is a nested loader (maybe a lazy loader?)
            spec.loader.loader = loader
        else:
            spec.loader = loader
        return spec
89
89
def replacetokens(tokens, fullname):
    """Transform a stream of tokens from raw to Python 3.

    It is called by the custom module loading machinery to rewrite
    source/tokens between source decoding and compilation.

    Returns a generator of possibly rewritten tokens.

    The input token list may be mutated as part of processing. However,
    its changes do not necessarily match the output token stream.

    REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION
    OR CACHED FILES WON'T GET INVALIDATED PROPERLY.
    """
    futureimpline = False

    # The following utility functions access the tokens list and the i
    # index of the ``for i, t in enumerate(tokens)`` loop below.
    def _isop(j, *o):
        """Assert that tokens[j] is an OP with one of the given values"""
        try:
            return tokens[j].type == token.OP and tokens[j].string in o
        except IndexError:
            return False

    def _findargnofcall(n):
        """Find arg n of a call expression (start at 0)

        Returns index of the first token of that argument, or None if
        there is not that many arguments.

        Assumes that token[i + 1] is '('.
        """
        nested = 0
        for j in range(i + 2, len(tokens)):
            if _isop(j, ')', ']', '}'):
                # end of call, tuple, subscription or dict / set
                nested -= 1
                if nested < 0:
                    return None
            elif n == 0:
                # this is the starting position of arg
                return j
            elif _isop(j, '(', '[', '{'):
                nested += 1
            elif _isop(j, ',') and nested == 0:
                n -= 1

        return None

    def _ensureunicode(j):
        """Make sure the token at j is a unicode string

        This rewrites a string token to include the unicode literal prefix
        so the string transformer won't add the byte prefix.

        Ignores tokens that are not strings. Assumes bounds checking has
        already been done.
        """
        st = tokens[j]
        if st.type == token.STRING and st.string.startswith(("'", '"')):
            tokens[j] = st._replace(string='u%s' % st.string)

    for i, t in enumerate(tokens):
        # Insert compatibility imports at "from __future__ import" line.
        # No '\n' should be added to preserve line numbers.
        if (
            t.type == token.NAME
            and t.string == 'import'
            and all(u.type == token.NAME for u in tokens[i - 2 : i])
            and [u.string for u in tokens[i - 2 : i]]
            == ['from', '__future__']
        ):
            futureimpline = True
        if t.type == token.NEWLINE and futureimpline:
            futureimpline = False
            if fullname == 'mercurial.pycompat':
                # pycompat itself defines these names; don't recurse.
                yield t
                continue
            r, c = t.start
            newsrc = (
                b'; from mercurial.pycompat import '
                b'delattr, getattr, hasattr, setattr\n'
            )
            for u in tokenize.tokenize(io.BytesIO(newsrc).readline):
                if u.type in (tokenize.ENCODING, token.ENDMARKER):
                    continue
                # Shift the injected tokens onto the current line so
                # source line numbers are preserved.
                yield u._replace(
                    start=(r, c + u.start[1]), end=(r, c + u.end[1])
                )
            continue

        # This looks like a function call.
        if t.type == token.NAME and _isop(i + 1, '('):
            fn = t.string

            # *attr() builtins don't accept byte strings to 2nd argument.
            if fn in (
                'getattr',
                'setattr',
                'hasattr',
                'safehasattr',
            ) and not _isop(i - 1, '.'):
                arg1idx = _findargnofcall(1)
                if arg1idx is not None:
                    _ensureunicode(arg1idx)

            # .encode() and .decode() on str/bytes/unicode don't accept
            # byte strings on Python 3.
            elif fn in ('encode', 'decode') and _isop(i - 1, '.'):
                for argn in range(2):
                    argidx = _findargnofcall(argn)
                    if argidx is not None:
                        _ensureunicode(argidx)

            # It changes iteritems/values to items/values as they are not
            # present in Python 3 world.
            elif fn in ('iteritems', 'itervalues') and not (
                tokens[i - 1].type == token.NAME
                and tokens[i - 1].string == 'def'
            ):
                yield t._replace(string=fn[4:])
                continue

        # Emit unmodified token.
        yield t
219
218
# Header to add to bytecode files. This MUST be changed when
# ``replacetokens`` or any mechanism that changes semantics of module
# loading is changed. Otherwise cached bytecode may get loaded without
# the new transformation mechanisms applied.
BYTECODEHEADER = b'HG\x00\x0e'
225
224
class hgloader(importlib.machinery.SourceFileLoader):
    """Custom module loader that transforms source code.

    When the source code is converted to a code object, we transform
    certain patterns to be Python 3 compatible. This allows us to write
    code that is natively Python 2 and compatible with Python 3 without
    making the code excessively ugly.

    We do this by transforming the token stream between parse and compile.

    Implementing transformations invalidates caching assumptions made
    by the built-in importer, which stores a Python/bytecode version
    header on saved bytecode files and ignores the cache when it
    changes. The Mercurial transformations could change at any time, so
    we must also check that cached bytecode was generated with the
    current transformation code.

    We supplement the bytecode caching layer by wrapping ``get_data``
    and ``set_data``, which the ``SourceFileLoader`` uses to retrieve
    and save bytecode cache files, and add an extra ``HG<VERSION>``
    header (a literal ``HG`` plus 2 binary version bytes). As long as
    the version changes when semantics change, stale bytecode is
    invalidated.
    """

    def get_data(self, path):
        """Read ``path``, stripping/validating our header on .pyc files."""
        data = super(hgloader, self).get_data(path)

        # Plain source files pass through untouched.
        if not path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
            return data

        # There should be a header indicating the Mercurial transformation
        # version. If it doesn't exist or doesn't match the current version,
        # we raise an OSError because that is what
        # ``SourceFileLoader.get_code()`` expects when loading bytecode
        # paths to indicate the cached file is "bad."
        if data[0:2] != b'HG':
            raise OSError('no hg header')
        if data[0:4] != BYTECODEHEADER:
            raise OSError('hg header version mismatch')

        return data[4:]

    def set_data(self, path, data, *args, **kwargs):
        """Prepend our version header when writing bytecode caches."""
        if path.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
            data = BYTECODEHEADER + data

        return super(hgloader, self).set_data(path, data, *args, **kwargs)

    def source_to_code(self, data, path):
        """Perform token transformation before compilation."""
        buf = io.BytesIO(data)
        tokens = tokenize.tokenize(buf.readline)
        data = tokenize.untokenize(replacetokens(list(tokens), self.name))
        # Python's built-in importer strips frames from exceptions raised
        # for this code. Unfortunately, that mechanism isn't extensible
        # and our frame will be blamed for the import failure. There
        # are extremely hacky ways to do frame stripping. We haven't
        # implemented them because they are very ugly.
        return super(hgloader, self).source_to_code(data, path)
291
290
# We automagically register our custom importer as a side-effect of
# loading. This is necessary to ensure that any entry points are able
# to import mercurial.* modules without having to perform this
# registration themselves.
if not any(isinstance(x, hgpathentryfinder) for x in sys.meta_path):
    # meta_path is used before any implicit finders and before sys.path.
    sys.meta_path.insert(0, hgpathentryfinder())
@@ -1,550 +1,550 b''
1 # templatefilters.py - common template expansion filters
1 # templatefilters.py - common template expansion filters
2 #
2 #
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import re
11 import re
12 import time
12 import time
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 encoding,
16 encoding,
17 error,
17 error,
18 node,
18 node,
19 pycompat,
19 pycompat,
20 registrar,
20 registrar,
21 templateutil,
21 templateutil,
22 url,
22 url,
23 util,
23 util,
24 )
24 )
25 from .utils import (
25 from .utils import (
26 cborutil,
26 cborutil,
27 dateutil,
27 dateutil,
28 stringutil,
28 stringutil,
29 )
29 )
30
30
31 urlerr = util.urlerr
31 urlerr = util.urlerr
32 urlreq = util.urlreq
32 urlreq = util.urlreq
33
33
34 if pycompat.ispy3:
34 if pycompat.ispy3:
35 long = int
35 long = int
36
36
37 # filters are callables like:
37 # filters are callables like:
38 # fn(obj)
38 # fn(obj)
39 # with:
39 # with:
40 # obj - object to be filtered (text, date, list and so on)
40 # obj - object to be filtered (text, date, list and so on)
41 filters = {}
41 filters = {}
42
42
43 templatefilter = registrar.templatefilter(filters)
43 templatefilter = registrar.templatefilter(filters)
44
44
45
45
@templatefilter(b'addbreaks', intype=bytes)
def addbreaks(text):
    """Any text. Add an XHTML "<br />" tag before the end of
    every line except the last.
    """
    # The final line carries no trailing '\n', so it naturally gets no tag.
    return text.replace(b'\n', b'<br/>\n')
52
52
53
53
# (unit name, seconds per unit, abbreviation) triples, largest unit
# first; consumed by the "age" filter below.
agescales = [
    (b"year", 3600 * 24 * 365, b'Y'),
    (b"month", 3600 * 24 * 30, b'M'),
    (b"week", 3600 * 24 * 7, b'W'),
    (b"day", 3600 * 24, b'd'),
    (b"hour", 3600, b'h'),
    (b"minute", 60, b'm'),
    (b"second", 1, b's'),
]
63
63
64
64
@templatefilter(b'age', intype=templateutil.date)
def age(date, abbrev=False):
    """Date. Returns a human-readable date/time difference between the
    given date/time and the current date/time.
    """

    def _plural(unit, n):
        # Pluralize the unit name unless the count is exactly one.
        return unit if n == 1 else unit + b"s"

    def _fmt(unit, n, short):
        if abbrev:
            return b"%d%s" % (n, short)
        return b"%d %s" % (n, _plural(unit, n))

    now = time.time()
    then = date[0]
    future = then > now
    if future:
        delta = max(1, int(then - now))
        # More than ~30 years out: give up on a precise phrase.
        if delta > agescales[0][1] * 30:
            return b'in the distant future'
    else:
        delta = max(1, int(now - then))
        # More than two years ago: show the short date instead.
        if delta > agescales[0][1] * 2:
            return dateutil.shortdate(date)

    # Walk the scales largest-first; report the first unit with a
    # count of at least 2 (seconds always match as the fallback).
    for unit, secs, short in agescales:
        n = delta // secs
        if n >= 2 or secs == 1:
            if future:
                return b'%s from now' % _fmt(unit, n, short)
            return b'%s ago' % _fmt(unit, n, short)
100
100
101
101
@templatefilter(b'basename', intype=bytes)
def basename(path):
    """Any text. Treats the text as a path, and returns the last
    component of the path after splitting by the path separator.
    For example, "foo/bar/baz" becomes "baz" and "foo/bar//" becomes "".
    """
    # Delegate to the platform-aware stdlib implementation.
    return os.path.basename(path)
109
109
110
110
@templatefilter(b'cbor')
def cbor(obj):
    """Any object. Serializes the object to CBOR bytes."""
    # streamencode yields encoded chunks; join them into one bytes value.
    return b''.join(cborutil.streamencode(obj))
115
115
116
116
@templatefilter(b'commondir')
def commondir(filelist):
    """List of text. Treats each list item as file name with /
    as path separator and returns the longest common directory
    prefix shared by all list items.
    Returns the empty string if no common prefix exists.

    The list items are not normalized, i.e. "foo/../bar" is handled as
    file "bar" in the directory "foo/..". Leading slashes are ignored.

    For example, ["foo/bar/baz", "foo/baz/bar"] becomes "foo" and
    ["foo/bar", "baz"] becomes "".
    """

    def common(a, b):
        """Return the longest common prefix of component lists a and b."""
        # Truncate the longer list to the length of the shorter one so
        # only the overlapping prefix is compared. (Bug fix: the old
        # code did ``a = b[: len(a)]`` -- a no-op slice of the *other*
        # list -- which replaced a longer ``a`` wholesale by ``b`` and
        # wrongly reported ``b`` as a common prefix.)
        if len(a) > len(b):
            a = a[: len(b)]
        elif len(b) > len(a):
            b = b[: len(a)]
        if a == b:
            return a
        for i in pycompat.xrange(len(a)):
            if a[i] != b[i]:
                return a[:i]
        return a

    try:
        if not filelist:
            return b""
        # Directory components of each file, ignoring leading slashes.
        dirlist = [f.lstrip(b'/').split(b'/')[:-1] for f in filelist]
        if len(dirlist) == 1:
            return b'/'.join(dirlist[0])
        a = min(dirlist)
        b = max(dirlist)
        # The common prefix of a and b is shared with all
        # elements of the list since Python sorts lexicographical
        # and [1, x] after [1].
        return b'/'.join(common(a, b))
    except TypeError:
        raise error.ParseError(_(b'argument is not a list of text'))
157
157
158
158
@templatefilter(b'count')
def count(i):
    """List or text. Returns the length as an integer."""
    # EAFP: anything without a length is reported as a parse error.
    try:
        return len(i)
    except TypeError:
        raise error.ParseError(_(b'not countable'))
166
166
167
167
@templatefilter(b'dirname', intype=bytes)
def dirname(path):
    """Any text. Treats the text as a path, and strips the last
    component of the path after splitting by the path separator.
    """
    # Delegate to the platform-aware stdlib implementation.
    return os.path.dirname(path)
174
174
175
175
@templatefilter(b'domain', intype=bytes)
def domain(author):
    """Any text. Finds the first string that looks like an email
    address, and extracts just the domain component. Example: ``User
    <user@example.com>`` becomes ``example.com``.
    """
    at = author.find(b'@')
    if at == -1:
        # No '@' at all: nothing resembling an email address.
        return b''
    rest = author[at + 1 :]
    # Drop a trailing '>' (and anything after it) from "<user@host>".
    close = rest.find(b'>')
    if close >= 0:
        rest = rest[:close]
    return rest
190
190
191
191
@templatefilter(b'email', intype=bytes)
def email(text):
    """Any text. Extracts the first string that looks like an email
    address. Example: ``User <user@example.com>`` becomes
    ``user@example.com``.
    """
    # Delegate to the shared helper so all of hg parses addresses alike.
    return stringutil.email(text)
199
199
200
200
@templatefilter(b'escape', intype=bytes)
def escape(text):
    """Any text. Replaces the special XML/XHTML characters "&", "<"
    and ">" with XML entities, and filters out NUL characters.
    """
    # Strip NULs first; url.escape then entity-encodes the rest.
    cleaned = text.replace(b'\0', b'')
    return url.escape(cleaned, True)
207
207
208
208
# Regexes used by fill(); compiled lazily on first call to avoid paying
# the compilation cost at import time.
para_re = None
space_re = None
211
211
212
212
def fill(text, width, initindent=b'', hangindent=b''):
    '''fill many paragraphs with optional indentation.'''
    global para_re, space_re
    if para_re is None:
        # Paragraph separators: a blank line, or a newline followed by a
        # '-' or '*' list bullet.
        para_re = re.compile(b'(\n\n|\n\\s*[-*]\\s*)', re.M)
        space_re = re.compile(br' +')

    def findparas():
        # Yield (paragraph, separator) pairs covering all of `text`.
        start = 0
        while True:
            m = para_re.search(text, start)
            if not m:
                # Last paragraph: split off trailing whitespace so it is
                # re-appended verbatim after wrapping.  The split is done
                # on the unicode form so multi-byte characters in the
                # local encoding are not cut in half.
                uctext = encoding.unifromlocal(text[start:])
                w = len(uctext)
                while w > 0 and uctext[w - 1].isspace():
                    w -= 1
                yield (
                    encoding.unitolocal(uctext[:w]),
                    encoding.unitolocal(uctext[w:]),
                )
                break
            yield text[start : m.start(0)], m.group(1)
            start = m.end(1)

    # Wrap each paragraph, collapse runs of spaces introduced by the first
    # wrap, then wrap again applying the requested indents.
    return b"".join(
        [
            stringutil.wrap(
                space_re.sub(b' ', stringutil.wrap(para, width)),
                width,
                initindent,
                hangindent,
            )
            + rest
            for para, rest in findparas()
        ]
    )
249
249
250
250
@templatefilter(b'fill68', intype=bytes)
def fill68(text):
    """Any text. Wraps the text to fit in 68 columns."""
    # Convenience wrapper around fill() with a fixed width.
    return fill(text, 68)
255
255
256
256
@templatefilter(b'fill76', intype=bytes)
def fill76(text):
    """Any text. Wraps the text to fit in 76 columns."""
    # Convenience wrapper around fill() with a fixed width.
    return fill(text, 76)
261
261
262
262
263 @templatefilter(b'firstline', intype=bytes)
263 @templatefilter(b'firstline', intype=bytes)
def firstline(text):
    """Any text. Returns the first line of text."""
    # splitlines(True) keeps the line endings, so an explicit rstrip of
    # the CR/LF is needed; empty input yields no lines at all.
    lines = text.splitlines(True)
    if not lines:
        return b''
    return lines[0].rstrip(b'\r\n')
270
270
271
271
@templatefilter(b'hex', intype=bytes)
def hexfilter(text):
    """Any text. Convert a binary Mercurial node identifier into
    its long hexadecimal representation.
    """
    # Thin wrapper around node.hex so it is usable as a template filter.
    return node.hex(text)
278
278
279
279
@templatefilter(b'hgdate', intype=templateutil.date)
def hgdate(text):
    """Date. Returns the date as a pair of numbers: "1157407993
    25200" (Unix timestamp, timezone offset).
    """
    # The intype coerces `text` to a (timestamp, tzoffset) pair, which
    # the bytes %-format consumes as the two %d fields.
    return b"%d %d" % text
286
286
287
287
@templatefilter(b'isodate', intype=templateutil.date)
def isodate(text):
    """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
    +0200".
    """
    # %1/%2 are dateutil-specific codes; per the docstring example they
    # render the timezone offset.
    return dateutil.datestr(text, b'%Y-%m-%d %H:%M %1%2')
294
294
295
295
@templatefilter(b'isodatesec', intype=templateutil.date)
def isodatesec(text):
    """Date. Returns the date in ISO 8601 format, including
    seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
    filter.
    """
    # Same as isodate, but with a %S seconds field.
    return dateutil.datestr(text, b'%Y-%m-%d %H:%M:%S %1%2')
303
303
304
304
def indent(text, prefix):
    '''indent each non-empty line of text after first with prefix.'''
    lines = text.splitlines()
    num_lines = len(lines)
    # Remember whether the input ended with a newline so we can restore it;
    # splitlines() drops the trailing one.
    endswithnewline = text[-1:] == b'\n'

    def indenter():
        # enumerate() replaces the former index loop over
        # pycompat.xrange(num_lines) -- same order, clearer intent.
        for i, l in enumerate(lines):
            # Prefix every non-empty line except the first.
            if i and l.strip():
                yield prefix
            yield l
            # Re-insert newlines between lines, and the trailing one only
            # if the original text had it.
            if i < num_lines - 1 or endswithnewline:
                yield b'\n'

    return b"".join(indenter())
321
321
322
322
@templatefilter(b'json')
def json(obj, paranoid=True):
    """Any object. Serializes the object to a JSON formatted text."""
    # Hand-rolled serializer: output must be bytes and use Mercurial's
    # jsonescape rules, which the stdlib json module cannot provide.
    if obj is None:
        return b'null'
    elif obj is False:
        return b'false'
    elif obj is True:
        return b'true'
    elif isinstance(obj, (int, long, float)):
        # NOTE(review): bare `long` and `iteritems` below are Python 2
        # idioms -- confirm the py3 import path supplies them.
        return pycompat.bytestr(obj)
    elif isinstance(obj, bytes):
        return b'"%s"' % encoding.jsonescape(obj, paranoid=paranoid)
    elif isinstance(obj, type(u'')):
        # Unicode reaching here is a programming error: Mercurial deals
        # in bytes internally.
        raise error.ProgrammingError(
            b'Mercurial only does output with bytes: %r' % obj
        )
    elif util.safehasattr(obj, b'keys'):
        # Mapping-like: sort keys for deterministic output, recurse on
        # values.
        out = [
            b'"%s": %s'
            % (encoding.jsonescape(k, paranoid=paranoid), json(v, paranoid))
            for k, v in sorted(obj.iteritems())
        ]
        return b'{' + b', '.join(out) + b'}'
    elif util.safehasattr(obj, b'__iter__'):
        # Any other iterable becomes a JSON array.
        out = [json(i, paranoid) for i in obj]
        return b'[' + b', '.join(out) + b']'
    raise error.ProgrammingError(b'cannot encode %r' % obj)
351
351
352
352
@templatefilter(b'lower', intype=bytes)
def lower(text):
    """Any text. Converts the text to lowercase."""
    # encoding.lower presumably handles the local charset rather than
    # plain ASCII bytes.lower() -- project helper.
    return encoding.lower(text)
357
357
358
358
359 @templatefilter(b'nonempty', intype=bytes)
359 @templatefilter(b'nonempty', intype=bytes)
def nonempty(text):
    """Any text. Returns '(none)' if the string is empty."""
    # Any falsy value (empty bytes) maps to the placeholder.
    if text:
        return text
    return b"(none)"
363
363
364
364
@templatefilter(b'obfuscate', intype=bytes)
def obfuscate(text):
    """Any text. Returns the input text rendered as a sequence of
    XML entities.
    """
    # Decode from the local encoding first so each *character* (not each
    # byte) becomes one numeric XML entity; undecodable bytes are replaced.
    text = pycompat.unicode(text, pycompat.sysstr(encoding.encoding), r'replace')
    return b''.join([b'&#%d;' % ord(c) for c in text])
372
372
373
373
374 @templatefilter(b'permissions', intype=bytes)
374 @templatefilter(b'permissions', intype=bytes)
def permissions(flags):
    # Render flag bytes (b'l' symlink, b'x' executable) as an ls-style
    # permission string; the symlink flag takes precedence.
    if b"l" in flags:
        return b"lrwxrwxrwx"
    return b"-rwxr-xr-x" if b"x" in flags else b"-rw-r--r--"
381
381
382
382
@templatefilter(b'person', intype=bytes)
def person(author):
    """Any text. Returns the name before an email address,
    interpreting it as per RFC 5322.
    """
    # Thin wrapper: the RFC 5322 parsing lives in stringutil.person.
    return stringutil.person(author)
389
389
390
390
@templatefilter(b'revescape', intype=bytes)
def revescape(text):
    """Any text. Escapes all "special" characters, except @.
    Forward slashes are escaped twice to prevent web servers from prematurely
    unescaping them. For example, "@foo bar/baz" becomes "@foo%20bar%252Fbaz".
    """
    # quote() leaves '/' and '@' intact; '/' is then replaced with the
    # double-escaped %252F ('%25' is '%', so one unescape yields %2F).
    return urlreq.quote(text, safe=b'/@').replace(b'/', b'%252F')
398
398
399
399
@templatefilter(b'rfc3339date', intype=templateutil.date)
def rfc3339date(text):
    """Date. Returns a date using the Internet date format
    specified in RFC 3339: "2009-08-18T13:00:13+02:00".
    """
    # %1:%2 renders the timezone offset with a colon, per the example.
    return dateutil.datestr(text, b"%Y-%m-%dT%H:%M:%S%1:%2")
406
406
407
407
@templatefilter(b'rfc822date', intype=templateutil.date)
def rfc822date(text):
    """Date. Returns a date using the same format used in email
    headers: "Tue, 18 Aug 2009 13:00:13 +0200".
    """
    # dateutil.datestr handles the weekday/month names and %1%2 tz codes.
    return dateutil.datestr(text, b"%a, %d %b %Y %H:%M:%S %1%2")
414
414
415
415
@templatefilter(b'short', intype=bytes)
def short(text):
    """Changeset hash. Returns the short form of a changeset hash,
    i.e. a 12 hexadecimal digit string.
    """
    # Assumes `text` is a full hex node; simple truncation suffices.
    return text[:12]
422
422
423
423
424 @templatefilter(b'shortbisect', intype=bytes)
424 @templatefilter(b'shortbisect', intype=bytes)
def shortbisect(label):
    """Any text. Treats `label` as a bisection status, and
    returns a single-character representing the status (G: good, B: bad,
    S: skipped, U: untested, I: ignored). Returns single space if `text`
    is not a valid bisection status.
    """
    # The status word's uppercased first byte is the short form; empty
    # input yields a single space.
    return label[0:1].upper() if label else b' '
434
434
435
435
@templatefilter(b'shortdate', intype=templateutil.date)
def shortdate(text):
    """Date. Returns a date like "2006-09-18"."""
    # Thin wrapper around dateutil.shortdate.
    return dateutil.shortdate(text)
440
440
441
441
@templatefilter(b'slashpath', intype=bytes)
def slashpath(path):
    """Any text. Replaces the native path separator with slash."""
    # util.pconvert performs the platform-specific conversion.
    return util.pconvert(path)
446
446
447
447
@templatefilter(b'splitlines', intype=bytes)
def splitlines(text):
    """Any text. Split text into a list of lines."""
    # hybridlist presumably wraps the lines so templates can map/join
    # them -- project helper.
    return templateutil.hybridlist(text.splitlines(), name=b'line')
452
452
453
453
@templatefilter(b'stringescape', intype=bytes)
def stringescape(text):
    # No docstring on purpose: filter docstrings become user-visible help.
    # Delegates backslash-escaping to stringutil.escapestr.
    return stringutil.escapestr(text)
457
457
458
458
@templatefilter(b'stringify', intype=bytes)
def stringify(thing):
    """Any type. Turns the value into text by converting values into
    text and concatenating them.
    """
    # The intype=bytes declaration performs the actual coercion before
    # this function is called, so the body is an identity.
    return thing  # coerced by the intype
465
465
466
466
467 @templatefilter(b'stripdir', intype=bytes)
467 @templatefilter(b'stripdir', intype=bytes)
def stripdir(text):
    """Treat the text as path and strip a directory level, if
    possible. For example, "foo" and "foo/bar" becomes "foo".
    """
    # dirname() yields b'' for a bare name; in that case fall back to the
    # basename (also avoids shadowing the builtin `dir`).
    parent = os.path.dirname(text)
    if not parent:
        return os.path.basename(text)
    return parent
477
477
478
478
@templatefilter(b'tabindent', intype=bytes)
def tabindent(text):
    """Any text. Returns the text, with every non-empty line
    except the first starting with a tab character.
    """
    # Delegates to indent() with a literal tab prefix.
    return indent(text, b'\t')
485
485
486
486
@templatefilter(b'upper', intype=bytes)
def upper(text):
    """Any text. Converts the text to uppercase."""
    # encoding.upper presumably handles the local charset rather than
    # plain ASCII bytes.upper() -- project helper.
    return encoding.upper(text)
491
491
492
492
@templatefilter(b'urlescape', intype=bytes)
def urlescape(text):
    """Any text. Escapes all "special" characters. For example,
    "foo bar" becomes "foo%20bar".
    """
    # Plain percent-encoding; unlike revescape, '/' gets no special
    # treatment here.
    return urlreq.quote(text)
499
499
500
500
@templatefilter(b'user', intype=bytes)
def userfilter(text):
    """Any text. Returns a short representation of a user name or email
    address."""
    # Thin wrapper around stringutil.shortuser.
    return stringutil.shortuser(text)
506
506
507
507
@templatefilter(b'emailuser', intype=bytes)
def emailuser(text):
    """Any text. Returns the user portion of an email address."""
    # Thin wrapper around stringutil.emailuser.
    return stringutil.emailuser(text)
512
512
513
513
@templatefilter(b'utf8', intype=bytes)
def utf8(text):
    """Any text. Converts from the local character encoding to UTF-8."""
    # encoding.fromlocal performs the charset conversion (see docstring).
    return encoding.fromlocal(text)
518
518
519
519
520 @templatefilter(b'xmlescape', intype=bytes)
520 @templatefilter(b'xmlescape', intype=bytes)
def xmlescape(text):
    # Entity-encode the XML/XHTML special characters.  '&' MUST be first
    # so entities introduced by later replacements are not re-escaped.
    replacements = [
        (b'&', b'&amp;'),
        (b'<', b'&lt;'),
        (b'>', b'&gt;'),
        (b'"', b'&quot;'),
        (b"'", b'&#39;'),  # &apos; invalid in HTML
    ]
    for char, entity in replacements:
        text = text.replace(char, entity)
    # Replace control bytes that are invalid in XML with spaces.
    return re.sub(b'[\x00-\x08\x0B\x0C\x0E-\x1F]', b' ', text)
530
530
531
531
def websub(text, websubtable):
    """:websub: Any text. Only applies to hgweb. Applies the regular
    expression replacements defined in the websub section.
    """
    # No table (None or empty) means the text passes through untouched.
    if not websubtable:
        return text
    for pattern, replacement in websubtable:
        text = pattern.sub(replacement, text)
    return text
540
540
541
541
def loadfilter(ui, extname, registrarobj):
    """Load template filter from specified registrarobj
    """
    # Copy every registered filter into this module's `filters` table.
    # NOTE(review): iteritems() is a Python 2 idiom -- confirm the py3
    # import path supplies it.
    for name, func in registrarobj._table.iteritems():
        filters[name] = func
547
547
548
548
# tell hggettext to extract docstrings from these functions:
i18nfunctions = filters.values()
General Comments 0
You need to be logged in to leave comments. Login now