##// END OF EJS Templates
py3: replace os.altsep with pycompat.altsep...
Pulkit Goyal -
r30625:bcf4a975 default
parent child Browse files
Show More
@@ -1,200 +1,200
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
1 # hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import mimetypes
12 import mimetypes
13 import os
13 import os
14
14
15 from .. import (
15 from .. import (
16 pycompat,
16 pycompat,
17 util,
17 util,
18 )
18 )
19
19
# WSGI-capable HTTP server module, resolved through util for py2/py3 compat.
httpserver = util.httpserver

# Symbolic names for the HTTP status codes hgweb raises/returns.
HTTP_OK = 200
HTTP_NOT_MODIFIED = 304
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_SERVER_ERROR = 500
30
30
31
31
def ismember(ui, username, userlist):
    """Check if username is a member of userlist.

    If userlist has a single '*' member, all users are considered members.
    Can be overridden by extensions to provide more complex authorization
    schemes.
    """
    return userlist == ['*'] or username in userlist
40
40
def checkauthz(hgweb, req, op):
    '''Check permission for operation based on request data (including
    authentication info). Return if op allowed, else raise an ErrorResponse
    exception.'''

    user = req.env.get('REMOTE_USER')

    # deny_read trumps everything: an anonymous user, or a listed one,
    # cannot even read.
    deny_read = hgweb.configlist('web', 'deny_read')
    if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')

    allow_read = hgweb.configlist('web', 'allow_read')
    if allow_read and (not ismember(hgweb.repo.ui, user, allow_read)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')

    if op == 'pull' and not hgweb.allowpull:
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
    elif op == 'pull' or op is None: # op is None for interface requests
        return

    # enforce that you can only push using POST requests
    if req.env['REQUEST_METHOD'] != 'POST':
        msg = 'push requires POST request'
        raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)

    # require ssl by default for pushing, auth info cannot be sniffed
    # and replayed
    scheme = req.env.get('wsgi.url_scheme')
    if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
        raise ErrorResponse(HTTP_FORBIDDEN, 'ssl required')

    deny = hgweb.configlist('web', 'deny_push')
    if deny and (not user or ismember(hgweb.repo.ui, user, deny)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')

    # push is allow-list only: an empty allow_push denies everyone.
    allow = hgweb.configlist('web', 'allow_push')
    if not (allow and ismember(hgweb.repo.ui, user, allow)):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
79
79
# Hooks for hgweb permission checks; extensions can add hooks here.
# Each hook is invoked like this: hook(hgweb, request, operation),
# where operation is either read, pull or push. Hooks should either
# raise an ErrorResponse exception, or just return.
#
# It is possible to do both authentication and authorization through
# this.
permhooks = [checkauthz]
88
88
89
89
class ErrorResponse(Exception):
    """HTTP error carrying a status code and optional response headers.

    If no message is given, the standard reason phrase for the status
    code is used.
    """
    def __init__(self, code, message=None, headers=None):
        if message is None:
            message = _statusmessage(code)
        Exception.__init__(self, message)
        self.code = code
        # Avoid a mutable default argument: a shared [] would be aliased
        # across every ErrorResponse instance.
        if headers is None:
            headers = []
        self.headers = headers
97
97
class continuereader(object):
    """Wrap a request body file, emitting '100 Continue' on first read.

    HTTP clients that sent 'Expect: 100-continue' wait for this interim
    response before transmitting the body, so it must be written lazily,
    only when the handler actually starts reading.
    """
    def __init__(self, f, write):
        self.f = f
        self._write = write
        self.continued = False  # has the 100 Continue line been sent yet?

    def read(self, amt=-1):
        if not self.continued:
            self.continued = True
            self._write('HTTP/1.1 100 Continue\r\n\r\n')
        return self.f.read(amt)

    def __getattr__(self, attr):
        # Delegate only the safe file-like methods; everything else is an
        # error (name the attribute so failures are debuggable).
        if attr in ('close', 'readline', 'readlines', '__iter__'):
            return getattr(self.f, attr)
        raise AttributeError(attr)
114
114
def _statusmessage(code):
    """Return the short reason phrase for an HTTP status code.

    Unknown codes fall back to 'Error'.
    """
    responses = httpserver.basehttprequesthandler.responses
    return responses.get(code, ('Error', 'Unknown error'))[0]
118
118
def statusmessage(code, message=None):
    """Format an HTTP status line fragment, e.g. '404 Not Found'."""
    return '%d %s' % (code, message or _statusmessage(code))
121
121
def get_stat(spath, fn):
    """stat fn (joined under spath) if it exists, spath otherwise"""
    cl_path = os.path.join(spath, fn)
    if os.path.exists(cl_path):
        return os.stat(cl_path)
    else:
        return os.stat(spath)
129
129
def get_mtime(spath):
    """Return the mtime of the changelog (or of spath if none exists),
    used as a cheap repository modification timestamp."""
    return get_stat(spath, "00changelog.i").st_mtime
132
132
def staticfile(directory, fname, req):
    """return a file inside directory with guessed Content-Type header

    fname always uses '/' as directory separator and isn't allowed to
    contain unusual path components.
    Content-Type is guessed using the mimetypes module.
    Return an empty string if fname is illegal or file not found.

    """
    parts = fname.split('/')
    for part in parts:
        # Reject empty components, '.'/'..' traversal, and any component
        # containing a native path separator (primary or alternate).
        if (part in ('', os.curdir, os.pardir) or
            pycompat.ossep in part or
            pycompat.osaltsep is not None and pycompat.osaltsep in part):
            return
    fpath = os.path.join(*parts)
    if isinstance(directory, str):
        directory = [directory]
    # First directory containing the file wins; otherwise the last
    # candidate path is stat'ed below and raises HTTP_NOT_FOUND.
    for d in directory:
        path = os.path.join(d, fpath)
        if os.path.exists(path):
            break
    try:
        os.stat(path)
        ct = mimetypes.guess_type(path)[0] or "text/plain"
        fp = open(path, 'rb')
        data = fp.read()
        fp.close()
        req.respond(HTTP_OK, ct, body=data)
    except TypeError:
        raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
    except OSError as err:
        if err.errno == errno.ENOENT:
            raise ErrorResponse(HTTP_NOT_FOUND)
        else:
            raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
169
169
def paritygen(stripecount, offset=0):
    """count parity of horizontal stripes for easier reading

    Yields 0/1 forever, flipping every ``stripecount`` items (never
    flipping when stripecount is 0).
    """
    if stripecount and offset:
        # account for offset, e.g. due to building the list in reverse
        count = (stripecount + offset) % stripecount
        # Use floor division: on Python 3, '/' yields a float and
        # 'float & 1' raises TypeError.
        parity = (stripecount + offset) // stripecount & 1
    else:
        count = 0
        parity = 0
    while True:
        yield parity
        count += 1
        if stripecount and count >= stripecount:
            parity = 1 - parity
            count = 0
185
185
def get_contact(config):
    """Return repo contact information or empty string.

    web.contact is the primary source, but if that is not set, try
    ui.username or $EMAIL as a fallback to display something useful.
    """
    return (config("web", "contact") or
            config("ui", "username") or
            os.environ.get("EMAIL") or "")
195
195
def caching(web, req):
    """Handle ETag-based caching for a hgweb request.

    Raises ErrorResponse(HTTP_NOT_MODIFIED) when the client's
    If-None-Match matches our weak validator; otherwise appends the
    ETag header so future requests can hit the 304 path.
    """
    tag = 'W/"%s"' % web.mtime
    if req.env.get('HTTP_IF_NONE_MATCH') == tag:
        raise ErrorResponse(HTTP_NOT_MODIFIED)
    req.headers.append(('ETag', tag))
@@ -1,1268 +1,1268
1 # templater.py - template expansion for output
1 # templater.py - template expansion for output
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import os
10 import os
11 import re
11 import re
12 import types
12 import types
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 config,
16 config,
17 error,
17 error,
18 minirst,
18 minirst,
19 parser,
19 parser,
20 pycompat,
20 pycompat,
21 registrar,
21 registrar,
22 revset as revsetmod,
22 revset as revsetmod,
23 templatefilters,
23 templatefilters,
24 templatekw,
24 templatekw,
25 util,
25 util,
26 )
26 )
27
27
28 # template parsing
28 # template parsing
29
29
30 elements = {
30 elements = {
31 # token-type: binding-strength, primary, prefix, infix, suffix
31 # token-type: binding-strength, primary, prefix, infix, suffix
32 "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
32 "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
33 ",": (2, None, None, ("list", 2), None),
33 ",": (2, None, None, ("list", 2), None),
34 "|": (5, None, None, ("|", 5), None),
34 "|": (5, None, None, ("|", 5), None),
35 "%": (6, None, None, ("%", 6), None),
35 "%": (6, None, None, ("%", 6), None),
36 ")": (0, None, None, None, None),
36 ")": (0, None, None, None, None),
37 "+": (3, None, None, ("+", 3), None),
37 "+": (3, None, None, ("+", 3), None),
38 "-": (3, None, ("negate", 10), ("-", 3), None),
38 "-": (3, None, ("negate", 10), ("-", 3), None),
39 "*": (4, None, None, ("*", 4), None),
39 "*": (4, None, None, ("*", 4), None),
40 "/": (4, None, None, ("/", 4), None),
40 "/": (4, None, None, ("/", 4), None),
41 "integer": (0, "integer", None, None, None),
41 "integer": (0, "integer", None, None, None),
42 "symbol": (0, "symbol", None, None, None),
42 "symbol": (0, "symbol", None, None, None),
43 "string": (0, "string", None, None, None),
43 "string": (0, "string", None, None, None),
44 "template": (0, "template", None, None, None),
44 "template": (0, "template", None, None, None),
45 "end": (0, None, None, None, None),
45 "end": (0, None, None, None, None),
46 }
46 }
47
47
def tokenize(program, start, end, term=None):
    """Parse a template expression into a stream of tokens, which must end
    with term if specified.

    Yields (type, value, position) tuples; the final token is always
    ('end', None, pos).
    """
    pos = start
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(,)%|+-*/": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted templates
            s = pos + 1
            data, pos = _parsetemplate(program, s, end, c)
            yield ('template', data, s)
            pos -= 1
        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
            # handle quoted strings
            c = program[pos + 1]
            s = pos = pos + 2
            while pos < end: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isdigit():
            s = pos
            while pos < end:
                d = program[pos]
                if not d.isdigit():
                    break
                pos += 1
            yield ('integer', program[s:pos], s)
            pos -= 1
        elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
              or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
            # handle escaped quoted strings for compatibility with 2.9.2-3.4,
            # where some of nested templates were preprocessed as strings and
            # then compiled. therefore, \"...\" was allowed. (issue4733)
            #
            # processing flow of _evalifliteral() at 5ab28a2e9962:
            # outer template string    -> stringify()  -> compiletemplate()
            # ------------------------    ------------    ------------------
            # {f("\\\\ {g(\"\\\"\")}"}    \\ {g("\"")}    [r'\\', {g("\"")}]
            #             ~~~~~~~~
            #             escaped quoted string
            if c == 'r':
                pos += 1
                token = 'string'
            else:
                token = 'template'
            quote = program[pos:pos + 2]
            s = pos = pos + 2
            while pos < end: # find closing escaped quote
                if program.startswith('\\\\\\', pos, end):
                    pos += 4 # skip over double escaped characters
                    continue
                if program.startswith(quote, pos, end):
                    # interpret as if it were a part of an outer string
                    data = parser.unescapestr(program[s:pos])
                    if token == 'template':
                        data = _parsetemplate(data, 0, len(data))[0]
                    yield (token, data, s)
                    pos += 1
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            pos -= 1
        elif c == term:
            yield ('end', None, pos + 1)
            return
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    if term:
        raise error.ParseError(_("unterminated template expansion"), start)
    yield ('end', None, pos)
141
141
def _parsetemplate(tmpl, start, stop, quote=''):
    r"""Parse a template string into a list of parsed nodes.

    Returns (parsed, pos) where pos is one past the last consumed
    character (past the closing quote when one is given).

    >>> _parsetemplate('foo{bar}"baz', 0, 12)
    ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
    >>> _parsetemplate('foo{bar}"baz', 0, 12, quote='"')
    ([('string', 'foo'), ('symbol', 'bar')], 9)
    >>> _parsetemplate('foo"{bar}', 0, 9, quote='"')
    ([('string', 'foo')], 4)
    >>> _parsetemplate(r'foo\"bar"baz', 0, 12, quote='"')
    ([('string', 'foo"'), ('string', 'bar')], 9)
    >>> _parsetemplate(r'foo\\"bar', 0, 10, quote='"')
    ([('string', 'foo\\')], 6)
    """
    parsed = []
    sepchars = '{' + quote
    pos = start
    p = parser.parser(elements)
    while pos < stop:
        # find the next separator ('{' or closing quote); the key makes
        # min() prefer the nearest non-negative find() result
        n = min((tmpl.find(c, pos, stop) for c in sepchars),
                key=lambda n: (n < 0, n))
        if n < 0:
            parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
            pos = stop
            break
        c = tmpl[n]
        # count trailing backslashes to decide whether the separator is
        # itself escaped
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{')
            parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
            pos = n + 1
            continue
        if n > pos:
            parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
        if c == quote:
            return parsed, n + 1

        parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
        parsed.append(parseres)

    if quote:
        raise error.ParseError(_("unterminated string"), start)
    return parsed, pos
184
184
185 def _unnesttemplatelist(tree):
185 def _unnesttemplatelist(tree):
186 """Expand list of templates to node tuple
186 """Expand list of templates to node tuple
187
187
188 >>> def f(tree):
188 >>> def f(tree):
189 ... print prettyformat(_unnesttemplatelist(tree))
189 ... print prettyformat(_unnesttemplatelist(tree))
190 >>> f(('template', []))
190 >>> f(('template', []))
191 ('string', '')
191 ('string', '')
192 >>> f(('template', [('string', 'foo')]))
192 >>> f(('template', [('string', 'foo')]))
193 ('string', 'foo')
193 ('string', 'foo')
194 >>> f(('template', [('string', 'foo'), ('symbol', 'rev')]))
194 >>> f(('template', [('string', 'foo'), ('symbol', 'rev')]))
195 (template
195 (template
196 ('string', 'foo')
196 ('string', 'foo')
197 ('symbol', 'rev'))
197 ('symbol', 'rev'))
198 >>> f(('template', [('symbol', 'rev')])) # template(rev) -> str
198 >>> f(('template', [('symbol', 'rev')])) # template(rev) -> str
199 (template
199 (template
200 ('symbol', 'rev'))
200 ('symbol', 'rev'))
201 >>> f(('template', [('template', [('string', 'foo')])]))
201 >>> f(('template', [('template', [('string', 'foo')])]))
202 ('string', 'foo')
202 ('string', 'foo')
203 """
203 """
204 if not isinstance(tree, tuple):
204 if not isinstance(tree, tuple):
205 return tree
205 return tree
206 op = tree[0]
206 op = tree[0]
207 if op != 'template':
207 if op != 'template':
208 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
208 return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])
209
209
210 assert len(tree) == 2
210 assert len(tree) == 2
211 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
211 xs = tuple(_unnesttemplatelist(x) for x in tree[1])
212 if not xs:
212 if not xs:
213 return ('string', '') # empty template ""
213 return ('string', '') # empty template ""
214 elif len(xs) == 1 and xs[0][0] == 'string':
214 elif len(xs) == 1 and xs[0][0] == 'string':
215 return xs[0] # fast path for string with no template fragment "x"
215 return xs[0] # fast path for string with no template fragment "x"
216 else:
216 else:
217 return (op,) + xs
217 return (op,) + xs
218
218
def parse(tmpl):
    """Parse template string into tree"""
    parsed, pos = _parsetemplate(tmpl, 0, len(tmpl))
    # an unquoted top-level template has no terminator to stop early at
    assert pos == len(tmpl), 'unquoted template should be consumed'
    return _unnesttemplatelist(('template', parsed))
224
224
def _parseexpr(expr):
    """Parse a template expression into tree

    >>> _parseexpr('"foo"')
    ('string', 'foo')
    >>> _parseexpr('foo(bar)')
    ('func', ('symbol', 'foo'), ('symbol', 'bar'))
    >>> _parseexpr('foo(')
    Traceback (most recent call last):
      ...
    ParseError: ('not a prefix: end', 4)
    >>> _parseexpr('"foo" "bar"')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 7)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(expr, 0, len(expr)))
    if pos != len(expr):
        # trailing garbage after a complete expression
        raise error.ParseError(_('invalid token'), pos)
    return _unnesttemplatelist(tree)
246
246
def prettyformat(tree):
    """Render a parsed template tree as an indented debug string."""
    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))
249
249
def compileexp(exp, context, curmethods):
    """Compile parsed template tree to (func, data) pair"""
    t = exp[0]
    if t in curmethods:
        return curmethods[t](exp, context)
    raise error.ParseError(_("unknown method '%s'") % t)
256
256
257 # template evaluation
257 # template evaluation
258
258
def getsymbol(exp):
    """Return the name of a 'symbol' node, or raise ParseError."""
    if exp[0] == 'symbol':
        return exp[1]
    raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
263
263
def getlist(x):
    """Flatten a (possibly nested) 'list' node into a Python list."""
    if not x:
        return []
    if x[0] == 'list':
        # 'list' nodes are left-nested: ('list', rest, last)
        return getlist(x[1]) + [x[2]]
    return [x]
270
270
def gettemplate(exp, context):
    """Compile given template tree or load named template from map file;
    returns (func, data) pair"""
    if exp[0] in ('template', 'string'):
        return compileexp(exp, context, methods)
    if exp[0] == 'symbol':
        # unlike runsymbol(), here 'symbol' is always taken as template name
        # even if it exists in mapping. this allows us to override mapping
        # by web templates, e.g. 'changelogtag' is redefined in map file.
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))
282
282
def evalfuncarg(context, mapping, arg):
    """Evaluate a compiled (func, data) argument to a concrete value."""
    func, data = arg
    # func() may return string, generator of strings or arbitrary object such
    # as date tuple, but filter does not want generator.
    thing = func(context, mapping, data)
    if isinstance(thing, types.GeneratorType):
        thing = stringify(thing)
    return thing
291
291
def evalboolean(context, mapping, arg):
    """Evaluate given argument as boolean, but also takes boolean literals"""
    func, data = arg
    if func is runsymbol:
        # try keyword lookup first; fall back to parsing the bare symbol
        # as a boolean literal ('true'/'false'/...)
        thing = func(context, mapping, data, default=None)
        if thing is None:
            # not a template keyword, takes as a boolean literal
            thing = util.parsebool(data)
    else:
        thing = func(context, mapping, data)
    if isinstance(thing, bool):
        return thing
    # other objects are evaluated as strings, which means 0 is True, but
    # empty dict/list should be False as they are expected to be ''
    return bool(stringify(thing))
307
307
def evalinteger(context, mapping, arg, err):
    """Evaluate arg and coerce the result to int; raise ParseError(err)
    when the value cannot be interpreted as an integer."""
    value = evalfuncarg(context, mapping, arg)
    try:
        return int(value)
    except (TypeError, ValueError):
        raise error.ParseError(err)
314
314
def evalstring(context, mapping, arg):
    """Evaluate arg and flatten the result to a string."""
    fn, payload = arg
    return stringify(fn(context, mapping, payload))
318
318
def evalstringliteral(context, mapping, arg):
    """Evaluate given argument as string template, but returns symbol name
    if it is unknown"""
    fn, payload = arg
    if fn is not runsymbol:
        thing = fn(context, mapping, payload)
    else:
        # fall back to the symbol's own name when it isn't a known keyword
        thing = fn(context, mapping, payload, default=payload)
    return stringify(thing)
328
328
def runinteger(context, mapping, data):
    """Return the integer value of a parsed integer literal."""
    return int(data)
331
331
def runstring(context, mapping, data):
    """Return a string literal unchanged."""
    return data
334
334
def _recursivesymbolblocker(key):
    """Return a callable poison value that aborts when `key` expands itself."""
    def showrecursion(**args):
        raise error.Abort(_("recursive reference '%s' in template") % key)
    return showrecursion
339
339
def _runrecursivesymbol(context, mapping, key):
    # compiled-template counterpart of _recursivesymbolblocker: expanding
    # the poisoned symbol aborts immediately
    raise error.Abort(_("recursive reference '%s' in template") % key)
342
342
def runsymbol(context, mapping, key, default=''):
    """Resolve template symbol `key`.

    Lookup order: the local mapping, the context defaults, then a
    template of the same name.  Callable values (keyword functions) are
    invoked with the full mapping.  Unknown symbols yield `default`.
    """
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        # put poison to cut recursion. we can't move this to parsing phase
        # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
        safemapping = mapping.copy()
        safemapping[key] = _recursivesymbolblocker(key)
        try:
            v = context.process(key, safemapping)
        except TemplateNotFound:
            v = default
    if callable(v):
        # keyword function: expand lazily with the current mapping
        return v(**mapping)
    return v
359
359
def buildtemplate(exp, context):
    """Compile a parsed template tree into (runtemplate, compiled parts)."""
    compiled = [compileexp(child, context, methods) for child in exp[1:]]
    return (runtemplate, compiled)
363
363
def runtemplate(context, mapping, template):
    """Yield the expansion of each compiled (func, data) pair in order."""
    for fn, payload in template:
        yield fn(context, mapping, payload)
367
367
def buildfilter(exp, context):
    """Compile an '<expr>|name' node into a filter or single-arg function."""
    arg = compileexp(exp[1], context, methods)
    name = getsymbol(exp[2])
    if name in context._filters:
        return (runfilter, (arg, context._filters[name]))
    if name in funcs:
        return (funcs[name], [arg])
    raise error.ParseError(_("unknown function '%s'") % name)
378
378
def runfilter(context, mapping, data):
    """Apply a template filter to the evaluated value of its argument.

    On a type mismatch, aborts with a message naming the offending
    filter and the keyword it was applied to.
    """
    arg, filt = data
    thing = evalfuncarg(context, mapping, arg)
    try:
        return filt(thing)
    except (ValueError, AttributeError, TypeError):
        # recover the keyword/symbol name for the error message
        if isinstance(arg[1], tuple):
            dt = arg[1][1]
        else:
            dt = arg[1]
        # 'func_name' exists only on Python 2; fall back to __name__ so
        # this error path also works on Python 3
        fname = getattr(filt, 'func_name', None) or filt.__name__
        raise error.Abort(_("template filter '%s' is not compatible with "
                            "keyword '%s'") % (fname, dt))
391
391
def buildmap(exp, context):
    """Compile '<expr> % <template>' into (runmap, state)."""
    keyfunc, keydata = compileexp(exp[1], context, methods)
    tfunc, tdata = gettemplate(exp[2], context)
    return (runmap, (keyfunc, keydata, tfunc, tdata))
396
396
def runmap(context, mapping, data):
    """Expand '<expr> % <template>' for each member of the evaluated expr.

    The expression's value is iterated; dict members are overlaid onto a
    copy of the current mapping before rendering the template, while
    non-dict members (already fully expanded) are yielded unchanged.
    """
    func, data, tfunc, tdata = data
    d = func(context, mapping, data)
    if util.safehasattr(d, 'itermaps'):
        # mappable objects produce their own per-member mappings
        diter = d.itermaps()
    else:
        try:
            diter = iter(d)
        except TypeError:
            # tailor the message depending on whether a keyword or an
            # arbitrary expression produced the non-iterable value
            if func is runsymbol:
                raise error.ParseError(_("keyword '%s' is not iterable") % data)
            else:
                raise error.ParseError(_("%r is not iterable") % d)

    for i in diter:
        lm = mapping.copy()
        if isinstance(i, dict):
            lm.update(i)
            # keep the outer changeset's node reachable in nested templates
            lm['originalnode'] = mapping.get('node')
            yield tfunc(context, lm, tdata)
        else:
            # v is not an iterable of dicts, this happen when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield i
422
422
def buildnegate(exp, context):
    """Compile unary '-' into (runnegate, compiled operand)."""
    operand = compileexp(exp[1], context, exprmethods)
    return (runnegate, operand)
426
426
def runnegate(context, mapping, data):
    """Evaluate the operand as an integer and return its negation."""
    value = evalinteger(context, mapping, data,
                        _('negation needs an integer argument'))
    return -value
431
431
def buildarithmetic(exp, context, func):
    """Compile a binary arithmetic node into (runarithmetic, state)."""
    lhs = compileexp(exp[1], context, exprmethods)
    rhs = compileexp(exp[2], context, exprmethods)
    return (runarithmetic, (func, lhs, rhs))
436
436
def runarithmetic(context, mapping, data):
    """Apply a binary integer operation; abort on division by zero."""
    op, leftarg, rightarg = data
    a = evalinteger(context, mapping, leftarg,
                    _('arithmetic only defined on integers'))
    b = evalinteger(context, mapping, rightarg,
                    _('arithmetic only defined on integers'))
    try:
        return op(a, b)
    except ZeroDivisionError:
        raise error.Abort(_('division by zero is not defined'))
447
447
def buildfunc(exp, context):
    """Compile a 'name(args...)' call into a built-in function or a
    unary filter; built-ins take precedence over filters."""
    name = getsymbol(exp[1])
    args = [compileexp(x, context, exprmethods) for x in getlist(exp[2])]
    if name in funcs:
        return (funcs[name], args)
    if name in context._filters:
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % name)
        return (runfilter, (args[0], context._filters[name]))
    raise error.ParseError(_("unknown function '%s'") % name)
460
460
# dict of template built-in functions, populated by the @templatefunc
# decorator applied to each function below
funcs = {}

# decorator registering a template function under its help signature
templatefunc = registrar.templatefunc(funcs)
465
465
@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    fmt = evalstring(context, mapping, args[1]) if len(args) == 2 else None
    try:
        if fmt is None:
            return util.datestr(date)
        return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date information"))
487
487
@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        # return a one-element pattern list for argument i, if supplied
        # and non-empty
        if i >= len(args):
            return []
        s = evalstring(context, mapping, args[i]).strip()
        return [s] if s else []

    ctx = mapping['ctx']
    matcher = ctx.match([], getpatterns(0), getpatterns(1))
    return ''.join(ctx.diff(match=matcher))
507
507
@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if len(args) != 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    matched = list(ctx.matches(ctx.match([raw])))
    return templatekw.showlist("file", matched, **mapping)
521
521
@templatefunc('fill(text[, width[, initialident[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    width = 76
    initindent = ''
    hangindent = ''
    if len(args) > 1:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        # the indent arguments are optional; IndexError simply means
        # they were not supplied
        try:
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
545
545
@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if not (2 <= len(args) <= 4):
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args[1],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))
    text = evalstring(context, mapping, args[0])

    fillchar = evalstring(context, mapping, args[2]) if len(args) > 2 else ' '
    left = evalboolean(context, mapping, args[3]) if len(args) > 3 else False

    if left:
        return text.rjust(width, fillchar)
    return text.ljust(width, fillchar)
571
571
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])
    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)
592
592
@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    return dictarg.get(evalfuncarg(context, mapping, args[1]))
609
609
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    # pick the branch to expand; None means no else-branch was given
    if evalboolean(context, mapping, args[0]):
        branch = args[1]
    elif len(args) == 3:
        branch = args[2]
    else:
        branch = None
    if branch is not None:
        yield branch[0](context, mapping, branch[1])
623
623
@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    needle = evalstring(context, mapping, args[0])
    haystack = evalfuncarg(context, mapping, args[1])

    # pick the branch to expand; None means no else-branch was given
    if needle in haystack:
        branch = args[2]
    elif len(args) == 4:
        branch = args[3]
    else:
        branch = None
    if branch is not None:
        yield branch[0](context, mapping, branch[1])
639
639
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    lhs = evalstring(context, mapping, args[0])
    rhs = evalstring(context, mapping, args[1])
    if lhs == rhs:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])
654
654
@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if util.safehasattr(joinset, 'itermaps'):
        # mappable types render each member through their join format
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset.itermaps()]

    joiner = evalstring(context, mapping, args[1]) if len(args) > 1 else " "

    first = True
    for item in joinset:
        if not first:
            yield joiner
        first = False
        yield item
678
678
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = mapping['ui']
    content = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    effects = evalstringliteral(context, mapping, args[0])

    return ui.label(content, effects)
695
695
@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    pattern = evalstring(context, mapping, args[0]) if args else None
    return templatekw.showlatesttags(pattern, **mapping)
709
709
@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date information"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        if isinstance(tz, str):
            # try a formatted timezone string first (e.g. "+0200")
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                # trailing garbage: discard and retry as a raw integer
                tzoffset = None
        if tzoffset is None:
            # fall back to an offset given in seconds
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        # no tz argument: use the local timezone's offset
        tzoffset = util.makedate()[1]
    # return (unixtime, offset) in the standard hg date-tuple form
    return (date[0], tzoffset)
740
740
@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that a / b + a mod b == a"""
    if len(args) != 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    return runarithmetic(context, mapping,
                         (lambda a, b: a % b, args[0], args[1]))
750
750
@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = mapping['ctx'].repo()
    return repo.pathto(evalstring(context, mapping, args[0]))
762
762
@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        # run a revset expression against the repository
        m = revsetmod.match(repo.ui, expr)
        return m(repo)

    if len(args) > 1:
        # parameterized query: interpolate the arguments and run
        # uncached, since the cache below is keyed on the raw string only
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetmod.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        # plain queries are cached per rendering in mapping['cache']
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs

    return templatekw.showrevslist("revision", revs, **mapping)
793
793
794 @templatefunc('rstdoc(text, style)')
794 @templatefunc('rstdoc(text, style)')
795 def rstdoc(context, mapping, args):
795 def rstdoc(context, mapping, args):
796 """Format reStructuredText."""
796 """Format reStructuredText."""
797 if len(args) != 2:
797 if len(args) != 2:
798 # i18n: "rstdoc" is a keyword
798 # i18n: "rstdoc" is a keyword
799 raise error.ParseError(_("rstdoc expects two arguments"))
799 raise error.ParseError(_("rstdoc expects two arguments"))
800
800
801 text = evalstring(context, mapping, args[0])
801 text = evalstring(context, mapping, args[0])
802 style = evalstring(context, mapping, args[1])
802 style = evalstring(context, mapping, args[1])
803
803
804 return minirst.format(text, style=style, keep=['verbose'])
804 return minirst.format(text, style=style, keep=['verbose'])
805
805
806 @templatefunc('separate(sep, args)')
806 @templatefunc('separate(sep, args)')
807 def separate(context, mapping, args):
807 def separate(context, mapping, args):
808 """Add a separator between non-empty arguments."""
808 """Add a separator between non-empty arguments."""
809 if not args:
809 if not args:
810 # i18n: "separate" is a keyword
810 # i18n: "separate" is a keyword
811 raise error.ParseError(_("separate expects at least one argument"))
811 raise error.ParseError(_("separate expects at least one argument"))
812
812
813 sep = evalstring(context, mapping, args[0])
813 sep = evalstring(context, mapping, args[0])
814 first = True
814 first = True
815 for arg in args[1:]:
815 for arg in args[1:]:
816 argstr = evalstring(context, mapping, arg)
816 argstr = evalstring(context, mapping, arg)
817 if not argstr:
817 if not argstr:
818 continue
818 continue
819 if first:
819 if first:
820 first = False
820 first = False
821 else:
821 else:
822 yield sep
822 yield sep
823 yield argstr
823 yield argstr
824
824
825 @templatefunc('shortest(node, minlength=4)')
825 @templatefunc('shortest(node, minlength=4)')
826 def shortest(context, mapping, args):
826 def shortest(context, mapping, args):
827 """Obtain the shortest representation of
827 """Obtain the shortest representation of
828 a node."""
828 a node."""
829 if not (1 <= len(args) <= 2):
829 if not (1 <= len(args) <= 2):
830 # i18n: "shortest" is a keyword
830 # i18n: "shortest" is a keyword
831 raise error.ParseError(_("shortest() expects one or two arguments"))
831 raise error.ParseError(_("shortest() expects one or two arguments"))
832
832
833 node = evalstring(context, mapping, args[0])
833 node = evalstring(context, mapping, args[0])
834
834
835 minlength = 4
835 minlength = 4
836 if len(args) > 1:
836 if len(args) > 1:
837 minlength = evalinteger(context, mapping, args[1],
837 minlength = evalinteger(context, mapping, args[1],
838 # i18n: "shortest" is a keyword
838 # i18n: "shortest" is a keyword
839 _("shortest() expects an integer minlength"))
839 _("shortest() expects an integer minlength"))
840
840
841 # _partialmatch() of filtered changelog could take O(len(repo)) time,
841 # _partialmatch() of filtered changelog could take O(len(repo)) time,
842 # which would be unacceptably slow. so we look for hash collision in
842 # which would be unacceptably slow. so we look for hash collision in
843 # unfiltered space, which means some hashes may be slightly longer.
843 # unfiltered space, which means some hashes may be slightly longer.
844 cl = mapping['ctx']._repo.unfiltered().changelog
844 cl = mapping['ctx']._repo.unfiltered().changelog
845 def isvalid(test):
845 def isvalid(test):
846 try:
846 try:
847 if cl._partialmatch(test) is None:
847 if cl._partialmatch(test) is None:
848 return False
848 return False
849
849
850 try:
850 try:
851 i = int(test)
851 i = int(test)
852 # if we are a pure int, then starting with zero will not be
852 # if we are a pure int, then starting with zero will not be
853 # confused as a rev; or, obviously, if the int is larger than
853 # confused as a rev; or, obviously, if the int is larger than
854 # the value of the tip rev
854 # the value of the tip rev
855 if test[0] == '0' or i > len(cl):
855 if test[0] == '0' or i > len(cl):
856 return True
856 return True
857 return False
857 return False
858 except ValueError:
858 except ValueError:
859 return True
859 return True
860 except error.RevlogError:
860 except error.RevlogError:
861 return False
861 return False
862
862
863 shortest = node
863 shortest = node
864 startlength = max(6, minlength)
864 startlength = max(6, minlength)
865 length = startlength
865 length = startlength
866 while True:
866 while True:
867 test = node[:length]
867 test = node[:length]
868 if isvalid(test):
868 if isvalid(test):
869 shortest = test
869 shortest = test
870 if length == minlength or length > startlength:
870 if length == minlength or length > startlength:
871 return shortest
871 return shortest
872 length -= 1
872 length -= 1
873 else:
873 else:
874 length += 1
874 length += 1
875 if len(shortest) <= length:
875 if len(shortest) <= length:
876 return shortest
876 return shortest
877
877
878 @templatefunc('strip(text[, chars])')
878 @templatefunc('strip(text[, chars])')
879 def strip(context, mapping, args):
879 def strip(context, mapping, args):
880 """Strip characters from a string. By default,
880 """Strip characters from a string. By default,
881 strips all leading and trailing whitespace."""
881 strips all leading and trailing whitespace."""
882 if not (1 <= len(args) <= 2):
882 if not (1 <= len(args) <= 2):
883 # i18n: "strip" is a keyword
883 # i18n: "strip" is a keyword
884 raise error.ParseError(_("strip expects one or two arguments"))
884 raise error.ParseError(_("strip expects one or two arguments"))
885
885
886 text = evalstring(context, mapping, args[0])
886 text = evalstring(context, mapping, args[0])
887 if len(args) == 2:
887 if len(args) == 2:
888 chars = evalstring(context, mapping, args[1])
888 chars = evalstring(context, mapping, args[1])
889 return text.strip(chars)
889 return text.strip(chars)
890 return text.strip()
890 return text.strip()
891
891
892 @templatefunc('sub(pattern, replacement, expression)')
892 @templatefunc('sub(pattern, replacement, expression)')
893 def sub(context, mapping, args):
893 def sub(context, mapping, args):
894 """Perform text substitution
894 """Perform text substitution
895 using regular expressions."""
895 using regular expressions."""
896 if len(args) != 3:
896 if len(args) != 3:
897 # i18n: "sub" is a keyword
897 # i18n: "sub" is a keyword
898 raise error.ParseError(_("sub expects three arguments"))
898 raise error.ParseError(_("sub expects three arguments"))
899
899
900 pat = evalstring(context, mapping, args[0])
900 pat = evalstring(context, mapping, args[0])
901 rpl = evalstring(context, mapping, args[1])
901 rpl = evalstring(context, mapping, args[1])
902 src = evalstring(context, mapping, args[2])
902 src = evalstring(context, mapping, args[2])
903 try:
903 try:
904 patre = re.compile(pat)
904 patre = re.compile(pat)
905 except re.error:
905 except re.error:
906 # i18n: "sub" is a keyword
906 # i18n: "sub" is a keyword
907 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
907 raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
908 try:
908 try:
909 yield patre.sub(rpl, src)
909 yield patre.sub(rpl, src)
910 except re.error:
910 except re.error:
911 # i18n: "sub" is a keyword
911 # i18n: "sub" is a keyword
912 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
912 raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)
913
913
914 @templatefunc('startswith(pattern, text)')
914 @templatefunc('startswith(pattern, text)')
915 def startswith(context, mapping, args):
915 def startswith(context, mapping, args):
916 """Returns the value from the "text" argument
916 """Returns the value from the "text" argument
917 if it begins with the content from the "pattern" argument."""
917 if it begins with the content from the "pattern" argument."""
918 if len(args) != 2:
918 if len(args) != 2:
919 # i18n: "startswith" is a keyword
919 # i18n: "startswith" is a keyword
920 raise error.ParseError(_("startswith expects two arguments"))
920 raise error.ParseError(_("startswith expects two arguments"))
921
921
922 patn = evalstring(context, mapping, args[0])
922 patn = evalstring(context, mapping, args[0])
923 text = evalstring(context, mapping, args[1])
923 text = evalstring(context, mapping, args[1])
924 if text.startswith(patn):
924 if text.startswith(patn):
925 return text
925 return text
926 return ''
926 return ''
927
927
928 @templatefunc('word(number, text[, separator])')
928 @templatefunc('word(number, text[, separator])')
929 def word(context, mapping, args):
929 def word(context, mapping, args):
930 """Return the nth word from a string."""
930 """Return the nth word from a string."""
931 if not (2 <= len(args) <= 3):
931 if not (2 <= len(args) <= 3):
932 # i18n: "word" is a keyword
932 # i18n: "word" is a keyword
933 raise error.ParseError(_("word expects two or three arguments, got %d")
933 raise error.ParseError(_("word expects two or three arguments, got %d")
934 % len(args))
934 % len(args))
935
935
936 num = evalinteger(context, mapping, args[0],
936 num = evalinteger(context, mapping, args[0],
937 # i18n: "word" is a keyword
937 # i18n: "word" is a keyword
938 _("word expects an integer index"))
938 _("word expects an integer index"))
939 text = evalstring(context, mapping, args[1])
939 text = evalstring(context, mapping, args[1])
940 if len(args) == 3:
940 if len(args) == 3:
941 splitter = evalstring(context, mapping, args[2])
941 splitter = evalstring(context, mapping, args[2])
942 else:
942 else:
943 splitter = None
943 splitter = None
944
944
945 tokens = text.split(splitter)
945 tokens = text.split(splitter)
946 if num >= len(tokens) or num < -len(tokens):
946 if num >= len(tokens) or num < -len(tokens):
947 return ''
947 return ''
948 else:
948 else:
949 return tokens[num]
949 return tokens[num]
950
950
951 # methods to interpret function arguments or inner expressions (e.g. {_(x)})
951 # methods to interpret function arguments or inner expressions (e.g. {_(x)})
952 exprmethods = {
952 exprmethods = {
953 "integer": lambda e, c: (runinteger, e[1]),
953 "integer": lambda e, c: (runinteger, e[1]),
954 "string": lambda e, c: (runstring, e[1]),
954 "string": lambda e, c: (runstring, e[1]),
955 "symbol": lambda e, c: (runsymbol, e[1]),
955 "symbol": lambda e, c: (runsymbol, e[1]),
956 "template": buildtemplate,
956 "template": buildtemplate,
957 "group": lambda e, c: compileexp(e[1], c, exprmethods),
957 "group": lambda e, c: compileexp(e[1], c, exprmethods),
958 # ".": buildmember,
958 # ".": buildmember,
959 "|": buildfilter,
959 "|": buildfilter,
960 "%": buildmap,
960 "%": buildmap,
961 "func": buildfunc,
961 "func": buildfunc,
962 "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
962 "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
963 "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
963 "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
964 "negate": buildnegate,
964 "negate": buildnegate,
965 "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
965 "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
966 "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
966 "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
967 }
967 }
968
968
969 # methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
969 # methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
970 methods = exprmethods.copy()
970 methods = exprmethods.copy()
971 methods["integer"] = exprmethods["symbol"] # '{1}' as variable
971 methods["integer"] = exprmethods["symbol"] # '{1}' as variable
972
972
973 class _aliasrules(parser.basealiasrules):
973 class _aliasrules(parser.basealiasrules):
974 """Parsing and expansion rule set of template aliases"""
974 """Parsing and expansion rule set of template aliases"""
975 _section = _('template alias')
975 _section = _('template alias')
976 _parse = staticmethod(_parseexpr)
976 _parse = staticmethod(_parseexpr)
977
977
978 @staticmethod
978 @staticmethod
979 def _trygetfunc(tree):
979 def _trygetfunc(tree):
980 """Return (name, args) if tree is func(...) or ...|filter; otherwise
980 """Return (name, args) if tree is func(...) or ...|filter; otherwise
981 None"""
981 None"""
982 if tree[0] == 'func' and tree[1][0] == 'symbol':
982 if tree[0] == 'func' and tree[1][0] == 'symbol':
983 return tree[1][1], getlist(tree[2])
983 return tree[1][1], getlist(tree[2])
984 if tree[0] == '|' and tree[2][0] == 'symbol':
984 if tree[0] == '|' and tree[2][0] == 'symbol':
985 return tree[2][1], [tree[1]]
985 return tree[2][1], [tree[1]]
986
986
987 def expandaliases(tree, aliases):
987 def expandaliases(tree, aliases):
988 """Return new tree of aliases are expanded"""
988 """Return new tree of aliases are expanded"""
989 aliasmap = _aliasrules.buildmap(aliases)
989 aliasmap = _aliasrules.buildmap(aliases)
990 return _aliasrules.expand(aliasmap, tree)
990 return _aliasrules.expand(aliasmap, tree)
991
991
992 # template engine
992 # template engine
993
993
994 stringify = templatefilters.stringify
994 stringify = templatefilters.stringify
995
995
996 def _flatten(thing):
996 def _flatten(thing):
997 '''yield a single stream from a possibly nested set of iterators'''
997 '''yield a single stream from a possibly nested set of iterators'''
998 if isinstance(thing, str):
998 if isinstance(thing, str):
999 yield thing
999 yield thing
1000 elif thing is None:
1000 elif thing is None:
1001 pass
1001 pass
1002 elif not util.safehasattr(thing, '__iter__'):
1002 elif not util.safehasattr(thing, '__iter__'):
1003 yield str(thing)
1003 yield str(thing)
1004 else:
1004 else:
1005 for i in thing:
1005 for i in thing:
1006 if isinstance(i, str):
1006 if isinstance(i, str):
1007 yield i
1007 yield i
1008 elif i is None:
1008 elif i is None:
1009 pass
1009 pass
1010 elif not util.safehasattr(i, '__iter__'):
1010 elif not util.safehasattr(i, '__iter__'):
1011 yield str(i)
1011 yield str(i)
1012 else:
1012 else:
1013 for j in _flatten(i):
1013 for j in _flatten(i):
1014 yield j
1014 yield j
1015
1015
1016 def unquotestring(s):
1016 def unquotestring(s):
1017 '''unwrap quotes if any; otherwise returns unmodified string'''
1017 '''unwrap quotes if any; otherwise returns unmodified string'''
1018 if len(s) < 2 or s[0] not in "'\"" or s[0] != s[-1]:
1018 if len(s) < 2 or s[0] not in "'\"" or s[0] != s[-1]:
1019 return s
1019 return s
1020 return s[1:-1]
1020 return s[1:-1]
1021
1021
1022 class engine(object):
1022 class engine(object):
1023 '''template expansion engine.
1023 '''template expansion engine.
1024
1024
1025 template expansion works like this. a map file contains key=value
1025 template expansion works like this. a map file contains key=value
1026 pairs. if value is quoted, it is treated as string. otherwise, it
1026 pairs. if value is quoted, it is treated as string. otherwise, it
1027 is treated as name of template file.
1027 is treated as name of template file.
1028
1028
1029 templater is asked to expand a key in map. it looks up key, and
1029 templater is asked to expand a key in map. it looks up key, and
1030 looks for strings like this: {foo}. it expands {foo} by looking up
1030 looks for strings like this: {foo}. it expands {foo} by looking up
1031 foo in map, and substituting it. expansion is recursive: it stops
1031 foo in map, and substituting it. expansion is recursive: it stops
1032 when there is no more {foo} to replace.
1032 when there is no more {foo} to replace.
1033
1033
1034 expansion also allows formatting and filtering.
1034 expansion also allows formatting and filtering.
1035
1035
1036 format uses key to expand each item in list. syntax is
1036 format uses key to expand each item in list. syntax is
1037 {key%format}.
1037 {key%format}.
1038
1038
1039 filter uses function to transform value. syntax is
1039 filter uses function to transform value. syntax is
1040 {key|filter1|filter2|...}.'''
1040 {key|filter1|filter2|...}.'''
1041
1041
1042 def __init__(self, loader, filters=None, defaults=None, aliases=()):
1042 def __init__(self, loader, filters=None, defaults=None, aliases=()):
1043 self._loader = loader
1043 self._loader = loader
1044 if filters is None:
1044 if filters is None:
1045 filters = {}
1045 filters = {}
1046 self._filters = filters
1046 self._filters = filters
1047 if defaults is None:
1047 if defaults is None:
1048 defaults = {}
1048 defaults = {}
1049 self._defaults = defaults
1049 self._defaults = defaults
1050 self._aliasmap = _aliasrules.buildmap(aliases)
1050 self._aliasmap = _aliasrules.buildmap(aliases)
1051 self._cache = {} # key: (func, data)
1051 self._cache = {} # key: (func, data)
1052
1052
1053 def _load(self, t):
1053 def _load(self, t):
1054 '''load, parse, and cache a template'''
1054 '''load, parse, and cache a template'''
1055 if t not in self._cache:
1055 if t not in self._cache:
1056 # put poison to cut recursion while compiling 't'
1056 # put poison to cut recursion while compiling 't'
1057 self._cache[t] = (_runrecursivesymbol, t)
1057 self._cache[t] = (_runrecursivesymbol, t)
1058 try:
1058 try:
1059 x = parse(self._loader(t))
1059 x = parse(self._loader(t))
1060 if self._aliasmap:
1060 if self._aliasmap:
1061 x = _aliasrules.expand(self._aliasmap, x)
1061 x = _aliasrules.expand(self._aliasmap, x)
1062 self._cache[t] = compileexp(x, self, methods)
1062 self._cache[t] = compileexp(x, self, methods)
1063 except: # re-raises
1063 except: # re-raises
1064 del self._cache[t]
1064 del self._cache[t]
1065 raise
1065 raise
1066 return self._cache[t]
1066 return self._cache[t]
1067
1067
1068 def process(self, t, mapping):
1068 def process(self, t, mapping):
1069 '''Perform expansion. t is name of map element to expand.
1069 '''Perform expansion. t is name of map element to expand.
1070 mapping contains added elements for use during expansion. Is a
1070 mapping contains added elements for use during expansion. Is a
1071 generator.'''
1071 generator.'''
1072 func, data = self._load(t)
1072 func, data = self._load(t)
1073 return _flatten(func(self, mapping, data))
1073 return _flatten(func(self, mapping, data))
1074
1074
1075 engines = {'default': engine}
1075 engines = {'default': engine}
1076
1076
1077 def stylelist():
1077 def stylelist():
1078 paths = templatepaths()
1078 paths = templatepaths()
1079 if not paths:
1079 if not paths:
1080 return _('no templates found, try `hg debuginstall` for more info')
1080 return _('no templates found, try `hg debuginstall` for more info')
1081 dirlist = os.listdir(paths[0])
1081 dirlist = os.listdir(paths[0])
1082 stylelist = []
1082 stylelist = []
1083 for file in dirlist:
1083 for file in dirlist:
1084 split = file.split(".")
1084 split = file.split(".")
1085 if split[-1] in ('orig', 'rej'):
1085 if split[-1] in ('orig', 'rej'):
1086 continue
1086 continue
1087 if split[0] == "map-cmdline":
1087 if split[0] == "map-cmdline":
1088 stylelist.append(split[1])
1088 stylelist.append(split[1])
1089 return ", ".join(sorted(stylelist))
1089 return ", ".join(sorted(stylelist))
1090
1090
1091 def _readmapfile(mapfile):
1091 def _readmapfile(mapfile):
1092 """Load template elements from the given map file"""
1092 """Load template elements from the given map file"""
1093 if not os.path.exists(mapfile):
1093 if not os.path.exists(mapfile):
1094 raise error.Abort(_("style '%s' not found") % mapfile,
1094 raise error.Abort(_("style '%s' not found") % mapfile,
1095 hint=_("available styles: %s") % stylelist())
1095 hint=_("available styles: %s") % stylelist())
1096
1096
1097 base = os.path.dirname(mapfile)
1097 base = os.path.dirname(mapfile)
1098 conf = config.config(includepaths=templatepaths())
1098 conf = config.config(includepaths=templatepaths())
1099 conf.read(mapfile)
1099 conf.read(mapfile)
1100
1100
1101 cache = {}
1101 cache = {}
1102 tmap = {}
1102 tmap = {}
1103 for key, val in conf[''].items():
1103 for key, val in conf[''].items():
1104 if not val:
1104 if not val:
1105 raise error.ParseError(_('missing value'), conf.source('', key))
1105 raise error.ParseError(_('missing value'), conf.source('', key))
1106 if val[0] in "'\"":
1106 if val[0] in "'\"":
1107 if val[0] != val[-1]:
1107 if val[0] != val[-1]:
1108 raise error.ParseError(_('unmatched quotes'),
1108 raise error.ParseError(_('unmatched quotes'),
1109 conf.source('', key))
1109 conf.source('', key))
1110 cache[key] = unquotestring(val)
1110 cache[key] = unquotestring(val)
1111 elif key == "__base__":
1111 elif key == "__base__":
1112 # treat as a pointer to a base class for this style
1112 # treat as a pointer to a base class for this style
1113 path = util.normpath(os.path.join(base, val))
1113 path = util.normpath(os.path.join(base, val))
1114
1114
1115 # fallback check in template paths
1115 # fallback check in template paths
1116 if not os.path.exists(path):
1116 if not os.path.exists(path):
1117 for p in templatepaths():
1117 for p in templatepaths():
1118 p2 = util.normpath(os.path.join(p, val))
1118 p2 = util.normpath(os.path.join(p, val))
1119 if os.path.isfile(p2):
1119 if os.path.isfile(p2):
1120 path = p2
1120 path = p2
1121 break
1121 break
1122 p3 = util.normpath(os.path.join(p2, "map"))
1122 p3 = util.normpath(os.path.join(p2, "map"))
1123 if os.path.isfile(p3):
1123 if os.path.isfile(p3):
1124 path = p3
1124 path = p3
1125 break
1125 break
1126
1126
1127 bcache, btmap = _readmapfile(path)
1127 bcache, btmap = _readmapfile(path)
1128 for k in bcache:
1128 for k in bcache:
1129 if k not in cache:
1129 if k not in cache:
1130 cache[k] = bcache[k]
1130 cache[k] = bcache[k]
1131 for k in btmap:
1131 for k in btmap:
1132 if k not in tmap:
1132 if k not in tmap:
1133 tmap[k] = btmap[k]
1133 tmap[k] = btmap[k]
1134 else:
1134 else:
1135 val = 'default', val
1135 val = 'default', val
1136 if ':' in val[1]:
1136 if ':' in val[1]:
1137 val = val[1].split(':', 1)
1137 val = val[1].split(':', 1)
1138 tmap[key] = val[0], os.path.join(base, val[1])
1138 tmap[key] = val[0], os.path.join(base, val[1])
1139 return cache, tmap
1139 return cache, tmap
1140
1140
1141 class TemplateNotFound(error.Abort):
1141 class TemplateNotFound(error.Abort):
1142 pass
1142 pass
1143
1143
1144 class templater(object):
1144 class templater(object):
1145
1145
1146 def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
1146 def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
1147 minchunk=1024, maxchunk=65536):
1147 minchunk=1024, maxchunk=65536):
1148 '''set up template engine.
1148 '''set up template engine.
1149 filters is dict of functions. each transforms a value into another.
1149 filters is dict of functions. each transforms a value into another.
1150 defaults is dict of default map definitions.
1150 defaults is dict of default map definitions.
1151 aliases is list of alias (name, replacement) pairs.
1151 aliases is list of alias (name, replacement) pairs.
1152 '''
1152 '''
1153 if filters is None:
1153 if filters is None:
1154 filters = {}
1154 filters = {}
1155 if defaults is None:
1155 if defaults is None:
1156 defaults = {}
1156 defaults = {}
1157 if cache is None:
1157 if cache is None:
1158 cache = {}
1158 cache = {}
1159 self.cache = cache.copy()
1159 self.cache = cache.copy()
1160 self.map = {}
1160 self.map = {}
1161 self.filters = templatefilters.filters.copy()
1161 self.filters = templatefilters.filters.copy()
1162 self.filters.update(filters)
1162 self.filters.update(filters)
1163 self.defaults = defaults
1163 self.defaults = defaults
1164 self._aliases = aliases
1164 self._aliases = aliases
1165 self.minchunk, self.maxchunk = minchunk, maxchunk
1165 self.minchunk, self.maxchunk = minchunk, maxchunk
1166 self.ecache = {}
1166 self.ecache = {}
1167
1167
1168 @classmethod
1168 @classmethod
1169 def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
1169 def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
1170 minchunk=1024, maxchunk=65536):
1170 minchunk=1024, maxchunk=65536):
1171 """Create templater from the specified map file"""
1171 """Create templater from the specified map file"""
1172 t = cls(filters, defaults, cache, [], minchunk, maxchunk)
1172 t = cls(filters, defaults, cache, [], minchunk, maxchunk)
1173 cache, tmap = _readmapfile(mapfile)
1173 cache, tmap = _readmapfile(mapfile)
1174 t.cache.update(cache)
1174 t.cache.update(cache)
1175 t.map = tmap
1175 t.map = tmap
1176 return t
1176 return t
1177
1177
1178 def __contains__(self, key):
1178 def __contains__(self, key):
1179 return key in self.cache or key in self.map
1179 return key in self.cache or key in self.map
1180
1180
1181 def load(self, t):
1181 def load(self, t):
1182 '''Get the template for the given template name. Use a local cache.'''
1182 '''Get the template for the given template name. Use a local cache.'''
1183 if t not in self.cache:
1183 if t not in self.cache:
1184 try:
1184 try:
1185 self.cache[t] = util.readfile(self.map[t][1])
1185 self.cache[t] = util.readfile(self.map[t][1])
1186 except KeyError as inst:
1186 except KeyError as inst:
1187 raise TemplateNotFound(_('"%s" not in template map') %
1187 raise TemplateNotFound(_('"%s" not in template map') %
1188 inst.args[0])
1188 inst.args[0])
1189 except IOError as inst:
1189 except IOError as inst:
1190 raise IOError(inst.args[0], _('template file %s: %s') %
1190 raise IOError(inst.args[0], _('template file %s: %s') %
1191 (self.map[t][1], inst.args[1]))
1191 (self.map[t][1], inst.args[1]))
1192 return self.cache[t]
1192 return self.cache[t]
1193
1193
1194 def __call__(self, t, **mapping):
1194 def __call__(self, t, **mapping):
1195 ttype = t in self.map and self.map[t][0] or 'default'
1195 ttype = t in self.map and self.map[t][0] or 'default'
1196 if ttype not in self.ecache:
1196 if ttype not in self.ecache:
1197 try:
1197 try:
1198 ecls = engines[ttype]
1198 ecls = engines[ttype]
1199 except KeyError:
1199 except KeyError:
1200 raise error.Abort(_('invalid template engine: %s') % ttype)
1200 raise error.Abort(_('invalid template engine: %s') % ttype)
1201 self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
1201 self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
1202 self._aliases)
1202 self._aliases)
1203 proc = self.ecache[ttype]
1203 proc = self.ecache[ttype]
1204
1204
1205 stream = proc.process(t, mapping)
1205 stream = proc.process(t, mapping)
1206 if self.minchunk:
1206 if self.minchunk:
1207 stream = util.increasingchunks(stream, min=self.minchunk,
1207 stream = util.increasingchunks(stream, min=self.minchunk,
1208 max=self.maxchunk)
1208 max=self.maxchunk)
1209 return stream
1209 return stream
1210
1210
1211 def templatepaths():
1211 def templatepaths():
1212 '''return locations used for template files.'''
1212 '''return locations used for template files.'''
1213 pathsrel = ['templates']
1213 pathsrel = ['templates']
1214 paths = [os.path.normpath(os.path.join(util.datapath, f))
1214 paths = [os.path.normpath(os.path.join(util.datapath, f))
1215 for f in pathsrel]
1215 for f in pathsrel]
1216 return [p for p in paths if os.path.isdir(p)]
1216 return [p for p in paths if os.path.isdir(p)]
1217
1217
1218 def templatepath(name):
1218 def templatepath(name):
1219 '''return location of template file. returns None if not found.'''
1219 '''return location of template file. returns None if not found.'''
1220 for p in templatepaths():
1220 for p in templatepaths():
1221 f = os.path.join(p, name)
1221 f = os.path.join(p, name)
1222 if os.path.exists(f):
1222 if os.path.exists(f):
1223 return f
1223 return f
1224 return None
1224 return None
1225
1225
1226 def stylemap(styles, paths=None):
1226 def stylemap(styles, paths=None):
1227 """Return path to mapfile for a given style.
1227 """Return path to mapfile for a given style.
1228
1228
1229 Searches mapfile in the following locations:
1229 Searches mapfile in the following locations:
1230 1. templatepath/style/map
1230 1. templatepath/style/map
1231 2. templatepath/map-style
1231 2. templatepath/map-style
1232 3. templatepath/map
1232 3. templatepath/map
1233 """
1233 """
1234
1234
1235 if paths is None:
1235 if paths is None:
1236 paths = templatepaths()
1236 paths = templatepaths()
1237 elif isinstance(paths, str):
1237 elif isinstance(paths, str):
1238 paths = [paths]
1238 paths = [paths]
1239
1239
1240 if isinstance(styles, str):
1240 if isinstance(styles, str):
1241 styles = [styles]
1241 styles = [styles]
1242
1242
1243 for style in styles:
1243 for style in styles:
1244 # only plain name is allowed to honor template paths
1244 # only plain name is allowed to honor template paths
1245 if (not style
1245 if (not style
1246 or style in (os.curdir, os.pardir)
1246 or style in (os.curdir, os.pardir)
1247 or pycompat.ossep in style
1247 or pycompat.ossep in style
1248 or os.altsep and os.altsep in style):
1248 or pycompat.osaltsep and pycompat.osaltsep in style):
1249 continue
1249 continue
1250 locations = [os.path.join(style, 'map'), 'map-' + style]
1250 locations = [os.path.join(style, 'map'), 'map-' + style]
1251 locations.append('map')
1251 locations.append('map')
1252
1252
1253 for path in paths:
1253 for path in paths:
1254 for location in locations:
1254 for location in locations:
1255 mapfile = os.path.join(path, location)
1255 mapfile = os.path.join(path, location)
1256 if os.path.isfile(mapfile):
1256 if os.path.isfile(mapfile):
1257 return style, mapfile
1257 return style, mapfile
1258
1258
1259 raise RuntimeError("No hgweb templates found in %r" % paths)
1259 raise RuntimeError("No hgweb templates found in %r" % paths)
1260
1260
1261 def loadfunction(ui, extname, registrarobj):
1261 def loadfunction(ui, extname, registrarobj):
1262 """Load template function from specified registrarobj
1262 """Load template function from specified registrarobj
1263 """
1263 """
1264 for name, func in registrarobj._table.iteritems():
1264 for name, func in registrarobj._table.iteritems():
1265 funcs[name] = func
1265 funcs[name] = func
1266
1266
1267 # tell hggettext to extract docstrings from these functions:
1267 # tell hggettext to extract docstrings from these functions:
1268 i18nfunctions = funcs.values()
1268 i18nfunctions = funcs.values()
@@ -1,3246 +1,3246
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import
16 from __future__ import absolute_import
17
17
18 import bz2
18 import bz2
19 import calendar
19 import calendar
20 import collections
20 import collections
21 import datetime
21 import datetime
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import os
26 import os
27 import platform as pyplatform
27 import platform as pyplatform
28 import re as remod
28 import re as remod
29 import shutil
29 import shutil
30 import signal
30 import signal
31 import socket
31 import socket
32 import stat
32 import stat
33 import string
33 import string
34 import subprocess
34 import subprocess
35 import sys
35 import sys
36 import tempfile
36 import tempfile
37 import textwrap
37 import textwrap
38 import time
38 import time
39 import traceback
39 import traceback
40 import zlib
40 import zlib
41
41
42 from . import (
42 from . import (
43 encoding,
43 encoding,
44 error,
44 error,
45 i18n,
45 i18n,
46 osutil,
46 osutil,
47 parsers,
47 parsers,
48 pycompat,
48 pycompat,
49 )
49 )
50
50
51 empty = pycompat.empty
51 empty = pycompat.empty
52 httplib = pycompat.httplib
52 httplib = pycompat.httplib
53 httpserver = pycompat.httpserver
53 httpserver = pycompat.httpserver
54 pickle = pycompat.pickle
54 pickle = pycompat.pickle
55 queue = pycompat.queue
55 queue = pycompat.queue
56 socketserver = pycompat.socketserver
56 socketserver = pycompat.socketserver
57 stderr = pycompat.stderr
57 stderr = pycompat.stderr
58 stdin = pycompat.stdin
58 stdin = pycompat.stdin
59 stdout = pycompat.stdout
59 stdout = pycompat.stdout
60 stringio = pycompat.stringio
60 stringio = pycompat.stringio
61 urlerr = pycompat.urlerr
61 urlerr = pycompat.urlerr
62 urlparse = pycompat.urlparse
62 urlparse = pycompat.urlparse
63 urlreq = pycompat.urlreq
63 urlreq = pycompat.urlreq
64 xmlrpclib = pycompat.xmlrpclib
64 xmlrpclib = pycompat.xmlrpclib
65
65
66 if os.name == 'nt':
66 if os.name == 'nt':
67 from . import windows as platform
67 from . import windows as platform
68 stdout = platform.winstdout(pycompat.stdout)
68 stdout = platform.winstdout(pycompat.stdout)
69 else:
69 else:
70 from . import posix as platform
70 from . import posix as platform
71
71
72 _ = i18n._
72 _ = i18n._
73
73
74 bindunixsocket = platform.bindunixsocket
74 bindunixsocket = platform.bindunixsocket
75 cachestat = platform.cachestat
75 cachestat = platform.cachestat
76 checkexec = platform.checkexec
76 checkexec = platform.checkexec
77 checklink = platform.checklink
77 checklink = platform.checklink
78 copymode = platform.copymode
78 copymode = platform.copymode
79 executablepath = platform.executablepath
79 executablepath = platform.executablepath
80 expandglobs = platform.expandglobs
80 expandglobs = platform.expandglobs
81 explainexit = platform.explainexit
81 explainexit = platform.explainexit
82 findexe = platform.findexe
82 findexe = platform.findexe
83 gethgcmd = platform.gethgcmd
83 gethgcmd = platform.gethgcmd
84 getuser = platform.getuser
84 getuser = platform.getuser
85 getpid = os.getpid
85 getpid = os.getpid
86 groupmembers = platform.groupmembers
86 groupmembers = platform.groupmembers
87 groupname = platform.groupname
87 groupname = platform.groupname
88 hidewindow = platform.hidewindow
88 hidewindow = platform.hidewindow
89 isexec = platform.isexec
89 isexec = platform.isexec
90 isowner = platform.isowner
90 isowner = platform.isowner
91 localpath = platform.localpath
91 localpath = platform.localpath
92 lookupreg = platform.lookupreg
92 lookupreg = platform.lookupreg
93 makedir = platform.makedir
93 makedir = platform.makedir
94 nlinks = platform.nlinks
94 nlinks = platform.nlinks
95 normpath = platform.normpath
95 normpath = platform.normpath
96 normcase = platform.normcase
96 normcase = platform.normcase
97 normcasespec = platform.normcasespec
97 normcasespec = platform.normcasespec
98 normcasefallback = platform.normcasefallback
98 normcasefallback = platform.normcasefallback
99 openhardlinks = platform.openhardlinks
99 openhardlinks = platform.openhardlinks
100 oslink = platform.oslink
100 oslink = platform.oslink
101 parsepatchoutput = platform.parsepatchoutput
101 parsepatchoutput = platform.parsepatchoutput
102 pconvert = platform.pconvert
102 pconvert = platform.pconvert
103 poll = platform.poll
103 poll = platform.poll
104 popen = platform.popen
104 popen = platform.popen
105 posixfile = platform.posixfile
105 posixfile = platform.posixfile
106 quotecommand = platform.quotecommand
106 quotecommand = platform.quotecommand
107 readpipe = platform.readpipe
107 readpipe = platform.readpipe
108 rename = platform.rename
108 rename = platform.rename
109 removedirs = platform.removedirs
109 removedirs = platform.removedirs
110 samedevice = platform.samedevice
110 samedevice = platform.samedevice
111 samefile = platform.samefile
111 samefile = platform.samefile
112 samestat = platform.samestat
112 samestat = platform.samestat
113 setbinary = platform.setbinary
113 setbinary = platform.setbinary
114 setflags = platform.setflags
114 setflags = platform.setflags
115 setsignalhandler = platform.setsignalhandler
115 setsignalhandler = platform.setsignalhandler
116 shellquote = platform.shellquote
116 shellquote = platform.shellquote
117 spawndetached = platform.spawndetached
117 spawndetached = platform.spawndetached
118 split = platform.split
118 split = platform.split
119 sshargs = platform.sshargs
119 sshargs = platform.sshargs
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
120 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
121 statisexec = platform.statisexec
121 statisexec = platform.statisexec
122 statislink = platform.statislink
122 statislink = platform.statislink
123 testpid = platform.testpid
123 testpid = platform.testpid
124 umask = platform.umask
124 umask = platform.umask
125 unlink = platform.unlink
125 unlink = platform.unlink
126 unlinkpath = platform.unlinkpath
126 unlinkpath = platform.unlinkpath
127 username = platform.username
127 username = platform.username
128
128
129 # Python compatibility
129 # Python compatibility
130
130
131 _notset = object()
131 _notset = object()
132
132
133 # disable Python's problematic floating point timestamps (issue4836)
133 # disable Python's problematic floating point timestamps (issue4836)
134 # (Python hypocritically says you shouldn't change this behavior in
134 # (Python hypocritically says you shouldn't change this behavior in
135 # libraries, and sure enough Mercurial is not a library.)
135 # libraries, and sure enough Mercurial is not a library.)
136 os.stat_float_times(False)
136 os.stat_float_times(False)
137
137
138 def safehasattr(thing, attr):
138 def safehasattr(thing, attr):
139 return getattr(thing, attr, _notset) is not _notset
139 return getattr(thing, attr, _notset) is not _notset
140
140
141 DIGESTS = {
141 DIGESTS = {
142 'md5': hashlib.md5,
142 'md5': hashlib.md5,
143 'sha1': hashlib.sha1,
143 'sha1': hashlib.sha1,
144 'sha512': hashlib.sha512,
144 'sha512': hashlib.sha512,
145 }
145 }
146 # List of digest types from strongest to weakest
146 # List of digest types from strongest to weakest
147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
147 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
148
148
149 for k in DIGESTS_BY_STRENGTH:
149 for k in DIGESTS_BY_STRENGTH:
150 assert k in DIGESTS
150 assert k in DIGESTS
151
151
152 class digester(object):
152 class digester(object):
153 """helper to compute digests.
153 """helper to compute digests.
154
154
155 This helper can be used to compute one or more digests given their name.
155 This helper can be used to compute one or more digests given their name.
156
156
157 >>> d = digester(['md5', 'sha1'])
157 >>> d = digester(['md5', 'sha1'])
158 >>> d.update('foo')
158 >>> d.update('foo')
159 >>> [k for k in sorted(d)]
159 >>> [k for k in sorted(d)]
160 ['md5', 'sha1']
160 ['md5', 'sha1']
161 >>> d['md5']
161 >>> d['md5']
162 'acbd18db4cc2f85cedef654fccc4a4d8'
162 'acbd18db4cc2f85cedef654fccc4a4d8'
163 >>> d['sha1']
163 >>> d['sha1']
164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
164 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
165 >>> digester.preferred(['md5', 'sha1'])
165 >>> digester.preferred(['md5', 'sha1'])
166 'sha1'
166 'sha1'
167 """
167 """
168
168
169 def __init__(self, digests, s=''):
169 def __init__(self, digests, s=''):
170 self._hashes = {}
170 self._hashes = {}
171 for k in digests:
171 for k in digests:
172 if k not in DIGESTS:
172 if k not in DIGESTS:
173 raise Abort(_('unknown digest type: %s') % k)
173 raise Abort(_('unknown digest type: %s') % k)
174 self._hashes[k] = DIGESTS[k]()
174 self._hashes[k] = DIGESTS[k]()
175 if s:
175 if s:
176 self.update(s)
176 self.update(s)
177
177
178 def update(self, data):
178 def update(self, data):
179 for h in self._hashes.values():
179 for h in self._hashes.values():
180 h.update(data)
180 h.update(data)
181
181
182 def __getitem__(self, key):
182 def __getitem__(self, key):
183 if key not in DIGESTS:
183 if key not in DIGESTS:
184 raise Abort(_('unknown digest type: %s') % k)
184 raise Abort(_('unknown digest type: %s') % k)
185 return self._hashes[key].hexdigest()
185 return self._hashes[key].hexdigest()
186
186
187 def __iter__(self):
187 def __iter__(self):
188 return iter(self._hashes)
188 return iter(self._hashes)
189
189
190 @staticmethod
190 @staticmethod
191 def preferred(supported):
191 def preferred(supported):
192 """returns the strongest digest type in both supported and DIGESTS."""
192 """returns the strongest digest type in both supported and DIGESTS."""
193
193
194 for k in DIGESTS_BY_STRENGTH:
194 for k in DIGESTS_BY_STRENGTH:
195 if k in supported:
195 if k in supported:
196 return k
196 return k
197 return None
197 return None
198
198
199 class digestchecker(object):
199 class digestchecker(object):
200 """file handle wrapper that additionally checks content against a given
200 """file handle wrapper that additionally checks content against a given
201 size and digests.
201 size and digests.
202
202
203 d = digestchecker(fh, size, {'md5': '...'})
203 d = digestchecker(fh, size, {'md5': '...'})
204
204
205 When multiple digests are given, all of them are validated.
205 When multiple digests are given, all of them are validated.
206 """
206 """
207
207
208 def __init__(self, fh, size, digests):
208 def __init__(self, fh, size, digests):
209 self._fh = fh
209 self._fh = fh
210 self._size = size
210 self._size = size
211 self._got = 0
211 self._got = 0
212 self._digests = dict(digests)
212 self._digests = dict(digests)
213 self._digester = digester(self._digests.keys())
213 self._digester = digester(self._digests.keys())
214
214
215 def read(self, length=-1):
215 def read(self, length=-1):
216 content = self._fh.read(length)
216 content = self._fh.read(length)
217 self._digester.update(content)
217 self._digester.update(content)
218 self._got += len(content)
218 self._got += len(content)
219 return content
219 return content
220
220
221 def validate(self):
221 def validate(self):
222 if self._size != self._got:
222 if self._size != self._got:
223 raise Abort(_('size mismatch: expected %d, got %d') %
223 raise Abort(_('size mismatch: expected %d, got %d') %
224 (self._size, self._got))
224 (self._size, self._got))
225 for k, v in self._digests.items():
225 for k, v in self._digests.items():
226 if v != self._digester[k]:
226 if v != self._digester[k]:
227 # i18n: first parameter is a digest name
227 # i18n: first parameter is a digest name
228 raise Abort(_('%s mismatch: expected %s, got %s') %
228 raise Abort(_('%s mismatch: expected %s, got %s') %
229 (k, v, self._digester[k]))
229 (k, v, self._digester[k]))
230
230
231 try:
231 try:
232 buffer = buffer
232 buffer = buffer
233 except NameError:
233 except NameError:
234 if not pycompat.ispy3:
234 if not pycompat.ispy3:
235 def buffer(sliceable, offset=0):
235 def buffer(sliceable, offset=0):
236 return sliceable[offset:]
236 return sliceable[offset:]
237 else:
237 else:
238 def buffer(sliceable, offset=0):
238 def buffer(sliceable, offset=0):
239 return memoryview(sliceable)[offset:]
239 return memoryview(sliceable)[offset:]
240
240
241 closefds = os.name == 'posix'
241 closefds = os.name == 'posix'
242
242
243 _chunksize = 4096
243 _chunksize = 4096
244
244
245 class bufferedinputpipe(object):
245 class bufferedinputpipe(object):
246 """a manually buffered input pipe
246 """a manually buffered input pipe
247
247
248 Python will not let us use buffered IO and lazy reading with 'polling' at
248 Python will not let us use buffered IO and lazy reading with 'polling' at
249 the same time. We cannot probe the buffer state and select will not detect
249 the same time. We cannot probe the buffer state and select will not detect
250 that data are ready to read if they are already buffered.
250 that data are ready to read if they are already buffered.
251
251
252 This class let us work around that by implementing its own buffering
252 This class let us work around that by implementing its own buffering
253 (allowing efficient readline) while offering a way to know if the buffer is
253 (allowing efficient readline) while offering a way to know if the buffer is
254 empty from the output (allowing collaboration of the buffer with polling).
254 empty from the output (allowing collaboration of the buffer with polling).
255
255
256 This class lives in the 'util' module because it makes use of the 'os'
256 This class lives in the 'util' module because it makes use of the 'os'
257 module from the python stdlib.
257 module from the python stdlib.
258 """
258 """
259
259
260 def __init__(self, input):
260 def __init__(self, input):
261 self._input = input
261 self._input = input
262 self._buffer = []
262 self._buffer = []
263 self._eof = False
263 self._eof = False
264 self._lenbuf = 0
264 self._lenbuf = 0
265
265
266 @property
266 @property
267 def hasbuffer(self):
267 def hasbuffer(self):
268 """True is any data is currently buffered
268 """True is any data is currently buffered
269
269
270 This will be used externally a pre-step for polling IO. If there is
270 This will be used externally a pre-step for polling IO. If there is
271 already data then no polling should be set in place."""
271 already data then no polling should be set in place."""
272 return bool(self._buffer)
272 return bool(self._buffer)
273
273
274 @property
274 @property
275 def closed(self):
275 def closed(self):
276 return self._input.closed
276 return self._input.closed
277
277
278 def fileno(self):
278 def fileno(self):
279 return self._input.fileno()
279 return self._input.fileno()
280
280
281 def close(self):
281 def close(self):
282 return self._input.close()
282 return self._input.close()
283
283
284 def read(self, size):
284 def read(self, size):
285 while (not self._eof) and (self._lenbuf < size):
285 while (not self._eof) and (self._lenbuf < size):
286 self._fillbuffer()
286 self._fillbuffer()
287 return self._frombuffer(size)
287 return self._frombuffer(size)
288
288
289 def readline(self, *args, **kwargs):
289 def readline(self, *args, **kwargs):
290 if 1 < len(self._buffer):
290 if 1 < len(self._buffer):
291 # this should not happen because both read and readline end with a
291 # this should not happen because both read and readline end with a
292 # _frombuffer call that collapse it.
292 # _frombuffer call that collapse it.
293 self._buffer = [''.join(self._buffer)]
293 self._buffer = [''.join(self._buffer)]
294 self._lenbuf = len(self._buffer[0])
294 self._lenbuf = len(self._buffer[0])
295 lfi = -1
295 lfi = -1
296 if self._buffer:
296 if self._buffer:
297 lfi = self._buffer[-1].find('\n')
297 lfi = self._buffer[-1].find('\n')
298 while (not self._eof) and lfi < 0:
298 while (not self._eof) and lfi < 0:
299 self._fillbuffer()
299 self._fillbuffer()
300 if self._buffer:
300 if self._buffer:
301 lfi = self._buffer[-1].find('\n')
301 lfi = self._buffer[-1].find('\n')
302 size = lfi + 1
302 size = lfi + 1
303 if lfi < 0: # end of file
303 if lfi < 0: # end of file
304 size = self._lenbuf
304 size = self._lenbuf
305 elif 1 < len(self._buffer):
305 elif 1 < len(self._buffer):
306 # we need to take previous chunks into account
306 # we need to take previous chunks into account
307 size += self._lenbuf - len(self._buffer[-1])
307 size += self._lenbuf - len(self._buffer[-1])
308 return self._frombuffer(size)
308 return self._frombuffer(size)
309
309
310 def _frombuffer(self, size):
310 def _frombuffer(self, size):
311 """return at most 'size' data from the buffer
311 """return at most 'size' data from the buffer
312
312
313 The data are removed from the buffer."""
313 The data are removed from the buffer."""
314 if size == 0 or not self._buffer:
314 if size == 0 or not self._buffer:
315 return ''
315 return ''
316 buf = self._buffer[0]
316 buf = self._buffer[0]
317 if 1 < len(self._buffer):
317 if 1 < len(self._buffer):
318 buf = ''.join(self._buffer)
318 buf = ''.join(self._buffer)
319
319
320 data = buf[:size]
320 data = buf[:size]
321 buf = buf[len(data):]
321 buf = buf[len(data):]
322 if buf:
322 if buf:
323 self._buffer = [buf]
323 self._buffer = [buf]
324 self._lenbuf = len(buf)
324 self._lenbuf = len(buf)
325 else:
325 else:
326 self._buffer = []
326 self._buffer = []
327 self._lenbuf = 0
327 self._lenbuf = 0
328 return data
328 return data
329
329
330 def _fillbuffer(self):
330 def _fillbuffer(self):
331 """read data to the buffer"""
331 """read data to the buffer"""
332 data = os.read(self._input.fileno(), _chunksize)
332 data = os.read(self._input.fileno(), _chunksize)
333 if not data:
333 if not data:
334 self._eof = True
334 self._eof = True
335 else:
335 else:
336 self._lenbuf += len(data)
336 self._lenbuf += len(data)
337 self._buffer.append(data)
337 self._buffer.append(data)
338
338
339 def popen2(cmd, env=None, newlines=False):
339 def popen2(cmd, env=None, newlines=False):
340 # Setting bufsize to -1 lets the system decide the buffer size.
340 # Setting bufsize to -1 lets the system decide the buffer size.
341 # The default for bufsize is 0, meaning unbuffered. This leads to
341 # The default for bufsize is 0, meaning unbuffered. This leads to
342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
342 # poor performance on Mac OS X: http://bugs.python.org/issue4194
343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
343 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
344 close_fds=closefds,
344 close_fds=closefds,
345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
345 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
346 universal_newlines=newlines,
346 universal_newlines=newlines,
347 env=env)
347 env=env)
348 return p.stdin, p.stdout
348 return p.stdin, p.stdout
349
349
350 def popen3(cmd, env=None, newlines=False):
350 def popen3(cmd, env=None, newlines=False):
351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
351 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
352 return stdin, stdout, stderr
352 return stdin, stdout, stderr
353
353
354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
354 def popen4(cmd, env=None, newlines=False, bufsize=-1):
355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
355 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
356 close_fds=closefds,
356 close_fds=closefds,
357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
357 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
358 stderr=subprocess.PIPE,
358 stderr=subprocess.PIPE,
359 universal_newlines=newlines,
359 universal_newlines=newlines,
360 env=env)
360 env=env)
361 return p.stdin, p.stdout, p.stderr, p
361 return p.stdin, p.stdout, p.stderr, p
362
362
363 def version():
363 def version():
364 """Return version information if available."""
364 """Return version information if available."""
365 try:
365 try:
366 from . import __version__
366 from . import __version__
367 return __version__.version
367 return __version__.version
368 except ImportError:
368 except ImportError:
369 return 'unknown'
369 return 'unknown'
370
370
371 def versiontuple(v=None, n=4):
371 def versiontuple(v=None, n=4):
372 """Parses a Mercurial version string into an N-tuple.
372 """Parses a Mercurial version string into an N-tuple.
373
373
374 The version string to be parsed is specified with the ``v`` argument.
374 The version string to be parsed is specified with the ``v`` argument.
375 If it isn't defined, the current Mercurial version string will be parsed.
375 If it isn't defined, the current Mercurial version string will be parsed.
376
376
377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
377 ``n`` can be 2, 3, or 4. Here is how some version strings map to
378 returned values:
378 returned values:
379
379
380 >>> v = '3.6.1+190-df9b73d2d444'
380 >>> v = '3.6.1+190-df9b73d2d444'
381 >>> versiontuple(v, 2)
381 >>> versiontuple(v, 2)
382 (3, 6)
382 (3, 6)
383 >>> versiontuple(v, 3)
383 >>> versiontuple(v, 3)
384 (3, 6, 1)
384 (3, 6, 1)
385 >>> versiontuple(v, 4)
385 >>> versiontuple(v, 4)
386 (3, 6, 1, '190-df9b73d2d444')
386 (3, 6, 1, '190-df9b73d2d444')
387
387
388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
388 >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
389 (3, 6, 1, '190-df9b73d2d444+20151118')
389 (3, 6, 1, '190-df9b73d2d444+20151118')
390
390
391 >>> v = '3.6'
391 >>> v = '3.6'
392 >>> versiontuple(v, 2)
392 >>> versiontuple(v, 2)
393 (3, 6)
393 (3, 6)
394 >>> versiontuple(v, 3)
394 >>> versiontuple(v, 3)
395 (3, 6, None)
395 (3, 6, None)
396 >>> versiontuple(v, 4)
396 >>> versiontuple(v, 4)
397 (3, 6, None, None)
397 (3, 6, None, None)
398
398
399 >>> v = '3.9-rc'
399 >>> v = '3.9-rc'
400 >>> versiontuple(v, 2)
400 >>> versiontuple(v, 2)
401 (3, 9)
401 (3, 9)
402 >>> versiontuple(v, 3)
402 >>> versiontuple(v, 3)
403 (3, 9, None)
403 (3, 9, None)
404 >>> versiontuple(v, 4)
404 >>> versiontuple(v, 4)
405 (3, 9, None, 'rc')
405 (3, 9, None, 'rc')
406
406
407 >>> v = '3.9-rc+2-02a8fea4289b'
407 >>> v = '3.9-rc+2-02a8fea4289b'
408 >>> versiontuple(v, 2)
408 >>> versiontuple(v, 2)
409 (3, 9)
409 (3, 9)
410 >>> versiontuple(v, 3)
410 >>> versiontuple(v, 3)
411 (3, 9, None)
411 (3, 9, None)
412 >>> versiontuple(v, 4)
412 >>> versiontuple(v, 4)
413 (3, 9, None, 'rc+2-02a8fea4289b')
413 (3, 9, None, 'rc+2-02a8fea4289b')
414 """
414 """
415 if not v:
415 if not v:
416 v = version()
416 v = version()
417 parts = remod.split('[\+-]', v, 1)
417 parts = remod.split('[\+-]', v, 1)
418 if len(parts) == 1:
418 if len(parts) == 1:
419 vparts, extra = parts[0], None
419 vparts, extra = parts[0], None
420 else:
420 else:
421 vparts, extra = parts
421 vparts, extra = parts
422
422
423 vints = []
423 vints = []
424 for i in vparts.split('.'):
424 for i in vparts.split('.'):
425 try:
425 try:
426 vints.append(int(i))
426 vints.append(int(i))
427 except ValueError:
427 except ValueError:
428 break
428 break
429 # (3, 6) -> (3, 6, None)
429 # (3, 6) -> (3, 6, None)
430 while len(vints) < 3:
430 while len(vints) < 3:
431 vints.append(None)
431 vints.append(None)
432
432
433 if n == 2:
433 if n == 2:
434 return (vints[0], vints[1])
434 return (vints[0], vints[1])
435 if n == 3:
435 if n == 3:
436 return (vints[0], vints[1], vints[2])
436 return (vints[0], vints[1], vints[2])
437 if n == 4:
437 if n == 4:
438 return (vints[0], vints[1], vints[2], extra)
438 return (vints[0], vints[1], vints[2], extra)
439
439
440 # used by parsedate
440 # used by parsedate
441 defaultdateformats = (
441 defaultdateformats = (
442 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
442 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
443 '%Y-%m-%dT%H:%M', # without seconds
443 '%Y-%m-%dT%H:%M', # without seconds
444 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
444 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
445 '%Y-%m-%dT%H%M', # without seconds
445 '%Y-%m-%dT%H%M', # without seconds
446 '%Y-%m-%d %H:%M:%S', # our common legal variant
446 '%Y-%m-%d %H:%M:%S', # our common legal variant
447 '%Y-%m-%d %H:%M', # without seconds
447 '%Y-%m-%d %H:%M', # without seconds
448 '%Y-%m-%d %H%M%S', # without :
448 '%Y-%m-%d %H%M%S', # without :
449 '%Y-%m-%d %H%M', # without seconds
449 '%Y-%m-%d %H%M', # without seconds
450 '%Y-%m-%d %I:%M:%S%p',
450 '%Y-%m-%d %I:%M:%S%p',
451 '%Y-%m-%d %H:%M',
451 '%Y-%m-%d %H:%M',
452 '%Y-%m-%d %I:%M%p',
452 '%Y-%m-%d %I:%M%p',
453 '%Y-%m-%d',
453 '%Y-%m-%d',
454 '%m-%d',
454 '%m-%d',
455 '%m/%d',
455 '%m/%d',
456 '%m/%d/%y',
456 '%m/%d/%y',
457 '%m/%d/%Y',
457 '%m/%d/%Y',
458 '%a %b %d %H:%M:%S %Y',
458 '%a %b %d %H:%M:%S %Y',
459 '%a %b %d %I:%M:%S%p %Y',
459 '%a %b %d %I:%M:%S%p %Y',
460 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
460 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
461 '%b %d %H:%M:%S %Y',
461 '%b %d %H:%M:%S %Y',
462 '%b %d %I:%M:%S%p %Y',
462 '%b %d %I:%M:%S%p %Y',
463 '%b %d %H:%M:%S',
463 '%b %d %H:%M:%S',
464 '%b %d %I:%M:%S%p',
464 '%b %d %I:%M:%S%p',
465 '%b %d %H:%M',
465 '%b %d %H:%M',
466 '%b %d %I:%M%p',
466 '%b %d %I:%M%p',
467 '%b %d %Y',
467 '%b %d %Y',
468 '%b %d',
468 '%b %d',
469 '%H:%M:%S',
469 '%H:%M:%S',
470 '%I:%M:%S%p',
470 '%I:%M:%S%p',
471 '%H:%M',
471 '%H:%M',
472 '%I:%M%p',
472 '%I:%M%p',
473 )
473 )
474
474
475 extendeddateformats = defaultdateformats + (
475 extendeddateformats = defaultdateformats + (
476 "%Y",
476 "%Y",
477 "%Y-%m",
477 "%Y-%m",
478 "%b",
478 "%b",
479 "%b %Y",
479 "%b %Y",
480 )
480 )
481
481
482 def cachefunc(func):
482 def cachefunc(func):
483 '''cache the result of function calls'''
483 '''cache the result of function calls'''
484 # XXX doesn't handle keywords args
484 # XXX doesn't handle keywords args
485 if func.__code__.co_argcount == 0:
485 if func.__code__.co_argcount == 0:
486 cache = []
486 cache = []
487 def f():
487 def f():
488 if len(cache) == 0:
488 if len(cache) == 0:
489 cache.append(func())
489 cache.append(func())
490 return cache[0]
490 return cache[0]
491 return f
491 return f
492 cache = {}
492 cache = {}
493 if func.__code__.co_argcount == 1:
493 if func.__code__.co_argcount == 1:
494 # we gain a small amount of time because
494 # we gain a small amount of time because
495 # we don't need to pack/unpack the list
495 # we don't need to pack/unpack the list
496 def f(arg):
496 def f(arg):
497 if arg not in cache:
497 if arg not in cache:
498 cache[arg] = func(arg)
498 cache[arg] = func(arg)
499 return cache[arg]
499 return cache[arg]
500 else:
500 else:
501 def f(*args):
501 def f(*args):
502 if args not in cache:
502 if args not in cache:
503 cache[args] = func(*args)
503 cache[args] = func(*args)
504 return cache[args]
504 return cache[args]
505
505
506 return f
506 return f
507
507
508 class sortdict(dict):
508 class sortdict(dict):
509 '''a simple sorted dictionary'''
509 '''a simple sorted dictionary'''
510 def __init__(self, data=None):
510 def __init__(self, data=None):
511 self._list = []
511 self._list = []
512 if data:
512 if data:
513 self.update(data)
513 self.update(data)
514 def copy(self):
514 def copy(self):
515 return sortdict(self)
515 return sortdict(self)
516 def __setitem__(self, key, val):
516 def __setitem__(self, key, val):
517 if key in self:
517 if key in self:
518 self._list.remove(key)
518 self._list.remove(key)
519 self._list.append(key)
519 self._list.append(key)
520 dict.__setitem__(self, key, val)
520 dict.__setitem__(self, key, val)
521 def __iter__(self):
521 def __iter__(self):
522 return self._list.__iter__()
522 return self._list.__iter__()
523 def update(self, src):
523 def update(self, src):
524 if isinstance(src, dict):
524 if isinstance(src, dict):
525 src = src.iteritems()
525 src = src.iteritems()
526 for k, v in src:
526 for k, v in src:
527 self[k] = v
527 self[k] = v
528 def clear(self):
528 def clear(self):
529 dict.clear(self)
529 dict.clear(self)
530 self._list = []
530 self._list = []
531 def items(self):
531 def items(self):
532 return [(k, self[k]) for k in self._list]
532 return [(k, self[k]) for k in self._list]
533 def __delitem__(self, key):
533 def __delitem__(self, key):
534 dict.__delitem__(self, key)
534 dict.__delitem__(self, key)
535 self._list.remove(key)
535 self._list.remove(key)
536 def pop(self, key, *args, **kwargs):
536 def pop(self, key, *args, **kwargs):
537 dict.pop(self, key, *args, **kwargs)
537 dict.pop(self, key, *args, **kwargs)
538 try:
538 try:
539 self._list.remove(key)
539 self._list.remove(key)
540 except ValueError:
540 except ValueError:
541 pass
541 pass
542 def keys(self):
542 def keys(self):
543 return self._list
543 return self._list
544 def iterkeys(self):
544 def iterkeys(self):
545 return self._list.__iter__()
545 return self._list.__iter__()
546 def iteritems(self):
546 def iteritems(self):
547 for k in self._list:
547 for k in self._list:
548 yield k, self[k]
548 yield k, self[k]
549 def insert(self, index, key, val):
549 def insert(self, index, key, val):
550 self._list.insert(index, key)
550 self._list.insert(index, key)
551 dict.__setitem__(self, key, val)
551 dict.__setitem__(self, key, val)
552 def __repr__(self):
552 def __repr__(self):
553 if not self:
553 if not self:
554 return '%s()' % self.__class__.__name__
554 return '%s()' % self.__class__.__name__
555 return '%s(%r)' % (self.__class__.__name__, self.items())
555 return '%s(%r)' % (self.__class__.__name__, self.items())
556
556
557 class _lrucachenode(object):
557 class _lrucachenode(object):
558 """A node in a doubly linked list.
558 """A node in a doubly linked list.
559
559
560 Holds a reference to nodes on either side as well as a key-value
560 Holds a reference to nodes on either side as well as a key-value
561 pair for the dictionary entry.
561 pair for the dictionary entry.
562 """
562 """
563 __slots__ = (u'next', u'prev', u'key', u'value')
563 __slots__ = (u'next', u'prev', u'key', u'value')
564
564
565 def __init__(self):
565 def __init__(self):
566 self.next = None
566 self.next = None
567 self.prev = None
567 self.prev = None
568
568
569 self.key = _notset
569 self.key = _notset
570 self.value = None
570 self.value = None
571
571
572 def markempty(self):
572 def markempty(self):
573 """Mark the node as emptied."""
573 """Mark the node as emptied."""
574 self.key = _notset
574 self.key = _notset
575
575
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # Maps key -> _lrucachenode; the nodes themselves form the
        # circular doubly linked list that tracks recency.
        self._cache = {}

        # Start with a single node linked to itself in both directions.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        # _size counts allocated nodes (grown lazily up to _capacity);
        # the number of *populated* entries is len(self._cache).
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A successful lookup refreshes the entry, making it the newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            # Not yet at capacity: allocate a fresh node before the head.
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, a get() hit does not refresh the
        # entry's position in the LRU order (no _movetohead call).
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Walk from the head, emptying nodes until the first already-empty
        # one; populated nodes are contiguous from the head because
        # __delitem__ parks emptied nodes at the oldest position.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
734
734
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # Fast path: a single positional argument serves directly as the
        # cache key, avoiding tuple packing on every call.
        def f(arg):
            if arg in cache:
                # Hit: refresh recency by moving the key to the back.
                order.remove(arg)
            else:
                # Miss: evict the least recently used entry once more
                # than 20 results are cached, then compute.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
761
761
class propertycache(object):
    """Descriptor caching the result of a method as an instance attribute.

    The wrapped function runs once on first attribute access; its result
    is then stored in the instance __dict__, which shadows this (non-data)
    descriptor on subsequent lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
774
774
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # closefds is a module-level, platform-dependent flag defined
    # elsewhere in this module.
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
781
781
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        # Write the input to a temp file the command can read.
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # Reserve an output file; the command will (re)write it.
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # On OpenVMS an odd status value indicates success.
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of whichever temp files were created.
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
815
815
# Maps a command prefix to the filter strategy implementing it; commands
# with no recognized prefix fall through to pipefilter (see filter()).
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
820
820
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a recognized prefix; strip the prefix (and surrounding
    # whitespace) from the command before handing it to the strategy.
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    # No prefix matched: treat the whole command as a pipe filter.
    return pipefilter(s, cmd)
827
827
def binary(s):
    """return true if a string is binary data"""
    # Heuristic: any NUL byte marks the content as binary; empty input is
    # never considered binary.
    if not s:
        return False
    return '\0' in s
831
831
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)) for x > 0; defined as 0 for x == 0.
        i = -1
        while x:
            x >>= 1
            i += 1
        return i if i >= 0 else 0

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            # Not enough buffered yet; keep accumulating.
            continue
        if min < max:
            # Grow the threshold: at least double it, and jump further if
            # the emitted chunk was already bigger than that, capping at max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        # Flush whatever is left, even if below the threshold.
        yield ''.join(pending)
862
862
# Convenience alias for error.Abort, so code using this module can raise
# Abort without importing error directly.
Abort = error.Abort
864
864
def always(fn):
    """Predicate returning True for any input; *fn* is ignored."""
    return True
867
867
def never(fn):
    """Predicate returning False for any input; *fn* is ignored."""
    return False
870
870
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    # On 2.7+ the workaround is unnecessary: hand the function back as-is.
    if sys.version_info >= (2, 7):
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # Only re-enable collection if it was running beforehand, so
            # nested uses don't turn it back on prematurely.
            if wasenabled:
                gc.enable()
    return wrapper
894
894
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # No starting point: n2 relative to root is the answer.
        return localpath(n2)
    if os.path.isabs(n1):
        # If root and n1 live on different drives (Windows), no relative
        # path between them exists; return an absolute path under root.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        # Make n2 absolute too so both sides are comparable.
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the common leading components of both paths (the reversal
    # lets pop() work from the front).
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Go up one level for each remaining component of n1, then descend
    # along what is left of n2; '.' when the paths were identical.
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
920
920
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):  # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen(u"__main__")  # tools/freeze
930
930
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# __file__/sys.executable may be unicode (Python 3); normalize to bytes
# via pycompat before handing the path on.
if not isinstance(datapath, bytes):
    datapath = pycompat.fsencode(datapath)

# Tell the i18n machinery where translations live.
i18n.setdatapath(datapath)
942
942
# Cached path of the 'hg' executable; lazily filled in by hgexecutable().
_hgexecutable = None
944
944
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # Explicit override via the HG environment variable.
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # Frozen binary: the interpreter *is* hg.
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # Running from the 'hg' script itself.
            _sethgexecutable(mainmod.__file__)
        else:
            # Fall back to searching PATH, then to whatever argv[0] says.
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
967
967
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # Stored in the module-level cache read back by hgexecutable().
    global _hgexecutable
    _hgexecutable = path
972
972
973 def _isstdout(f):
973 def _isstdout(f):
974 fileno = getattr(f, 'fileno', None)
974 fileno = getattr(f, 'fileno', None)
975 return fileno and fileno() == sys.__stdout__.fileno()
975 return fileno and fileno() == sys.__stdout__.fileno()
976
976
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # Flush our buffered output so it is not interleaved with the
        # child's (stdout here is a module-level stream object defined
        # elsewhere in this module).
        stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # Merge the caller's environment on top of ours, converting
        # Python values to shell-friendly strings.
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # Child can write straight to our stdout; no capture needed.
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # Capture stdout+stderr and forward line by line to 'out'.
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # On OpenVMS an odd status value indicates success.
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1032
1032
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            tb = traceback.extract_tb(sys.exc_info()[2])
            if len(tb) == 1:
                # The TypeError came from the call site itself (a single
                # traceback frame), i.e. a bad signature, not a bug
                # inside func.
                raise error.SignatureError
            raise

    return check
1044
1044
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # Remember the old stat so we can detect mtime ambiguity below.
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # NOTE: the 'if False' deliberately disables this branch.
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one:
                    # advance mtime by one second (wrapping at 2**31) to
                    # disambiguate old and new content.
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1089
1089
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # Hardlinks only work within one filesystem: probe the device
        # numbers of src and dst's parent to decide whether to try.
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by the files already handled here.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Linking failed: fall back to copying and stop trying to
                # link for the rest of this tree.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)

    progress(topic, None)

    return hardlink, num
1126
1126
# Names and characters that are not usable in filenames on Windows;
# consulted by checkwinfilename().
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # normalize both separator styles, then validate each path component
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            # punctuation Windows reserves (':*?"<>|') is forbidden anywhere
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (codepoints <= 31) are invalid on Windows
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the part before the first dot must not be a reserved device name
        # such as 'con' or 'lpt1'; comparison is case-insensitive
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # Windows silently strips trailing dots/spaces; "n not in '..'"
        # deliberately lets the special components '.' and '..' through
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1177
1177
# on Windows every filename must pass the checks above; elsewhere defer
# to the platform module's (possibly no-op) validator
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1182
1182
def makelock(info, pathname):
    '''Create a lock at pathname advertising info.

    Prefers atomically creating a symlink whose target is info; when
    symlinks are unavailable (no os.symlink, or the call fails for a
    reason other than an existing lock) it falls back to exclusively
    creating a regular file containing info.

    Raises OSError(EEXIST) if the lock already exists, and propagates
    any other error from the fallback creation/write.
    '''
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock is a hard error; any other symlink failure
        # (e.g. unsupported filesystem) falls through to the file path
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    try:
        os.write(ld, info)
    finally:
        # don't leak the descriptor if the write fails
        os.close(ld)
1195
1195
def readlock(pathname):
    '''Return the information stored in lock pathname.

    Reads the symlink target when the lock is a symlink; otherwise
    (not a symlink, or symlinks unsupported) reads the file contents.
    '''
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: pathname is a plain file; ENOSYS: no symlink support.
        # Anything else is a genuine error.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1208
1208
def fstat(fp):
    '''stat a file object, tolerating objects without a fileno method.'''
    getfd = getattr(fp, 'fileno', None)
    if getfd is None:
        # file-like object without a real descriptor: stat by name
        return os.stat(fp.name)
    return os.fstat(getfd())
1215
1215
1216 # File system features
1216 # File system features
1217
1217
def fscasesensitive(path):
    """
    Probe whether the filesystem holding path is case-sensitive.

    The path (like /foo/.hg) must end in a component whose case can be
    folded; a final component with no letters yields True by default.
    """
    orig = os.lstat(path)
    dirname, base = os.path.split(path)
    flipped = base.upper()
    if flipped == base:
        flipped = base.lower()
        if flipped == base:
            # nothing to flip, so sensitivity cannot be disproven
            return True
    try:
        other = os.lstat(os.path.join(dirname, flipped))
    except OSError:
        # the case-flipped name does not exist: case matters here
        return True
    # identical stat for both spellings means a single file: insensitive
    return other != orig
1240
1240
try:
    import re2
    # tri-state flag: None means "re2 importable but not yet validated";
    # _re._checkre2 below settles it to True/False on first use
    _re2 = None
except ImportError:
    _re2 = False
1246
1246
class _re(object):
    '''Dispatch between the optional re2 engine and the stdlib re module.'''
    def _checkre2(self):
        # settle the module-level tri-state _re2 flag (None = undecided)
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode them inline instead
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1289
1289
# module-level singleton; callers use util.re.compile() / util.re.escape
re = _re()

# cache used by fspath: directory -> {normcased entry: on-disk spelling}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcase-ed entry -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be kept;
    # otherwise '\' (os.sep on Windows) is swallowed unescaped by the
    # regex character class below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1334
1334
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates two temporary probe files next to testfile, hardlinks one
    to the other, and returns True only when the link count is seen to
    be > 1. Both probes are always removed.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # could not even create the probe file; clean up and give up
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # hardlinks unsupported here (e.g. some network filesystems)
        return False
    finally:
        if fd is not None:
            fd.close()
        # always remove both probe files, best-effort
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1370
1370
def endswithsep(path):
    '''Tell whether path ends with the primary or alternate separator.'''
    if path.endswith(pycompat.ossep):
        return True
    alt = pycompat.osaltsep
    return alt and path.endswith(alt)
1375
1375
def splitpath(path):
    '''Split path by os.sep.

    Deliberately ignores os.altsep: this is just a spelling of
    "xxx.split(os.sep)". Run os.path.normpath() beforehand when
    mixed separators are possible.'''
    sep = pycompat.ossep
    return path.split(sep)
1383
1383
def gui():
    '''Report whether a graphical environment appears to be available.'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere X11 shows up as $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # logged in over SSH to a Mac: no usable GUI session
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # ask CoreGraphics whether a window session is available
        return isgui()
    # pure build without osutil.isgui; assume a GUI to be safe
    return True
1398
1398
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename is atomic
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original is absent: the empty temp file stands in for it
                return temp
            # annotate the error with the offending path if it lacks one
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # best-effort removal of the partial copy before propagating
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1437
1437
class filestat(object):
    """Capture the stat of a file to detect later modification exactly.

    The 'stat' attribute holds the os.stat() result when the path
    exists and None otherwise, sparing callers a separate exists()
    check before constructing this object.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            # a missing file simply yields stat = None; every other
            # failure is a real error and propagates
            if err.errno != errno.ENOENT:
                raise
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        # size + ctime + mtime identify a file's state exactly, once
        # ambiguity (see isambig) has been avoided
        try:
            mine, theirs = self.stat, old.stat
            return (mine.st_size == theirs.st_size
                    and mine.st_ctime == theirs.st_ctime
                    and mine.st_mtime == theirs.st_mtime)
        except AttributeError:
            # either side has stat = None (or isn't a filestat at all)
            return False

    def isambig(self, old):
        """Tell whether this stat is indistinguishable from old by
        timestamp.

        Two stats sharing the same ctime second are treated as
        ambiguous: a file changed twice within one second can keep
        identical ctime/mtime, so comparing timestamps alone may miss
        the change. Callers then nudge mtime (see avoidambig) so that
        "S[n-1].mtime != S[n].mtime" holds even when the size did not
        change. Returns False when either side has no stat data.
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Advance the mtime of path so it cannot be confused with old.

        'old' should be the previous filestat of 'path'. Silently does
        nothing when the process lacks the privileges needed to touch
        path (e.g. the file belongs to another user).
        """
        nexttime = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (nexttime, nexttime))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() by a non-owner raises EPERM; skip quietly then

    def __ne__(self, other):
        return not (self == other)
1521
1521
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # all writes go to this sibling temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish: rename the temp copy over the target, fixing up mtime
        # when timestamp ambiguity matters (checkambig=True)
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # throw away pending writes; the target file stays untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # as a context manager: publish on success, discard on exception
        if exctype is not None:
            self.discard()
        else:
            self.close()
1584
1584
def makedirs(name, mode=None, notindexed=False):
    """Create name and any missing ancestors, recursively.

    Directories actually created are chmod-ed to ``mode`` (when given)
    and, for "write" mode access, marked as "not to be indexed by the
    content indexing service" when ``notindexed`` is set. Directories
    that already exist are left completely untouched.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present: nothing to do, keep the existing mode
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # cannot ascend any further
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
            # lost a creation race with someone else: treat as success
            return
    if mode is not None:
        os.chmod(name, mode)
1612
1612
def readfile(path):
    """Return the entire contents of path as a byte string."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1616
1616
def writefile(path, text):
    """Replace the contents of path with text (bytes), creating it if
    needed."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1620
1620
def appendfile(path, text):
    """Append text (bytes) to the end of path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1624
1624
1625 class chunkbuffer(object):
1625 class chunkbuffer(object):
1626 """Allow arbitrary sized chunks of data to be efficiently read from an
1626 """Allow arbitrary sized chunks of data to be efficiently read from an
1627 iterator over chunks of arbitrary size."""
1627 iterator over chunks of arbitrary size."""
1628
1628
1629 def __init__(self, in_iter):
1629 def __init__(self, in_iter):
1630 """in_iter is the iterator that's iterating over the input chunks.
1630 """in_iter is the iterator that's iterating over the input chunks.
1631 targetsize is how big a buffer to try to maintain."""
1631 targetsize is how big a buffer to try to maintain."""
1632 def splitbig(chunks):
1632 def splitbig(chunks):
1633 for chunk in chunks:
1633 for chunk in chunks:
1634 if len(chunk) > 2**20:
1634 if len(chunk) > 2**20:
1635 pos = 0
1635 pos = 0
1636 while pos < len(chunk):
1636 while pos < len(chunk):
1637 end = pos + 2 ** 18
1637 end = pos + 2 ** 18
1638 yield chunk[pos:end]
1638 yield chunk[pos:end]
1639 pos = end
1639 pos = end
1640 else:
1640 else:
1641 yield chunk
1641 yield chunk
1642 self.iter = splitbig(in_iter)
1642 self.iter = splitbig(in_iter)
1643 self._queue = collections.deque()
1643 self._queue = collections.deque()
1644 self._chunkoffset = 0
1644 self._chunkoffset = 0
1645
1645
1646 def read(self, l=None):
1646 def read(self, l=None):
1647 """Read L bytes of data from the iterator of chunks of data.
1647 """Read L bytes of data from the iterator of chunks of data.
1648 Returns less than L bytes if the iterator runs dry.
1648 Returns less than L bytes if the iterator runs dry.
1649
1649
1650 If size parameter is omitted, read everything"""
1650 If size parameter is omitted, read everything"""
1651 if l is None:
1651 if l is None:
1652 return ''.join(self.iter)
1652 return ''.join(self.iter)
1653
1653
1654 left = l
1654 left = l
1655 buf = []
1655 buf = []
1656 queue = self._queue
1656 queue = self._queue
1657 while left > 0:
1657 while left > 0:
1658 # refill the queue
1658 # refill the queue
1659 if not queue:
1659 if not queue:
1660 target = 2**18
1660 target = 2**18
1661 for chunk in self.iter:
1661 for chunk in self.iter:
1662 queue.append(chunk)
1662 queue.append(chunk)
1663 target -= len(chunk)
1663 target -= len(chunk)
1664 if target <= 0:
1664 if target <= 0:
1665 break
1665 break
1666 if not queue:
1666 if not queue:
1667 break
1667 break
1668
1668
1669 # The easy way to do this would be to queue.popleft(), modify the
1669 # The easy way to do this would be to queue.popleft(), modify the
1670 # chunk (if necessary), then queue.appendleft(). However, for cases
1670 # chunk (if necessary), then queue.appendleft(). However, for cases
1671 # where we read partial chunk content, this incurs 2 dequeue
1671 # where we read partial chunk content, this incurs 2 dequeue
1672 # mutations and creates a new str for the remaining chunk in the
1672 # mutations and creates a new str for the remaining chunk in the
1673 # queue. Our code below avoids this overhead.
1673 # queue. Our code below avoids this overhead.
1674
1674
1675 chunk = queue[0]
1675 chunk = queue[0]
1676 chunkl = len(chunk)
1676 chunkl = len(chunk)
1677 offset = self._chunkoffset
1677 offset = self._chunkoffset
1678
1678
1679 # Use full chunk.
1679 # Use full chunk.
1680 if offset == 0 and left >= chunkl:
1680 if offset == 0 and left >= chunkl:
1681 left -= chunkl
1681 left -= chunkl
1682 queue.popleft()
1682 queue.popleft()
1683 buf.append(chunk)
1683 buf.append(chunk)
1684 # self._chunkoffset remains at 0.
1684 # self._chunkoffset remains at 0.
1685 continue
1685 continue
1686
1686
1687 chunkremaining = chunkl - offset
1687 chunkremaining = chunkl - offset
1688
1688
1689 # Use all of unconsumed part of chunk.
1689 # Use all of unconsumed part of chunk.
1690 if left >= chunkremaining:
1690 if left >= chunkremaining:
1691 left -= chunkremaining
1691 left -= chunkremaining
1692 queue.popleft()
1692 queue.popleft()
1693 # offset == 0 is enabled by block above, so this won't merely
1693 # offset == 0 is enabled by block above, so this won't merely
1694 # copy via ``chunk[0:]``.
1694 # copy via ``chunk[0:]``.
1695 buf.append(chunk[offset:])
1695 buf.append(chunk[offset:])
1696 self._chunkoffset = 0
1696 self._chunkoffset = 0
1697
1697
1698 # Partial chunk needed.
1698 # Partial chunk needed.
1699 else:
1699 else:
1700 buf.append(chunk[offset:offset + left])
1700 buf.append(chunk[offset:offset + left])
1701 self._chunkoffset += left
1701 self._chunkoffset += left
1702 left -= chunkremaining
1702 left -= chunkremaining
1703
1703
1704 return ''.join(buf)
1704 return ''.join(buf)
1705
1705
def filechunkiter(f, size=131072, limit=None):
    """Yield successive chunks of data read from file object ``f``.

    Each chunk holds at most ``size`` bytes (default 131072).  When
    ``limit`` is given, no more than ``limit`` bytes are produced in
    total.  Chunks may be shorter than ``size`` when the last chunk is
    reached, or when ``f`` is a socket or similar object that sometimes
    returns less data than requested.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        want = size if limit is None else min(limit, size)
        # when want == 0 the short-circuit skips the read entirely
        chunk = want and f.read(want)
        if not chunk:
            return
        if limit:
            limit -= len(chunk)
        yield chunk
1726
1726
def makedate(timestamp=None):
    '''Return ``(unixtime, offset)`` for ``timestamp`` (default: now).

    ``offset`` is the local timezone's distance from UTC in seconds.
    Negative timestamps are refused because the underlying platform time
    functions misbehave on them.
    '''
    when = time.time() if timestamp is None else timestamp
    if when < 0:
        raise Abort(_("negative timestamp: %d") % when,
                    hint=_("check your clock"))
    # difference between the UTC and local renderings of the same instant
    # is exactly the timezone offset
    utc = datetime.datetime.utcfromtimestamp(when)
    local = datetime.datetime.fromtimestamp(when)
    delta = utc - local
    return when, delta.days * 86400 + delta.seconds
1739
1739
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.

    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.  ``%1``/``%2`` (or ``%z``) in
    ``format`` expand to the signed hours/minutes of the offset.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the offset placeholders; note the sign convention is
        # inverted relative to tz (east of UTC -> positive display)
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    # clamp to the signed 32-bit range so the datetime below stays valid
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    local = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return local.strftime(format)
1775
1775
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1779
1779
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair

    The offset is in seconds west of UTC (hence the sign inversions
    below); ``(None, s)`` means no recognizable timezone suffix.
    """

    # named UTC aliases
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1807
1807
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    ``defaults`` maps format-part keys ("d", "mb", "yY", "HI", "M", "S")
    to ``(biased, today)`` string pairs used to fill in elements missing
    from ``format``.  (The ``[]`` default is never indexed: it is only
    touched when ``format`` omits a part, and callers that omit parts
    pass a real mapping — NOTE(review): mutable default kept for
    interface compatibility.)
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # a part like "mb" matches if either %m or %b appears in format
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # usenow (bool) indexes the (biased, today) pair: False -> 0
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1835
1835
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(
    ...     datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate('yesterday ') == parsedate(
    ...     (datetime.date.today() - datetime.timedelta(days=1)
    ...      ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed; pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates; both the English word and its translation are accepted
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1912
1912
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    '-{days}' within the last given number of days

    '{date} to {date}' within the inclusive range

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified parts toward the earliest possible moment
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified parts toward the latest possible moment;
        # probe month lengths from 31 down until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # relative spec: within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of uncertainty
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1988
1988
def stringmatcher(pattern):
    """Return (kind, pattern, matcher) for a possibly-prefixed pattern.

    A 're:' prefix selects regular-expression matching and a 'literal:'
    prefix forces exact matching; a missing or unknown prefix is treated
    as a literal match.

    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        rest = pattern[3:]
        try:
            regex = remod.compile(rest)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', rest, regex.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
2027
2027
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain, keep only what follows an opening '<', then cut
    # at the first space or dot
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2043
2043
def emailuser(user):
    """Return the user portion of an email address."""
    user = user.split('@', 1)[0]
    # drop any 'Name <' prefix; when '<' is absent, find() yields -1 and
    # the slice [0:] leaves the string untouched
    return user[user.find('<') + 1:]
2053
2053
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; a missing '<' gives index 0 via
    # find() + 1, and a missing '>' leaves the slice open-ended
    end = author.find('>')
    return author[author.find('<') + 1:end if end != -1 else None]
2060
2060
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    Delegates to encoding.trim, appending '...' when truncation occurs.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
2064
2064
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    ``unittable`` rows are (multiplier, divisor, format) triples ordered
    from coarsest to finest unit.
    '''

    def render(count):
        # pick the first (largest) unit whose threshold the count reaches
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the finest unit, unscaled
        return unittable[-1][2] % count

    return render
2075
2075
# Human-readable byte counts: for each magnitude, choose the format that
# keeps roughly three significant digits (100+ units -> no decimals,
# 10+ -> one decimal, 1+ -> two decimals).
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2088
2088
def uirepr(s):
    """repr() variant for user display.

    Collapses the doubled backslashes repr() produces so Windows paths
    stay readable.
    """
    rendered = repr(s)
    return rendered.replace('\\\\', '\\')
2092
2092
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Build (and cache) a width-aware TextWrapper subclass instance.

    On first call this defines the subclass, rebinds the module-level
    name ``MBTextWrapper`` to the class itself (so later calls skip the
    class definition), and returns an instance constructed with
    ``kwargs``.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so the head occupies at most space_left display
            # columns (per encoding.ucolwidth); returns (head, rest).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    # display columns, not len(), decide what fits
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: replace this factory's module-level binding with the class
    # so subsequent calls construct instances directly
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2196
2196
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a (byte) string to the given width.

    initindent prefixes the first output line, hangindent every later
    one. Bytes are decoded with the local encoding before wrapping and
    re-encoded on return.
    """
    widest = max(len(hangindent), len(initindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    uline = line.decode(encoding.encoding, encoding.encodingmode)
    uinit = initindent.decode(encoding.encoding, encoding.encodingmode)
    uhang = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=uinit,
                            subsequent_indent=uhang)
    return wrapper.fill(uline).encode(encoding.encoding)
2209
2209
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            # iter() with a '' sentinel calls fp.readline() until EOF
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # EINTR-safe line iterator built directly on os.read
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # keep a trailing partial line for the next read
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        # Return a line iterator over fp that survives EINTR. Regular
        # (S_ISREG) on-disk files take the fast unsafe path (see above).
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        # identity: iterating the file object directly is safe here
        return fp
2281
2281
def iterlines(iterator):
    """Yield every line contained in each chunk produced by iterator."""
    for data in iterator:
        for text in data.splitlines():
            yield text
2286
2286
def expandpath(path):
    """Expand environment variables and '~' constructs in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2289
2289
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    # frozen binary: report the bundled executable/interpreter directly
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2304
2304
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of whichever child just exited
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                # the child is gone; re-checking condfn() guards against
                # it having satisfied the condition just before exiting
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore whatever SIGCHLD handler was installed before us
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2339
2339
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the prefix-escape entry does not leak into the
        # caller's dict (the original code mutated the argument here)
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # group()[1:] strips the single matched prefix character
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2364
2364
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric - treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2381
2381
# recognized spellings for true/false configuration values
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
2392
2392
# map every two-hex-digit string (all case combinations) to its character
_hextochr = {a + b: chr(int(a + b, 16))
             for a in string.hexdigits for b in string.hexdigits}
2395
2395
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unquoted in user/passwd components (see __str__)
    _safechars = "!~*'()+"
    # characters left unquoted in path/fragment components (see __str__)
    _safepchars = "/!~*'()+:\\"
    # matches a leading 'scheme:' prefix
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2473
2473
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse path and populate the component attributes.

        parsequery/parsefragment control whether a trailing '?...' or
        '#...' is split out or left embedded in the path.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath marks plain filesystem paths (no scheme/netloc)
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, pycompat.urlunquote(v))
2565
2565
2566 def __repr__(self):
2566 def __repr__(self):
2567 attrs = []
2567 attrs = []
2568 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2568 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2569 'query', 'fragment'):
2569 'query', 'fragment'):
2570 v = getattr(self, a)
2570 v = getattr(self, a)
2571 if v is not None:
2571 if v is not None:
2572 attrs.append('%s: %r' % (a, v))
2572 attrs.append('%s: %r' % (a, v))
2573 return '<url %s>' % ', '.join(attrs)
2573 return '<url %s>' % ', '.join(attrs)
2574
2574
    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        # local paths (and bundle paths) round-trip without a netloc
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                # e.g. 'file:///c:/tmp' needs the extra slash before 'c:'
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # bracketed IPv6 literals are emitted verbatim
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s
2651
2651
2652 def authinfo(self):
2652 def authinfo(self):
2653 user, passwd = self.user, self.passwd
2653 user, passwd = self.user, self.passwd
2654 try:
2654 try:
2655 self.user, self.passwd = None, None
2655 self.user, self.passwd = None, None
2656 s = str(self)
2656 s = str(self)
2657 finally:
2657 finally:
2658 self.user, self.passwd = user, passwd
2658 self.user, self.passwd = user, passwd
2659 if not self.user:
2659 if not self.user:
2660 return (s, None)
2660 return (s, None)
2661 # authinfo[1] is passed to urllib2 password manager, and its
2661 # authinfo[1] is passed to urllib2 password manager, and its
2662 # URIs must not contain credentials. The host is passed in the
2662 # URIs must not contain credentials. The host is passed in the
2663 # URIs list because Python < 2.4.3 uses only that to search for
2663 # URIs list because Python < 2.4.3 uses only that to search for
2664 # a password.
2664 # a password.
2665 return (s, (None, (s, self.host),
2665 return (s, (None, (s, self.host),
2666 self.user, self.passwd or ''))
2666 self.user, self.passwd or ''))
2667
2667
2668 def isabs(self):
2668 def isabs(self):
2669 if self.scheme and self.scheme != 'file':
2669 if self.scheme and self.scheme != 'file':
2670 return True # remote URL
2670 return True # remote URL
2671 if hasdriveletter(self.path):
2671 if hasdriveletter(self.path):
2672 return True # absolute for our purposes - can't be joined()
2672 return True # absolute for our purposes - can't be joined()
2673 if self.path.startswith(r'\\'):
2673 if self.path.startswith(r'\\'):
2674 return True # Windows UNC path
2674 return True # Windows UNC path
2675 if self.path.startswith('/'):
2675 if self.path.startswith('/'):
2676 return True # POSIX-style
2676 return True # POSIX-style
2677 return False
2677 return False
2678
2678
2679 def localpath(self):
2679 def localpath(self):
2680 if self.scheme == 'file' or self.scheme == 'bundle':
2680 if self.scheme == 'file' or self.scheme == 'bundle':
2681 path = self.path or '/'
2681 path = self.path or '/'
2682 # For Windows, we need to promote hosts containing drive
2682 # For Windows, we need to promote hosts containing drive
2683 # letters to paths with drive letters.
2683 # letters to paths with drive letters.
2684 if hasdriveletter(self._hostport):
2684 if hasdriveletter(self._hostport):
2685 path = self._hostport + '/' + self.path
2685 path = self._hostport + '/' + self.path
2686 elif (self.host is not None and self.path
2686 elif (self.host is not None and self.path
2687 and not hasdriveletter(path)):
2687 and not hasdriveletter(path)):
2688 path = '/' + path
2688 path = '/' + path
2689 return path
2689 return path
2690 return self._origpath
2690 return self._origpath
2691
2691
2692 def islocal(self):
2692 def islocal(self):
2693 '''whether localpath will return something that posixfile can open'''
2693 '''whether localpath will return something that posixfile can open'''
2694 return (not self.scheme or self.scheme == 'file'
2694 return (not self.scheme or self.scheme == 'file'
2695 or self.scheme == 'bundle')
2695 or self.scheme == 'bundle')
2696
2696
def hasscheme(path):
    """Report whether *path* parses as a URL carrying a scheme."""
    u = url(path)
    return bool(u.scheme)
2699
2699
def hasdriveletter(path):
    """Check whether *path* starts with a Windows drive letter ("X:")."""
    if not path:
        # preserve falsy input (empty string / None) as the return value
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2702
2702
def urllocalpath(path):
    """Convert a url string to a local filesystem path when possible."""
    # query/fragment parsing is disabled so '?' and '#' stay in the path
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2705
2705
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2712
2712
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2718
2718
def isatty(fp):
    '''Return fp.isatty(), treating objects without isatty() as non-ttys.'''
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2724
2724
# Format a duration given in seconds as a human-readable string, picking
# the coarsest unit (s, ms, us, ns) and precision per unitcountfn's
# (threshold, divisor, format) triples below.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2740
2740
# Current indentation depth for @timed output; kept in a one-element list
# so nested wrappers can mutate the shared value in place.
_timenesting = [0]
2742
2742
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        # bump the shared nesting depth so nested @timed calls indent
        # like a call tree in the report
        _timenesting[0] += indent
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper
2767
2767
# Suffixes understood by sizetoint(); matching uses str.endswith, so the
# two-letter forms ('kb', 'mb', 'gb') must be tested before the bare 'b'
# suffix, which would otherwise shadow them.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2770
2770
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2789
2789
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        """Register *hook* under the ordering key *source*."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Invoke every registered hook with *args*; return their results."""
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for source, hook in self._hooks]
2807
2807
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop this function's own frame plus the 'skip' most recent callers
    frames = traceback.extract_stack()[:-skip - 1]
    entries = [(fileline % (fname, lineno), funcname)
               for fname, lineno, funcname, _text in frames]
    if not entries:
        return
    # widest location string, used to align the function-name column
    width = max(len(location) for location, _func in entries)
    for location, funcname in entries:
        if line is None:
            yield (width, location, funcname)
        else:
            yield line % (width, location, funcname)
2829
2829
def debugstacktrace(msg='stacktrace', skip=0, f=stderr, otherf=stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the companion stream first so the trace is not interleaved
        # with buffered output (typically stdout)
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also hides this helper's own frame from the report
    for entry in getstackframes(skip + 1):
        f.write(entry)
    f.flush()
2842
2842
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count
        self._dirs = {}
        # bind the method once; it is called for every entry in map
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dict-like input with per-file state tuples: entries whose
            # first state element equals 'skip' are excluded from the count
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # plain iterable of file names (e.g. a manifest)
            for f in map:
                addpath(f)

    def addpath(self, path):
        # Walk ancestors deepest-first; stop at the first one already
        # known, because its own ancestors were accounted for when it was
        # first inserted. New ancestors start with a count of 1.
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # Mirror image of addpath: decrement the deepest ancestor whose
        # count stays positive, and drop entries that reach zero.
        # Raises KeyError if path was never added.
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        # iterate over the known directory names (Python 2 dict iterator)
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2878
2878
# Prefer the C implementation of the dirs multiset when the 'parsers'
# extension module provides one; it replaces the pure Python class above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2881
2881
def finddirs(path):
    """Yield every ancestor directory of *path*, deepest first."""
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2887
2887
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception; later exit
                    # functions see a clean state
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # Remember the most recent failure (dropping the redundant
                # duplicate sys.exc_info() assignment the original had) and
                # keep running the remaining exit functions; re-raised below.
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2947
2947
2948 # compression code
2948 # compression code
2949
2949
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # engine name -> compressionengine instance
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}

    def __getitem__(self, key):
        # look up an engine by its registered name; raises KeyError
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Raises ``error.Abort`` if the engine name, its bundle spec name,
        or its bundle type collides with an earlier registration.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # An engine may declare no external facing name (bundlename is
            # None/empty); only record the mapping when one is present.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        # user-facing bundle spec names with a registered engine
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        # internal bundle type identifiers with a registered engine
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine
3042
3042
# Global registry instance that compression engines register against.
compengines = compressormanager()
3044
3044
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Return the name this engine registers under.

        Subclasses must override this; the base implementation raises
        NotImplementedError.
        """
        raise NotImplementedError()

    def available(self):
        """Tell whether this engine can be used in this installation.

        Override for engines that depend on optional components (such as
        C extensions that may be missing); the default says available.
        """
        return True

    def bundletype(self):
        """Describe the bundle identifiers for this engine.

        Return None when the engine cannot be used for bundles. Otherwise
        return a 2-tuple: the user-facing "bundle spec" compression name
        and the internal identifier recorded inside bundles. Setting the
        first element to ``None`` hides the engine from external usage.

        Engines usable for bundles must also implement ``compressstream``
        and ``decompressorreader``.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of byte chunks.

        Takes an iterator (ideally a generator) of byte chunks and returns
        an iterator (ideally a generator) of compressed byte chunks. The
        optional ``opts`` mapping tunes compression; each engine interprets
        it in its own way.

        Subclasses supporting bundles must override this.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Wrap a file object with on-the-fly decompression.

        ``fh`` is an object with a ``read(size)`` method yielding
        compressed data; the return value is an object whose ``read(size)``
        yields the decompressed bytes.

        Subclasses supporting bundles must override this.
        """
        raise NotImplementedError()
3103
3103
class _zlibengine(compressionengine):
    """zlib/gzip compression, the historical default for bundles."""

    def name(self):
        return 'zlib'

    def bundletype(self):
        return 'gzip', 'GZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # level -1 asks zlib for its default compression level
        compressor = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            out = compressor.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if out:
                yield out

        yield compressor.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield decomp.decompress(chunk, 2 ** 18)
                    chunk = decomp.unconsumed_tail

        return chunkbuffer(gen())
3134
3134
# zlib is part of the Python standard library, so this always registers.
compengines.register(_zlibengine())
3136
3136
class _bz2engine(compressionengine):
    """bzip2 compression (the 'bzip2'/'BZ' bundle spec)."""

    def name(self):
        return 'bz2'

    def bundletype(self):
        return 'bzip2', 'BZ'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # level 9 (maximum compression) is the bz2 module's default too
        compressor = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            out = compressor.compress(chunk)
            if out:
                yield out

        yield compressor.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield decomp.decompress(chunk)

        return chunkbuffer(gen())
3161
3161
# bz2 is part of the Python standard library, so this always registers.
compengines.register(_bz2engine())
3163
3163
class _truncatedbz2engine(compressionengine):
    """bzip2 decompression for streams stored without the 'BZ' header."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # no user-facing bundle spec name: internal-only bundle type
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            decomp = bz2.BZ2Decompressor()
            # The input stream doesn't have the 'BZ' header. So add it back.
            decomp.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield decomp.decompress(chunk)

        return chunkbuffer(gen())
3182
3182
# Registered under an internal-only bundle type (no user-facing name).
compengines.register(_truncatedbz2engine())
3184
3184
class _noopengine(compressionengine):
    """Pass-through engine that performs no compression at all."""

    def name(self):
        return 'none'

    def bundletype(self):
        return 'none', 'UN'

    def compressstream(self, it, opts=None):
        # Identity transform: the input chunks are already in final form.
        return it

    def decompressorreader(self, fh):
        # Reading the raw stream unchanged is the identity "decompression."
        return fh

compengines.register(_noopengine())
3199
3199
class _zstdengine(compressionengine):
    """Compression engine backed by the optionally-bundled zstd module.

    The zstd bindings are not present in every install, so the module is
    imported lazily and ``available()`` reports whether it loaded.
    """
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Touch an attribute to force the (possibly demand-loaded)
            # import to actually resolve now.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        return 'zstd', 'ZS'

    def compressstream(self, it, opts=None):
        """Yield zstd-compressed chunks for the chunks in ``it``."""
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        compobj = self._module.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            compressed = compobj.compress(chunk)
            # The compressor may buffer input internally and emit nothing yet.
            if compressed:
                yield compressed

        # Drain whatever remains in the compressor's internal buffers.
        yield compobj.flush()

    def decompressorreader(self, fh):
        decompressor = self._module.ZstdDecompressor()
        return chunkbuffer(decompressor.read_from(fh))

compengines.register(_zstdengine())
3244
3244
# Convenient shortcut: short alias for debugstacktrace, handy when
# dropping a quick trace call into code under investigation.
dst = debugstacktrace